Clean Code

This commit is contained in:
dsk-minchulahn
2023-12-19 13:03:29 +09:00
parent 947561ce1d
commit 0273450ff6
4237 changed files with 0 additions and 7447 deletions

View File

@@ -0,0 +1,8 @@
- name: 'Provision Image'
  hosts: default
  become: true
  tasks:
    - name: echo hello
      command: echo "Not Valid Ruby Version"

View File

@@ -0,0 +1,54 @@
packer {
  required_plugins {
    amazon = {
      version = ">= 0.0.2"
      source  = "github.com/hashicorp/amazon"
    }
  }
}

variable "ami_prefix" {
  type    = string
  default = "datasaker-bastion-ubuntu2004"
}

locals {
  timestamp = regex_replace(timestamp(), "[- TZ:]", "")
}

source "amazon-ebs" "datasaker-bastion-ubuntu2004" {
  ami_name      = "${var.ami_prefix}-${local.timestamp}"
  instance_type = "t3.small"
  region        = "ap-northeast-2"
  source_ami_filter {
    filters = {
      image-id            = "ami-0ea5eb4b05645aa8a"
      root-device-type    = "ebs"
      virtualization-type = "hvm"
    }
    most_recent = true
    owners      = ["099720109477"]
  }
  tags = {
    source_ami_name = "{{ .SourceAMIName }}"
  }
  ssh_username = "ubuntu"
}

build {
  name = "datasaker-bastion-packer"
  sources = [
    "source.amazon-ebs.datasaker-bastion-ubuntu2004"
  ]

  provisioner "ansible" {
    playbook_file = "./datasaker-bastion-ansible-install_apt_packages.yaml"
    extra_arguments = [
      "--become",
    ]
    ansible_env_vars = [
      "ANSIBLE_HOST_KEY_CHECKING=False",
    ]
  }
}

View File

@@ -0,0 +1,18 @@
- name: 'Provision Image'
  hosts: default
  become: true
  tasks:
    - name: echo hello
      command: echo "Not Valid Ruby Version"
    - name: Update apt repo and cache on all Debian/Ubuntu boxes
      apt: update_cache=yes cache_valid_time=3600
    - name: Install cifs-utils
      apt: name=cifs-utils state=latest update_cache=yes
    - name: Install nfs-common
      apt: name=nfs-common state=latest update_cache=yes

View File

@@ -0,0 +1,57 @@
packer {
  required_plugins {
    amazon = {
      version = ">= 0.0.2"
      source  = "github.com/hashicorp/amazon"
    }
  }
}

variable "ami_prefix" {
  type    = string
  default = "datasaker-node-ubuntu2004"
}

locals {
  timestamp = regex_replace(timestamp(), "[- TZ:]", "")
}

# The source block defines the spec of the image that will actually be built
source "amazon-ebs" "datasaker-node-ubuntu2004" {
  ami_name      = "${var.ami_prefix}-${local.timestamp}"
  instance_type = "t3.small"
  region        = "ap-northeast-2"
  source_ami_filter {
    filters = {
      image-id            = "ami-0ea5eb4b05645aa8a"
      root-device-type    = "ebs"
      virtualization-type = "hvm"
    }
    most_recent = true
    owners      = ["099720109477"]
  }
  tags = {
    source_ami_name = "{{ .SourceAMIName }}"
  }
  ssh_username = "ubuntu"
}

build {
  name = "datasaker-packer"
  sources = [
    "source.amazon-ebs.datasaker-node-ubuntu2004"
  ]

  provisioner "ansible" {
    playbook_file = "./datasaker-node-ansible-install_apt_packages.yaml"
    extra_arguments = [
      "--become",
    ]
    ansible_env_vars = [
      "ANSIBLE_HOST_KEY_CHECKING=False",
    ]
  }
}

View File

@@ -0,0 +1,6 @@
---
- hosts: default
  become: true
  roles:
    - bastion
    - security-settings

View File

@@ -0,0 +1,8 @@
---
- hosts: default
  become: true
  roles:
    - docker
  vars:
    username: root
    password: saasadmin1234!@#$

View File

@@ -0,0 +1,9 @@
---
- hosts: default
  become: true
  roles:
    - node
    - security-settings
  vars:
    username: root
    password: saasadmin1234!@#$

View File

@@ -0,0 +1,7 @@
---
- name: 'Provision Image'
  hosts: default
  become: true
  roles:
    - bastion
    - security-settings

View File

@@ -0,0 +1,3 @@
---
- name: echo hello
  command: echo "Not Valid Ruby Version"

View File

@@ -0,0 +1,10 @@
---
- name: Reload systemd configuration
  systemd:
    daemon_reload: true

- name: Restart docker service
  service:
    name: docker
    enabled: true
    state: restarted

View File

@@ -0,0 +1,86 @@
---
- name: Update and upgrade yum packages
  yum:
    name: "*"
    state: latest

- name: Install yum packages
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - python-pip
    - yum-utils
    - device-mapper-persistent-data
    - lvm2
    - amazon-linux-extras

- name: Add extras repository
  shell: yum-config-manager --enable extras

- name: Disable firewalld
  systemd: name=firewalld state=stopped
  ignore_errors: yes
  tags:
    - install
    - atomic
    - firewalld

- name: Disable SWAP since kubernetes can't work with swap enabled (1/2)
  command: 'swapoff -a'

# - name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2)
#   replace:
#     path: /etc/fstab
#     regexp: '^([^#].*?\sswap\s+sw\s+.*)$'
#     replace: '# \1'

- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2)
  become: true
  lineinfile:
    path: /etc/fstab
    regexp: '^/dev/mapper/.*swap'
    line: '# {{ item }}'
  # when: item is search('^/dev/mapper/.*swap')
  loop: "{{ lookup('file', '/etc/fstab').split('\n') }}"

- name: Add overlay and br_netfilter to module autoload
  lineinfile:
    path: /etc/modules-load.d/k8s2.conf
    line: "{{ item }}"
    create: true
  with_items:
    - 'overlay'
    - 'br_netfilter'

- name: Load overlay and br_netfilter kernel modules
  modprobe:
    name: "{{ item }}"
    state: present
  become: true
  with_items:
    - 'overlay'
    - 'br_netfilter'

- name: Add Kubernetes sysctl settings to /etc/sysctl.d/k8s.conf
  lineinfile:
    path: /etc/sysctl.d/k8s.conf
    line: "{{ item }}"
    create: true
  with_items:
    - 'net.bridge.bridge-nf-call-iptables = 1'
    - 'net.bridge.bridge-nf-call-ip6tables = 1'
    - 'net.ipv4.ip_forward = 1'

- name: Enable net.bridge.bridge-nf-call-iptables
  sysctl:
    name: "{{ item }}"
    value: 1
  with_items:
    - 'net.bridge.bridge-nf-call-iptables'
    - 'net.bridge.bridge-nf-call-ip6tables'

- name: Enable net.ipv4.ip_forward
  sysctl:
    name: net.ipv4.ip_forward
    value: "1"

View File

@@ -0,0 +1,66 @@
---
- name: Update and upgrade apt packages
  apt:
    upgrade: yes
    update_cache: yes
    force_apt_get: yes
    cache_valid_time: 86400

- name: Install apt packages
  apt:
    name: ['apt-transport-https', 'ca-certificates', 'curl', 'gnupg', 'lsb-release']
    state: present

- name: Disable ufw
  command: 'ufw disable'
  when: ansible_distribution_version == '20.04'

- name: Disable SWAP since kubernetes can't work with swap enabled (1/2)
  command: 'swapoff -a'

- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2)
  replace:
    path: /etc/fstab
    regexp: '^([^#].*?\sswap\s+sw\s+.*)$'
    replace: '# \1'

- name: Add overlay and br_netfilter to module autoload
  lineinfile:
    path: /etc/modules-load.d/k8s.conf
    line: "{{ item }}"
    create: true
  with_items:
    - 'overlay'
    - 'br_netfilter'

- name: Load overlay and br_netfilter kernel modules
  modprobe:
    name: "{{ item }}"
    state: present
  become: true
  with_items:
    - 'overlay'
    - 'br_netfilter'

- name: Add Kubernetes sysctl settings to /etc/sysctl.d/k8s.conf
  lineinfile:
    path: /etc/sysctl.d/k8s.conf
    line: "{{ item }}"
    create: true
  with_items:
    - 'net.bridge.bridge-nf-call-iptables = 1'
    - 'net.bridge.bridge-nf-call-ip6tables = 1'
    - 'net.ipv4.ip_forward = 1'

- name: Enable net.bridge.bridge-nf-call-iptables
  sysctl:
    name: "{{ item }}"
    value: 1
  with_items:
    - 'net.bridge.bridge-nf-call-iptables'
    - 'net.bridge.bridge-nf-call-ip6tables'

- name: Enable net.ipv4.ip_forward
  sysctl:
    name: net.ipv4.ip_forward
    value: "1"

View File

@@ -0,0 +1,53 @@
---
- name: Install docker-ce (centos) via amazon-linux-extras packages
  shell: "amazon-linux-extras install docker -y"

- name: Ensure Python pip packages are installed
  pip:
    name: "{{ item }}"
  with_items:
    - boto
    - boto3
    - docker-compose

# - name: Add docker script
#   command: curl -fsSL https://get.docker.com -o /root/get-docker.sh
#
# - name: install docker
#   command: sh /root/get-docker.sh

- name: Create docker configuration directory
  file:
    path: /etc/docker
    state: directory

# - name: Install required packages
#   yum:
#     name: ['docker-ce']
#     state: present
#   notify:
#     - Reload systemd configuration
#     - Restart docker service

- name: Configure docker
  template:
    src: daemon.json.j2
    dest: /etc/docker/daemon.json
  notify:
    - Reload systemd configuration
    - Restart docker service

# - name: Delete containerd config
#   file:
#     path: /etc/containerd/config.toml
#     state: absent
#   notify:
#     - Restart containerd service

- meta: flush_handlers

- name: Enable docker service
  service:
    name: docker
    enabled: True
    state: started

View File

@@ -0,0 +1,19 @@
---
- name: Add docker script
  command: curl -fsSL https://get.docker.com -o /root/get-docker.sh

- name: Install docker
  command: sh /root/get-docker.sh

- name: Create docker configuration directory
  file:
    path: /etc/docker
    state: directory

- name: Configure docker
  template:
    src: daemon.json.j2
    dest: /etc/docker/daemon.json
  notify:
    - Reload systemd configuration
    - Restart docker service

View File

@@ -0,0 +1,19 @@
---
- name: Gather Ansible Facts
  ansible.builtin.setup:

- include: 00-amazon-os-main.yml
  tags: amazon
  when: ansible_facts.os_family == 'RedHat'

- include: 00-ubuntu-os-main.yml
  tags: ubuntu
  when: ansible_facts.os_family == 'Debian'

- include: 01-amazon-os-docker.yml
  tags: cent-docker
  when: ansible_facts.os_family == 'RedHat'

- include: 01-ubuntu-os-docker.yml
  tags: ubuntu-docker
  when: ansible_facts.os_family == 'Debian'

View File

@@ -0,0 +1,8 @@
---
- name: Deploy sysctl configuration
  template:
    src: sysctl.j2
    dest: /etc/sysctl.conf
    owner: root
    group: root
    mode: 0644

View File

@@ -0,0 +1,27 @@
#
# /etc/pam.d/common-auth - authentication settings common to all services
#
# This file is included from other service-specific PAM config files,
# and should contain a list of the authentication modules that define
# the central authentication scheme for use on the system
# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
# traditional Unix authentication mechanisms.
#
# As of pam 1.0.1-6, this file is managed by pam-auth-update by default.
# To take advantage of this, it is recommended that you configure any
# local modules either before or after the default block, and use
# pam-auth-update to manage selection of other modules. See
# pam-auth-update(8) for details.
auth required pam_tally2.so onerr={{onerr}} even_deny_root deny={{deny}} unlock_time={{unlock_time}}
# here are the per-package modules (the "Primary" block)
auth [success=1 default=ignore] pam_unix.so nullok
# here's the fallback if no module succeeds
auth requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
auth required pam_permit.so
# since the modules above will each just jump around
# and here are more per-package modules (the "Additional" block)
auth optional pam_cap.so
# end of pam-auth-update config

View File

@@ -0,0 +1,9 @@
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "insecure-registries": ["10.10.31.243:5000"]
}

View File

@@ -0,0 +1,50 @@
# Configuration for systemwide password quality limits
# Defaults:
#
# Number of characters in the new password that must not be present in the
# old password.
# difok = 5
#
# Minimum acceptable size for the new password (plus one if
# credits are not disabled which is the default). (See pam_cracklib manual.)
# Cannot be set to lower value than 6.
minlen = {{pwquality_minlen}}
#
# The maximum credit for having digits in the new password. If less than 0
# it is the minimum number of digits in the new password.
dcredit = {{pwquality_dcredit}}
#
# The maximum credit for having uppercase characters in the new password.
# If less than 0 it is the minimum number of uppercase characters in the new
# password.
ucredit = {{pwquality_ucredit}}
#
# The maximum credit for having lowercase characters in the new password.
# If less than 0 it is the minimum number of lowercase characters in the new
# password.
lcredit = {{pwquality_lcredit}}
#
# The maximum credit for having other characters in the new password.
# If less than 0 it is the minimum number of other characters in the new
# password.
ocredit = {{pwquality_ocredit}}
#
# The minimum number of required classes of characters for the new
# password (digits, uppercase, lowercase, others).
# minclass = 0
#
# The maximum number of allowed consecutive same characters in the new password.
# The check is disabled if the value is 0.
maxrepeat = {{pwquality_maxrepeat}}
#
# The maximum number of allowed consecutive characters of the same class in the
# new password.
# The check is disabled if the value is 0.
# maxclassrepeat = 0
#
# Whether to check for the words from the passwd entry GECOS string of the user.
# The check is enabled if the value is not 0.
# gecoscheck = 0
#
# Path to the cracklib dictionaries. Default is to use the cracklib default.
# dictpath =

View File

@@ -0,0 +1,82 @@
#
# /etc/sysctl.conf - Configuration file for setting system variables
# See /etc/sysctl.d/ for additional system variables.
# See sysctl.conf (5) for information.
#
#kernel.domainname = example.com
# Uncomment the following to stop low-level messages on console
#kernel.printk = 3 4 1 3
###################################################################
# Functions previously found in netbase
#
# Uncomment the next two lines to enable Spoof protection (reverse-path filter)
# Turn on Source Address Verification in all interfaces to
# prevent some spoofing attacks
#net.ipv4.conf.default.rp_filter=1
#net.ipv4.conf.all.rp_filter=1
# Uncomment the next line to enable TCP/IP SYN cookies
# See http://lwn.net/Articles/277146/
# Note: This may impact IPv6 TCP sessions too
#net.ipv4.tcp_syncookies=1
# Uncomment the next line to enable packet forwarding for IPv4
#net.ipv4.ip_forward=1
# Uncomment the next line to enable packet forwarding for IPv6
# Enabling this option disables Stateless Address Autoconfiguration
# based on Router Advertisements for this host
#net.ipv6.conf.all.forwarding=1
###################################################################
# Additional settings - these settings can improve the network
# security of the host and prevent against some network attacks
# including spoofing attacks and man in the middle attacks through
# redirection. Some network environments, however, require that these
# settings are disabled so review and enable them as needed.
#
# Do not accept ICMP redirects (prevent MITM attacks)
#net.ipv4.conf.all.accept_redirects = 0
#net.ipv6.conf.all.accept_redirects = 0
# _or_
# Accept ICMP redirects only for gateways listed in our default
# gateway list (enabled by default)
# net.ipv4.conf.all.secure_redirects = 1
#
# Do not send ICMP redirects (we are not a router)
#net.ipv4.conf.all.send_redirects = 0
#
# Do not accept IP source route packets (we are not a router)
#net.ipv4.conf.all.accept_source_route = 0
#net.ipv6.conf.all.accept_source_route = 0
#
# Log Martian Packets
#net.ipv4.conf.all.log_martians = 1
#
###################################################################
# Magic system request Key
# 0=disable, 1=enable all, >1 bitmask of sysrq functions
# See https://www.kernel.org/doc/html/latest/admin-guide/sysrq.html
# for what other values do
#kernel.sysrq=438
vm.dirty_background_ratio = 5
vm.dirty_ratio = 80
net.core.default_qdisc = fq
net.core.rmem_max = 268435456
net.core.wmem_max = 268435456
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.all.arp_filter = 1
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.default.arp_filter = 1
net.ipv4.tcp_congestion_control = htcp
net.ipv4.tcp_no_metrics_save = 1
net.ipv4.tcp_rmem = 4096 87380 134217728
net.ipv4.tcp_wmem = 4096 65536 134217728

View File

@@ -0,0 +1,5 @@
helm_checksum: sha256:3156e4fe5f034e5b127cf165d61a8a1c48eb7a73b14689b273de5e6117df6fe2
helm_version: v3.2.3
kubernetes_version: 1.25.2
kubernetes_middleware_namespace: dsk-middle

View File

@@ -0,0 +1,12 @@
dependencies:
- name: zookeeper
  repository: https://charts.helm.sh/incubator
  version: 2.1.4
- name: mysql
  repository: https://charts.helm.sh/stable
  version: 1.6.4
- name: postgresql
  repository: https://charts.helm.sh/stable
  version: 8.6.4
digest: sha256:fb2ab5eed4b4fc00eee5f23764209d7cb494a07161439ea28d85ed3741eaf7f7
generated: "2022-07-29T12:12:44.428393074+09:00"

View File

@@ -0,0 +1,41 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v2
appVersion: 0.23.0
description: Apache Druid is a high performance real-time analytics database.
name: druid
dependencies:
  - name: zookeeper
    version: 2.1.4
    repository: https://charts.helm.sh/incubator
    condition: zookeeper.enabled
  - name: mysql
    version: 1.6.4
    repository: https://charts.helm.sh/stable
    condition: mysql.enabled
  - name: postgresql
    version: 8.6.4
    repository: https://charts.helm.sh/stable
    condition: postgresql.enabled
version: 0.3.1
home: https://druid.apache.org/
icon: https://druid.apache.org/img/favicon.png
sources:
- https://github.com/apache/druid
keywords:
- olap
- database
- analytics

View File

@@ -0,0 +1,212 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
# Apache Druid
[Apache Druid](https://druid.apache.org/) is a high performance real-time analytics database.
## Dependency Update
Before you install the Druid Chart, update the dependencies:
```bash
helm dependency update helm/druid
```
## Install Chart
To install the Druid Chart into your Kubernetes cluster:
```bash
helm install druid helm/druid --namespace dev --create-namespace
```
After the installation succeeds, you can check the status of the Chart:
```bash
helm status druid -n dev
```
If you want to delete your Chart, use this command:
```bash
helm uninstall druid -n dev
```
### Helm ingresses
The Chart provides ingress configuration to allow customizing the installation by adapting the `values.yaml` depending on your setup.
Please read the comments in the `values.yaml` file for more details on how to configure your reverse
proxy or load balancer.
### Chart Prefix
This Helm chart automatically prefixes all names using the release name to avoid collisions.
### URL prefix
This chart exposes 6 endpoints:
- Druid Overlord
- Druid Broker
- Druid Coordinator
- Druid Historical
- Druid Middle Manager
- Druid Router
### Druid configuration
Druid configuration can be changed by using environment variables from the Docker image.
See the
[Druid Docker entry point](https://github.com/apache/druid/blob/master/distribution/docker/druid.sh)
for more information.
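As a minimal sketch, such environment variables can be supplied through the chart's `configVars` map (documented in the table below). The specific variable names here are illustrative assumptions following the entry point's `druid_a_b_c` → `druid.a.b.c` convention, not values taken from this chart:
```yaml
# Hypothetical overrides in values.yaml; the Docker entry point translates
# e.g. druid_processing_numThreads into the runtime property druid.processing.numThreads.
configVars:
  druid_processing_numThreads: "2"
  druid_emitter_logging_logLevel: debug
```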
### Middle Manager and Historical Statefulset
Middle Managers and Historicals use StatefulSets. Persistence is enabled by default.
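For example, persistence for the Historical StatefulSet can be tuned with the parameters listed in the table below (the commented storage class is an assumption for illustration, only needed when your cluster requires a non-default class):
```yaml
historical:
  persistence:
    enabled: true
    size: 4Gi              # chart default per the table below
    # storageClass: gp2    # assumption: set only for a non-default class
    accessMode: ReadWriteOnce
```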
## Helm chart Configuration
The following table lists the configurable parameters of the Druid chart and their default values.
| Parameter | Description | Default |
|------------------------------------------|---------------------------------------------------------|--------------------------------------------|
| `image.repository` | container image name | `apache/druid` |
| `image.tag` | container image tag | `0.19.0` |
| `image.pullPolicy` | container pull policy | `IfNotPresent` |
| `image.pullSecrets` | image pull secrets for private repository | `[]` |
| `configMap.enabled` | enable druid configuration as configmap | `true` |
| `configVars` | druid configuration variables for all components | `` |
| `gCloudStorage.enabled` | look for secret to set google cloud credentials | `false` |
| `gCloudStorage.secretName` | secretName to be mounted as google cloud credentials | `false` |
| `broker.enabled` | enable broker | `true` |
| `broker.name` | broker component name | `broker` |
| `broker.replicaCount` | broker node replicas (deployment) | `1` |
| `broker.port` | port of broker component | `8082` |
| `broker.serviceType` | service type for service | `ClusterIP` |
| `broker.resources` | broker node resources requests & limits | `{}` |
| `broker.podAnnotations` | broker deployment annotations | `{}` |
| `broker.nodeSelector` | Node labels for broker pod assignment | `{}` |
| `broker.tolerations` | broker tolerations | `[]` |
| `broker.config` | broker private config such as `JAVA_OPTS` | |
| `broker.affinity` | broker affinity policy | `{}` |
| `broker.ingress.enabled` | enable ingress | `false` |
| `broker.ingress.hosts` | hosts for the broker api | `[ "chart-example.local" ]` |
| `broker.ingress.path` | path of the broker api | `/` |
| `broker.ingress.annotations` | annotations for the broker api ingress | `{}` |
| `broker.ingress.tls` | TLS configuration for the ingress | `[]` |
| `coordinator.enabled` | enable coordinator | `true` |
| `coordinator.name` | coordinator component name | `coordinator` |
| `coordinator.replicaCount` | coordinator node replicas (deployment) | `1` |
| `coordinator.port` | port of coordinator component | `8081` |
| `coordinator.serviceType` | service type for service | `ClusterIP` |
| `coordinator.resources` | coordinator node resources requests & limits | `{}` |
| `coordinator.podAnnotations` | coordinator Deployment annotations | `{}` |
| `coordinator.nodeSelector` | node labels for coordinator pod assignment | `{}` |
| `coordinator.tolerations` | coordinator tolerations | `[]` |
| `coordinator.config` | coordinator private config such as `JAVA_OPTS` | |
| `coordinator.affinity` | coordinator affinity policy | `{}` |
| `coordinator.ingress.enabled` | enable ingress | `false` |
| `coordinator.ingress.hosts` | hosts for the coordinator api | `[ "chart-example.local" ]` |
| `coordinator.ingress.path` | path of the coordinator api | `/` |
| `coordinator.ingress.annotations` | annotations for the coordinator api ingress | `{}` |
| `coordinator.ingress.tls` | TLS configuration for the ingress | `[]` |
| `overlord.enabled` | enable overlord | `false` |
| `overlord.name` | overlord component name | `overlord` |
| `overlord.replicaCount` | overlord node replicas (deployment) | `1` |
| `overlord.port` | port of overlord component | `8081` |
| `overlord.serviceType` | service type for service | `ClusterIP` |
| `overlord.resources` | overlord node resources requests & limits | `{}` |
| `overlord.podAnnotations` | overlord Deployment annotations | `{}` |
| `overlord.nodeSelector` | node labels for overlord pod assignment | `{}` |
| `overlord.tolerations` | overlord tolerations | `[]` |
| `overlord.config` | overlord private config such as `JAVA_OPTS` | |
| `overlord.affinity` | overlord affinity policy | `{}` |
| `overlord.ingress.enabled` | enable ingress | `false` |
| `overlord.ingress.hosts` | hosts for the overlord api | `[ "chart-example.local" ]` |
| `overlord.ingress.path` | path of the overlord api | `/` |
| `overlord.ingress.annotations` | annotations for the overlord api ingress | `{}` |
| `overlord.ingress.tls` | TLS configuration for the ingress | `[]` |
| `historical.enabled` | enable historical | `true` |
| `historical.name` | historical component name | `historical` |
| `historical.replicaCount` | historical node replicas (statefulset) | `1` |
| `historical.port` | port of historical component | `8083` |
| `historical.serviceType` | service type for service | `ClusterIP` |
| `historical.resources` | historical node resources requests & limits | `{}` |
| `historical.podAnnotations` | historical Deployment annotations | `{}` |
| `historical.nodeSelector` | node labels for historical pod assignment | `{}` |
| `historical.securityContext` | custom security context for historical containers | `{ fsGroup: 1000 }` |
| `historical.tolerations` | historical tolerations | `[]` |
| `historical.config` | historical node private config such as `JAVA_OPTS` | |
| `historical.persistence.enabled` | historical persistent enabled/disabled | `true` |
| `historical.persistence.size` | historical persistent volume size | `4Gi` |
| `historical.persistence.storageClass` | historical persistent volume Class | `nil` |
| `historical.persistence.accessMode` | historical persistent Access Mode | `ReadWriteOnce` |
| `historical.antiAffinity` | historical anti-affinity policy | `soft` |
| `historical.nodeAffinity` | historical node affinity policy | `{}` |
| `historical.ingress.enabled` | enable ingress | `false` |
| `historical.ingress.hosts` | hosts for the historical api | `[ "chart-example.local" ]` |
| `historical.ingress.path` | path of the historical api | `/` |
| `historical.ingress.annotations` | annotations for the historical api ingress | `{}` |
| `historical.ingress.tls` | TLS configuration for the ingress | `[]` |
| `middleManager.enabled` | enable middleManager | `true` |
| `middleManager.name` | middleManager component name | `middleManager` |
| `middleManager.replicaCount` | middleManager node replicas (statefulset) | `1` |
| `middleManager.port` | port of middleManager component | `8091` |
| `middleManager.serviceType` | service type for service | `ClusterIP` |
| `middleManager.resources` | middleManager node resources requests & limits | `{}` |
| `middleManager.podAnnotations` | middleManager Deployment annotations | `{}` |
| `middleManager.nodeSelector` | Node labels for middleManager pod assignment | `{}` |
| `middleManager.securityContext` | custom security context for middleManager containers | `{ fsGroup: 1000 }` |
| `middleManager.tolerations` | middleManager tolerations | `[]` |
| `middleManager.config` | middleManager private config such as `JAVA_OPTS` | |
| `middleManager.persistence.enabled` | middleManager persistent enabled/disabled | `true` |
| `middleManager.persistence.size` | middleManager persistent volume size | `4Gi` |
| `middleManager.persistence.storageClass` | middleManager persistent volume Class | `nil` |
| `middleManager.persistence.accessMode` | middleManager persistent Access Mode | `ReadWriteOnce` |
| `middleManager.antiAffinity` | middleManager anti-affinity policy | `soft` |
| `middleManager.nodeAffinity` | middleManager node affinity policy | `{}` |
| `middleManager.autoscaling.enabled` | enable horizontal pod autoscaling | `false` |
| `middleManager.autoscaling.minReplicas` | middleManager autoscaling min replicas | `2` |
| `middleManager.autoscaling.maxReplicas` | middleManager autoscaling max replicas | `5` |
| `middleManager.autoscaling.metrics` | middleManager autoscaling metrics | `{}` |
| `middleManager.ingress.enabled` | enable ingress | `false` |
| `middleManager.ingress.hosts` | hosts for the middleManager api | `[ "chart-example.local" ]` |
| `middleManager.ingress.path` | path of the middleManager api | `/` |
| `middleManager.ingress.annotations` | annotations for the middleManager api ingress | `{}` |
| `middleManager.ingress.tls` | TLS configuration for the ingress | `[]` |
| `router.enabled` | enable router | `false` |
| `router.name` | router component name | `router` |
| `router.replicaCount` | router node replicas (deployment) | `1` |
| `router.port` | port of router component | `8888` |
| `router.serviceType` | service type for service | `ClusterIP` |
| `router.resources` | router node resources requests & limits | `{}` |
| `router.podAnnotations` | router Deployment annotations | `{}` |
| `router.nodeSelector` | node labels for router pod assignment | `{}` |
| `router.tolerations` | router tolerations | `[]` |
| `router.config` | router private config such as `JAVA_OPTS` | |
| `router.affinity` | router affinity policy | `{}` |
| `router.ingress.enabled` | enable ingress | `false` |
| `router.ingress.hosts` | hosts for the router api | `[ "chart-example.local" ]` |
| `router.ingress.path` | path of the router api | `/` |
| `router.ingress.annotations` | annotations for the router api ingress | `{}` |
| `router.ingress.tls` | TLS configuration for the ingress | `[]` |
Full and up-to-date documentation can be found in the comments of the `values.yaml` file.

View File

@@ -0,0 +1,2 @@
.git
OWNERS

View File

@@ -0,0 +1,21 @@
apiVersion: v1
appVersion: 5.7.30
description: Fast, reliable, scalable, and easy to use open-source relational database
  system.
engine: gotpl
home: https://www.mysql.com/
icon: https://www.mysql.com/common/logos/logo-mysql-170x115.png
keywords:
  - mysql
  - database
  - sql
maintainers:
  - email: o.with@sportradar.com
    name: olemarkus
  - email: viglesias@google.com
    name: viglesiasce
name: mysql
sources:
  - https://github.com/kubernetes/charts
  - https://github.com/docker-library/mysql
version: 1.6.4

View File

@@ -0,0 +1,242 @@
# MySQL
[MySQL](https://MySQL.org) is one of the most popular database servers in the world. Notable users include Wikipedia, Facebook and Google.
## Introduction
This chart bootstraps a single node MySQL deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.10+ with Beta APIs enabled
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
$ helm install --name my-release stable/mysql
```
The command deploys MySQL on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
By default a random password will be generated for the root user. If you'd like to set your own password, change the `mysqlRootPassword`
in the values.yaml.
You can retrieve your root password by running the following command. Make sure to replace [YOUR_RELEASE_NAME]:

    printf $(printf '\%o' `kubectl get secret [YOUR_RELEASE_NAME]-mysql -o jsonpath="{.data.mysql-root-password[*]}"`)
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```bash
$ helm delete --purge my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release completely.
## Configuration
The following table lists the configurable parameters of the MySQL chart and their default values.
| Parameter | Description | Default |
| -------------------------------------------- | -------------------------------------------------------------------------------------------- | ---------------------------------------------------- |
| `args` | Additional arguments to pass to the MySQL container. | `[]` |
| `initContainer.resources` | initContainer resource requests/limits | Memory: `10Mi`, CPU: `10m` |
| `image` | `mysql` image repository. | `mysql` |
| `imageTag` | `mysql` image tag. | `5.7.14` |
| `busybox.image` | `busybox` image repository. | `busybox` |
| `busybox.tag` | `busybox` image tag. | `1.29.3` |
| `testFramework.enabled` | `test-framework` switch. | `true` |
| `testFramework.image` | `test-framework` image repository. | `dduportal/bats` |
| `testFramework.tag` | `test-framework` image tag. | `0.4.0` |
| `imagePullPolicy` | Image pull policy | `IfNotPresent` |
| `existingSecret` | Use Existing secret for Password details | `nil` |
| `extraVolumes` | Additional volumes as a string to be passed to the `tpl` function | |
| `extraVolumeMounts` | Additional volumeMounts as a string to be passed to the `tpl` function | |
| `extraInitContainers` | Additional init containers as a string to be passed to the `tpl` function | |
| `mysqlRootPassword` | Password for the `root` user. Ignored if existing secret is provided | Random 10 characters |
| `mysqlUser` | Username of new user to create. | `nil` |
| `mysqlPassword` | Password for the new user. Ignored if existing secret is provided | Random 10 characters |
| `mysqlDatabase` | Name for new database to create. | `nil` |
| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
| `livenessProbe.periodSeconds` | How often to perform the probe | 10 |
| `livenessProbe.timeoutSeconds` | When the probe times out | 5 |
| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 |
| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 |
| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 |
| `readinessProbe.periodSeconds` | How often to perform the probe | 10 |
| `readinessProbe.timeoutSeconds` | When the probe times out | 1 |
| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 |
| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 |
| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
| `persistence.enabled` | Create a volume to store data | true |
| `persistence.size` | Size of persistent volume claim | 8Gi RW |
| `persistence.storageClass` | Type of persistent volume claim | nil |
| `persistence.accessMode` | ReadWriteOnce or ReadOnly | ReadWriteOnce |
| `persistence.existingClaim` | Name of existing persistent volume | `nil` |
| `persistence.subPath` | Subdirectory of the volume to mount | `nil` |
| `persistence.annotations` | Persistent Volume annotations | {} |
| `nodeSelector` | Node labels for pod assignment | {} |
| `affinity` | Affinity rules for pod assignment | {} |
| `tolerations` | Pod taint tolerations for deployment | {} |
| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
| `metrics.image` | Exporter image | `prom/mysqld-exporter` |
| `metrics.imageTag` | Exporter image | `v0.10.0` |
| `metrics.imagePullPolicy` | Exporter image pull policy | `IfNotPresent` |
| `metrics.resources` | Exporter resource requests/limit | `nil` |
| `metrics.livenessProbe.initialDelaySeconds` | Delay before metrics liveness probe is initiated | 15 |
| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 |
| `metrics.readinessProbe.initialDelaySeconds` | Delay before metrics readiness probe is initiated | 5 |
| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 1 |
| `metrics.flags` | Additional flags for the mysql exporter to use | `[]` |
| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` |
| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` |
| `configurationFiles` | List of mysql configuration files | `nil` |
| `configurationFilesPath` | Path of mysql configuration files | `/etc/mysql/conf.d/` |
| `securityContext.enabled` | Enable security context (mysql pod) | `false` |
| `securityContext.fsGroup` | Group ID for the container (mysql pod) | 999 |
| `securityContext.runAsUser` | User ID for the container (mysql pod) | 999 |
| `service.annotations` | Kubernetes annotations for mysql | {} |
| `service.type` | Kubernetes service type | ClusterIP |
| `service.loadBalancerIP` | LoadBalancer service IP | `""` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the mysql.fullname template |
| `ssl.enabled` | Setup and use SSL for MySQL connections | `false` |
| `ssl.secret` | Name of the secret containing the SSL certificates | mysql-ssl-certs |
| `ssl.certificates[0].name` | Name of the secret containing the SSL certificates | `nil` |
| `ssl.certificates[0].ca` | CA certificate | `nil` |
| `ssl.certificates[0].cert` | Server certificate (public key) | `nil` |
| `ssl.certificates[0].key` | Server key (private key) | `nil` |
| `imagePullSecrets` | Name of Secret resource containing private registry credentials | `nil` |
| `initializationFiles` | List of SQL files which are run after the container started | `nil` |
| `timezone` | Container and mysqld timezone (TZ env) | `nil` (UTC depending on image) |
| `podAnnotations` | Map of annotations to add to the pods | `{}` |
| `podLabels` | Map of labels to add to the pods | `{}` |
| `priorityClassName` | Set pod priorityClassName | `{}` |
| `deploymentAnnotations` | Map of annotations for deployment | `{}` |
| `strategy` | Update strategy policy | `{type: "Recreate"}` |
Some of the parameters above map to the env variables defined in the [MySQL DockerHub image](https://hub.docker.com/_/mysql/).
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```bash
$ helm install --name my-release \
--set mysqlRootPassword=secretpassword,mysqlUser=my-user,mysqlPassword=my-password,mysqlDatabase=my-database \
stable/mysql
```
The above command sets the MySQL `root` account password to `secretpassword`. Additionally it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```bash
$ helm install --name my-release -f values.yaml stable/mysql
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Persistence
The [MySQL](https://hub.docker.com/_/mysql/) image stores the MySQL data and configurations at the `/var/lib/mysql` path of the container.
By default a PersistentVolumeClaim is created and mounted into that directory. In order to disable this functionality
you can change the values.yaml to disable persistence and use an emptyDir instead.
> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."*
**Notice**: You may need to increase the value of `livenessProbe.initialDelaySeconds` when enabling persistence with a PersistentVolumeClaim backed by a PersistentVolume with varying properties, since the volume's IO performance affects database initialization time. The default limit for database initialization is `60` seconds (`livenessProbe.initialDelaySeconds` + `livenessProbe.periodSeconds` * `livenessProbe.failureThreshold`). If the initialization process takes longer than this limit, the kubelet will restart the database container, which interrupts database initialization and can leave the persistent data in an unusable state.
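A minimal sketch of both knobs in a custom values.yaml, assuming a slow PersistentVolume (the numbers are illustrative, not tested recommendations):
```yaml
persistence:
  enabled: true
  size: 8Gi
livenessProbe:
  # Give a slow volume more headroom before the first liveness check.
  initialDelaySeconds: 120
```
Setting `persistence.enabled: false` instead falls back to an `emptyDir` volume.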
## Custom MySQL configuration files
The [MySQL](https://hub.docker.com/_/mysql/) image accepts custom configuration files at the path `/etc/mysql/conf.d`. If you want to use a customized MySQL configuration, you can create your alternative configuration files by passing the file contents on the `configurationFiles` attribute. Note that according to the MySQL documentation only files ending with `.cnf` are loaded.
```yaml
configurationFiles:
mysql.cnf: |-
[mysqld]
skip-host-cache
skip-name-resolve
sql-mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
mysql_custom.cnf: |-
[mysqld]
```
## MySQL initialization files
The [MySQL](https://hub.docker.com/_/mysql/) image accepts `*.sh`, `*.sql` and `*.sql.gz` files at the path `/docker-entrypoint-initdb.d`.
These files are run exactly once for container initialization and are ignored on subsequent container restarts.
If you want to use initialization scripts, you can create initialization files by passing the file contents on the `initializationFiles` attribute.
```yaml
initializationFiles:
first-db.sql: |-
CREATE DATABASE IF NOT EXISTS first DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
second-db.sql: |-
CREATE DATABASE IF NOT EXISTS second DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
```
## SSL
This chart supports configuring MySQL to use [encrypted connections](https://dev.mysql.com/doc/refman/5.7/en/encrypted-connections.html) with TLS/SSL certificates provided by the user. This is accomplished by storing the required Certificate Authority file, the server public key certificate, and the server private key as a Kubernetes secret. The SSL options for this chart support the following use cases:
* Manage certificate secrets with helm
* Manage certificate secrets outside of helm
## Manage certificate secrets with helm
Include your certificate data in the `ssl.certificates` section. For example:
```
ssl:
enabled: false
secret: mysql-ssl-certs
certificates:
- name: mysql-ssl-certs
ca: |-
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
cert: |-
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
key: |-
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
```
> **Note**: Make sure your certificate data has the correct formatting in the values file.
## Manage certificate secrets outside of helm
1. Ensure the certificate secret exists before installation of this chart.
2. Set the name of the certificate secret in `ssl.secret`.
3. Make sure there are no entries underneath `ssl.certificates`.
To manually create the certificate secret from local files you can execute:
```
kubectl create secret generic mysql-ssl-certs \
--from-file=ca.pem=./ssl/certificate-authority.pem \
--from-file=server-cert.pem=./ssl/server-public-key.pem \
--from-file=server-key.pem=./ssl/server-private-key.pem
```
> **Note**: `ca.pem`, `server-cert.pem`, and `server-key.pem` **must** be used as the key names in this generic secret.
If you are using a certificate, your `configurationFiles` must include the three SSL lines under `[mysqld]`:
```
[mysqld]
ssl-ca=/ssl/ca.pem
ssl-cert=/ssl/server-cert.pem
ssl-key=/ssl/server-key.pem
```

View File

@@ -0,0 +1,43 @@
MySQL can be accessed via port 3306 on the following DNS name from within your cluster:
{{ template "mysql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local

{{- if .Values.existingSecret }}
If you have not already created the mysql password secret:

    kubectl create secret generic {{ .Values.existingSecret }} --namespace {{ .Release.Namespace }} --from-file=./mysql-root-password --from-file=./mysql-password
{{ else }}
To get your root password run:

    MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mysql.fullname" . }} -o jsonpath="{.data.mysql-root-password}" | base64 --decode; echo)
{{- end }}

To connect to your database:

1. Run an Ubuntu pod that you can use as a client:

    kubectl run -i --tty ubuntu --image=ubuntu:16.04 --restart=Never -- bash -il

2. Install the mysql client:

    $ apt-get update && apt-get install mysql-client -y

3. Connect using the mysql cli, then provide your password:

    $ mysql -h {{ template "mysql.fullname" . }} -p

To connect to your database directly from outside the K8s cluster:
{{- if contains "NodePort" .Values.service.type }}

    MYSQL_HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath='{.items[0].status.addresses[0].address}')
    MYSQL_PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mysql.fullname" . }} -o jsonpath='{.spec.ports[0].nodePort}')

{{- else if contains "ClusterIP" .Values.service.type }}

    MYSQL_HOST=127.0.0.1
    MYSQL_PORT={{ .Values.service.port }}

    # Execute the following command to route the connection:
    kubectl port-forward svc/{{ template "mysql.fullname" . }} {{ .Values.service.port }}

{{- end }}

    mysql -h ${MYSQL_HOST} -P${MYSQL_PORT} -u root -p${MYSQL_ROOT_PASSWORD}

View File

@@ -0,0 +1,43 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "mysql.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "mysql.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Generate chart secret name
*/}}
{{- define "mysql.secretName" -}}
{{ default (include "mysql.fullname" .) .Values.existingSecret }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "mysql.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "mysql.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,12 @@
{{- if .Values.configurationFiles }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "mysql.fullname" . }}-configuration
  namespace: {{ .Release.Namespace }}
data:
{{- range $key, $val := .Values.configurationFiles }}
  {{ $key }}: |-
{{ $val | indent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,252 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "mysql.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "mysql.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
{{- with .Values.deploymentAnnotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
  strategy:
{{ toYaml .Values.strategy | indent 4 }}
  selector:
    matchLabels:
      app: {{ template "mysql.fullname" . }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ template "mysql.fullname" . }}
        release: {{ .Release.Name }}
{{- with .Values.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.podAnnotations }}
      annotations:
{{ toYaml . | indent 8 }}
{{- end }}
    spec:
      {{- if .Values.schedulerName }}
      schedulerName: "{{ .Values.schedulerName }}"
      {{- end }}
      {{- if .Values.imagePullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: "{{ .Values.priorityClassName }}"
      {{- end }}
      {{- if .Values.securityContext.enabled }}
      securityContext:
        fsGroup: {{ .Values.securityContext.fsGroup }}
        runAsUser: {{ .Values.securityContext.runAsUser }}
      {{- end }}
      serviceAccountName: {{ template "mysql.serviceAccountName" . }}
      initContainers:
      - name: "remove-lost-found"
        image: "{{ .Values.busybox.image }}:{{ .Values.busybox.tag }}"
        imagePullPolicy: {{ .Values.imagePullPolicy | quote }}
        resources:
{{ toYaml .Values.initContainer.resources | indent 10 }}
        command: ["rm", "-fr", "/var/lib/mysql/lost+found"]
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          {{- if .Values.persistence.subPath }}
          subPath: {{ .Values.persistence.subPath }}
          {{- end }}
      {{- if .Values.extraInitContainers }}
{{ tpl .Values.extraInitContainers . | indent 6 }}
      {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.affinity }}
      affinity:
{{ toYaml .Values.affinity | indent 8 }}
      {{- end }}
      {{- if .Values.tolerations }}
      tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
      {{- end }}
      containers:
      - name: {{ template "mysql.fullname" . }}
        image: "{{ .Values.image }}:{{ .Values.imageTag }}"
        imagePullPolicy: {{ .Values.imagePullPolicy | quote }}
        {{- with .Values.args }}
        args:
        {{- range . }}
        - {{ . | quote }}
        {{- end }}
        {{- end }}
        resources:
{{ toYaml .Values.resources | indent 10 }}
        env:
        {{- if .Values.mysqlAllowEmptyPassword }}
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "true"
        {{- end }}
        {{- if not (and .Values.allowEmptyRootPassword (not .Values.mysqlRootPassword)) }}
        - name: MYSQL_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              name: {{ template "mysql.secretName" . }}
              key: mysql-root-password
              {{- if .Values.mysqlAllowEmptyPassword }}
              optional: true
              {{- end }}
        {{- end }}
        {{- if not (and .Values.allowEmptyRootPassword (not .Values.mysqlPassword)) }}
        - name: MYSQL_PASSWORD
          valueFrom:
            secretKeyRef:
              name: {{ template "mysql.secretName" . }}
              key: mysql-password
              {{- if or .Values.mysqlAllowEmptyPassword (empty .Values.mysqlUser) }}
              optional: true
              {{- end }}
        {{- end }}
        - name: MYSQL_USER
          value: {{ default "" .Values.mysqlUser | quote }}
        - name: MYSQL_DATABASE
          value: {{ default "" .Values.mysqlDatabase | quote }}
        {{- if .Values.timezone }}
        - name: TZ
          value: {{ .Values.timezone }}
        {{- end }}
        ports:
        - name: mysql
          containerPort: 3306
        livenessProbe:
          exec:
            command:
            {{- if .Values.mysqlAllowEmptyPassword }}
            - mysqladmin
            - ping
            {{- else }}
            - sh
            - -c
            - "mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}"
            {{- end }}
          initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
          timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
          successThreshold: {{ .Values.livenessProbe.successThreshold }}
          failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
        readinessProbe:
          exec:
            command:
            {{- if .Values.mysqlAllowEmptyPassword }}
            - mysqladmin
            - ping
            {{- else }}
            - sh
            - -c
            - "mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}"
            {{- end }}
          initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
          timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
          successThreshold: {{ .Values.readinessProbe.successThreshold }}
          failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          {{- if .Values.persistence.subPath }}
          subPath: {{ .Values.persistence.subPath }}
          {{- end }}
        {{- if .Values.configurationFiles }}
        {{- range $key, $val := .Values.configurationFiles }}
        - name: configurations
          mountPath: {{ $.Values.configurationFilesPath }}{{ $key }}
          subPath: {{ $key }}
        {{- end -}}
        {{- end }}
        {{- if .Values.initializationFiles }}
        - name: migrations
          mountPath: /docker-entrypoint-initdb.d
        {{- end }}
        {{- if .Values.ssl.enabled }}
        - name: certificates
          mountPath: /ssl
        {{- end }}
        {{- if .Values.extraVolumeMounts }}
{{ tpl .Values.extraVolumeMounts . | indent 8 }}
        {{- end }}
      {{- if .Values.metrics.enabled }}
      - name: metrics
        image: "{{ .Values.metrics.image }}:{{ .Values.metrics.imageTag }}"
        imagePullPolicy: {{ .Values.metrics.imagePullPolicy | quote }}
        {{- if .Values.mysqlAllowEmptyPassword }}
        command:
        - 'sh'
        - '-c'
        - 'DATA_SOURCE_NAME="root@(localhost:3306)/" /bin/mysqld_exporter'
        {{- else }}
        env:
        - name: MYSQL_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              name: {{ template "mysql.secretName" . }}
              key: mysql-root-password
        command:
        - 'sh'
        - '-c'
        - 'DATA_SOURCE_NAME="root:$MYSQL_ROOT_PASSWORD@(localhost:3306)/" /bin/mysqld_exporter'
        {{- end }}
        {{- range $f := .Values.metrics.flags }}
        - {{ $f | quote }}
        {{- end }}
        ports:
        - name: metrics
          containerPort: 9104
        livenessProbe:
          httpGet:
            path: /
            port: metrics
          initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
          timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
        readinessProbe:
          httpGet:
            path: /
            port: metrics
          initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
          timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
        resources:
{{ toYaml .Values.metrics.resources | indent 10 }}
      {{- end }}
      volumes:
      {{- if .Values.configurationFiles }}
      - name: configurations
        configMap:
          name: {{ template "mysql.fullname" . }}-configuration
      {{- end }}
      {{- if .Values.initializationFiles }}
      - name: migrations
        configMap:
          name: {{ template "mysql.fullname" . }}-initialization
      {{- end }}
      {{- if .Values.ssl.enabled }}
      - name: certificates
        secret:
          secretName: {{ .Values.ssl.secret }}
      {{- end }}
      - name: data
      {{- if .Values.persistence.enabled }}
        persistentVolumeClaim:
          claimName: {{ .Values.persistence.existingClaim | default (include "mysql.fullname" .) }}
      {{- else }}
        emptyDir: {}
      {{- end -}}
      {{- if .Values.extraVolumes }}
{{ tpl .Values.extraVolumes . | indent 6 }}
      {{- end }}

View File

@@ -0,0 +1,12 @@
{{- if .Values.initializationFiles }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "mysql.fullname" . }}-initialization
  namespace: {{ .Release.Namespace }}
data:
{{- range $key, $val := .Values.initializationFiles }}
  {{ $key }}: |-
{{ $val | indent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,29 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "mysql.fullname" . }}
  namespace: {{ .Release.Namespace }}
{{- with .Values.persistence.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
  labels:
    app: {{ template "mysql.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
spec:
  accessModes:
    - {{ .Values.persistence.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,51 @@
{{- if not .Values.existingSecret }}
{{- if or (not .Values.allowEmptyRootPassword) (or .Values.mysqlRootPassword .Values.mysqlPassword) }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "mysql.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "mysql.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
type: Opaque
data:
  {{ if .Values.mysqlRootPassword }}
  mysql-root-password: {{ .Values.mysqlRootPassword | b64enc | quote }}
  {{ else }}
  {{ if not .Values.allowEmptyRootPassword }}
  mysql-root-password: {{ randAlphaNum 10 | b64enc | quote }}
  {{ end }}
  {{ end }}
  {{ if .Values.mysqlPassword }}
  mysql-password: {{ .Values.mysqlPassword | b64enc | quote }}
  {{ else }}
  {{ if not .Values.allowEmptyRootPassword }}
  mysql-password: {{ randAlphaNum 10 | b64enc | quote }}
  {{ end }}
  {{ end }}
{{ end }}
{{- if .Values.ssl.enabled }}
{{ if .Values.ssl.certificates }}
{{- range .Values.ssl.certificates }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .name }}
  labels:
    app: {{ template "mysql.fullname" $ }}
    chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}"
    release: "{{ $.Release.Name }}"
    heritage: "{{ $.Release.Service }}"
type: Opaque
data:
  ca.pem: {{ .ca | b64enc }}
  server-cert.pem: {{ .cert | b64enc }}
  server-key.pem: {{ .key | b64enc }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,11 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "mysql.serviceAccountName" . }}
  labels:
    app: {{ template "mysql.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "mysql.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "mysql.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
    {{- if .Values.metrics.serviceMonitor.additionalLabels }}
{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}
    {{- end }}
spec:
  endpoints:
    - port: metrics
      interval: 30s
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  selector:
    matchLabels:
      app: {{ include "mysql.fullname" . }}
      release: {{ .Release.Name }}
{{- end }}

View File

@@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "mysql.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "mysql.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  annotations:
{{- if .Values.service.annotations }}
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
{{- if and (.Values.metrics.enabled) (.Values.metrics.annotations) }}
{{ toYaml .Values.metrics.annotations | indent 4 }}
{{- end }}
spec:
  type: {{ .Values.service.type }}
  {{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  ports:
  - name: mysql
    port: {{ .Values.service.port }}
    targetPort: mysql
    {{- if .Values.service.nodePort }}
    nodePort: {{ .Values.service.nodePort }}
    {{- end }}
  {{- if .Values.metrics.enabled }}
  - name: metrics
    port: 9104
    targetPort: metrics
  {{- end }}
  selector:
    app: {{ template "mysql.fullname" . }}

View File

@@ -0,0 +1,23 @@
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "mysql.fullname" . }}-test
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "mysql.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
heritage: "{{ .Release.Service }}"
release: "{{ .Release.Name }}"
data:
run.sh: |-
{{- if and .Values.ssl.enabled .Values.mysqlRootPassword }}
@test "Testing SSL MySQL Connection" {
mysql --host={{ template "mysql.fullname" . }} --port={{ .Values.service.port | default "3306" }} --ssl-cert=/ssl/server-cert.pem --ssl-key=/ssl/server-key.pem -u root -p{{ .Values.mysqlRootPassword }}
}
{{- else if .Values.mysqlRootPassword }}
@test "Testing MySQL Connection" {
mysql --host={{ template "mysql.fullname" . }} --port={{ .Values.service.port | default "3306" }} -u root -p{{ .Values.mysqlRootPassword }}
}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,54 @@
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: Pod
metadata:
name: {{ template "mysql.fullname" . }}-test
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "mysql.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
heritage: "{{ .Release.Service }}"
release: "{{ .Release.Name }}"
annotations:
"helm.sh/hook": test-success
spec:
initContainers:
- name: test-framework
image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}"
command:
- "bash"
- "-c"
- |
set -ex
# copy bats to tools dir
cp -R /usr/local/libexec/ /tools/bats/
volumeMounts:
- mountPath: /tools
name: tools
containers:
- name: {{ .Release.Name }}-test
image: "{{ .Values.image }}:{{ .Values.imageTag }}"
command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
volumeMounts:
- mountPath: /tests
name: tests
readOnly: true
- mountPath: /tools
name: tools
{{- if .Values.ssl.enabled }}
- name: certificates
mountPath: /ssl
{{- end }}
volumes:
- name: tests
configMap:
name: {{ template "mysql.fullname" . }}-test
- name: tools
emptyDir: {}
{{- if .Values.ssl.enabled }}
- name: certificates
secret:
secretName: {{ .Values.ssl.secret }}
{{- end }}
restartPolicy: Never
{{- end }}

View File

@@ -0,0 +1,231 @@
## mysql image version
## ref: https://hub.docker.com/r/library/mysql/tags/
##
image: "mysql"
imageTag: "5.7.30"
strategy:
type: Recreate
busybox:
image: "busybox"
tag: "1.31.1"
testFramework:
enabled: true
image: "dduportal/bats"
tag: "0.4.0"
## Specify password for root user
##
## Default: random 10 character string
# mysqlRootPassword: testing
## Create a database user
##
# mysqlUser:
## Default: random 10 character string
# mysqlPassword:
## Allow unauthenticated access, uncomment to enable
##
# mysqlAllowEmptyPassword: true
## Create a database
##
# mysqlDatabase:
## Specify an imagePullPolicy (Required)
## It's recommended to change this to 'Always' if the image tag is 'latest'
## ref: http://kubernetes.io/docs/user-guide/images/#updating-images
##
imagePullPolicy: IfNotPresent
## Additional arguments that are passed to the MySQL container.
## For example use --default-authentication-plugin=mysql_native_password if older clients need to
## connect to a MySQL 8 instance.
args: []
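# For example (an illustrative, commented sketch of the flag mentioned above):
# args:
#   - "--default-authentication-plugin=mysql_native_password"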
extraVolumes: |
# - name: extras
# emptyDir: {}
extraVolumeMounts: |
# - name: extras
# mountPath: /usr/share/extras
# readOnly: true
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
# - name: myRegistryKeySecretName
## Node selector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
livenessProbe:
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Persist data to a persistent volume
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
annotations: {}
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Security context
securityContext:
enabled: false
runAsUser: 999
fsGroup: 999
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 256Mi
cpu: 100m
# Custom mysql configuration files path
configurationFilesPath: /etc/mysql/conf.d/
# Custom mysql configuration files used to override default mysql settings
configurationFiles: {}
# mysql.cnf: |-
# [mysqld]
# skip-name-resolve
# ssl-ca=/ssl/ca.pem
# ssl-cert=/ssl/server-cert.pem
# ssl-key=/ssl/server-key.pem
# Custom mysql init SQL files used to initialize the database
initializationFiles: {}
# first-db.sql: |-
# CREATE DATABASE IF NOT EXISTS first DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
# second-db.sql: |-
# CREATE DATABASE IF NOT EXISTS second DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
metrics:
enabled: false
image: prom/mysqld-exporter
imageTag: v0.10.0
imagePullPolicy: IfNotPresent
resources: {}
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "9104"
livenessProbe:
initialDelaySeconds: 15
timeoutSeconds: 5
readinessProbe:
initialDelaySeconds: 5
timeoutSeconds: 1
flags: []
serviceMonitor:
enabled: false
additionalLabels: {}
## Configure the service
## ref: http://kubernetes.io/docs/user-guide/services/
service:
annotations: {}
## Specify a service type
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
type: ClusterIP
port: 3306
# nodePort: 32000
# loadBalancerIP:
## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
## Specifies whether a ServiceAccount should be created
##
create: false
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the mysql.fullname template
# name:
ssl:
enabled: false
secret: mysql-ssl-certs
certificates:
# - name: mysql-ssl-certs
# ca: |-
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
# cert: |-
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
# key: |-
# -----BEGIN RSA PRIVATE KEY-----
# ...
# -----END RSA PRIVATE KEY-----
## Populates the 'TZ' system timezone environment variable
## ref: https://dev.mysql.com/doc/refman/5.7/en/time-zone-support.html
##
## Default: nil (mysql will use image's default timezone, normally UTC)
## Example: 'Australia/Sydney'
# timezone:
# Deployment Annotations
deploymentAnnotations: {}
# To be added to the database server pod(s)
podAnnotations: {}
podLabels: {}
## Set pod priorityClassName
# priorityClassName: {}
## Init container resources defaults
initContainer:
resources:
requests:
memory: 10Mi
cpu: 10m

View File

@@ -0,0 +1,2 @@
.git
OWNERS

View File

@@ -0,0 +1,19 @@
apiVersion: v1
appVersion: 11.7.0
deprecated: true
description: DEPRECATED Chart for PostgreSQL, an object-relational database management
system (ORDBMS) with an emphasis on extensibility and on standards-compliance.
engine: gotpl
home: https://www.postgresql.org/
icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png
keywords:
- postgresql
- postgres
- database
- sql
- replication
- cluster
name: postgresql
sources:
- https://github.com/bitnami/bitnami-docker-postgresql
version: 8.6.4

View File

@@ -0,0 +1,587 @@
# PostgreSQL
[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance.
For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha)
## This Helm chart is deprecated
Given the [`stable` deprecation timeline](https://github.com/helm/charts#deprecation-timeline), the Bitnami maintained PostgreSQL Helm chart is now located at [bitnami/charts](https://github.com/bitnami/charts/).
The Bitnami repository is already included in the Hubs and we will continue providing the same cadence of updates, support, etc. that we have been providing here over the years. Installation instructions are very similar: just add the _bitnami_ repo and use it during the installation (`bitnami/<chart>` instead of `stable/<chart>`)
```bash
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install my-release bitnami/<chart> # Helm 3
$ helm install --name my-release bitnami/<chart> # Helm 2
```
To update an existing _stable_ deployment with a chart hosted in the bitnami repository you can execute
```bash
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm upgrade my-release bitnami/<chart>
```
Issues and PRs related to the chart itself will be redirected to the `bitnami/charts` GitHub repository. In the same way, we'll be happy to answer questions related to this migration process in [this issue](https://github.com/helm/charts/issues/20969) created as a common place for discussion.
## TL;DR;
```console
$ helm install my-release stable/postgresql
```
## Introduction
This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/).
## Prerequisites
- Kubernetes 1.12+
- Helm 2.11+ or Helm 3.0-beta3+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```console
$ helm install my-release stable/postgresql
```
The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Parameters
The following table lists the configurable parameters of the PostgreSQL chart and their default values.
| Parameter | Description | Default |
|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------|
| `global.imageRegistry` | Global Docker Image registry | `nil` |
| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` |
| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` |
| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` |
| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` |
| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` |
| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
| `global.storageClass` | Global storage class for dynamic provisioning | `nil` |
| `image.registry` | PostgreSQL Image registry | `docker.io` |
| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` |
| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` |
| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
| `image.debug` | Specify if debug values should be set | `false` |
| `nameOverride`                                 | String to partially override the postgresql.fullname template (the release name will be prepended)                                                                           | `nil`                                                           |
| `fullnameOverride`                             | String to fully override the postgresql.fullname template                                                                                                                    | `nil`                                                           |
| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` |
| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` |
| `ldap.enabled` | Enable LDAP support | `false` |
| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` |
| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` |
| `ldap.server` | IP address or name of the LDAP server. | `nil` |
| `ldap.port` | Port number on the LDAP server to connect to | `nil` |
| `ldap.scheme` | Set to `ldaps` to use LDAPS. | `nil` |
| `ldap.tls` | Set to `1` to use TLS encryption | `nil` |
| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` |
| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` |
| `ldap.search_attr`                             | Attribute to match against the user name in the search                                                                                                                       | `nil`                                                           |
| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` |
| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` |
| `ldap.bindDN` | DN of user to bind to LDAP | `nil` |
| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` |
| `replication.enabled` | Enable replication | `false` |
| `replication.user` | Replication user | `repl_user` |
| `replication.password` | Replication user password | `repl_password` |
| `replication.slaveReplicas`                    | Number of slave replicas                                                                                                                                                      | `1`                                                             |
| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` |
| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
| `existingSecret`                               | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different from `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be used to authenticate against LDAP. | `nil` |
| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) | _random 10 character alphanumeric string_ |
| `postgresqlUsername` | PostgreSQL admin user | `postgres` |
| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ |
| `postgresqlDatabase` | PostgreSQL database | `nil` |
| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) |
| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` |
| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. | `nil` |
| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` |
| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` |
| `postgresqlConfiguration` | Runtime Config Parameters | `nil` |
| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` |
| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` |
| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` |
| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` |
| `initdbScripts` | Dictionary of initdb scripts | `nil` |
| `initdbUsername` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` |
| `initdbPassword` | Password for the user specified in `initdbUsername` | `nil` |
| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` |
| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` |
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.port` | PostgreSQL port | `5432` |
| `service.nodePort` | Kubernetes Service nodePort | `nil` |
| `service.annotations`                          | Annotations for the PostgreSQL service; the value is evaluated as a template.                                                                                                 | `{}`                                                            |
| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
| `service.loadBalancerSourceRanges`             | Addresses that are allowed when the service type is `LoadBalancer`                                                                                                            | `[]`                                                            |
| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` |
| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` |
| `persistence.enabled` | Enable persistence using PVC | `true` |
| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. | `nil` |
| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` |
| `persistence.subPath` | Subdirectory of the volume to mount at | `""` |
| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` |
| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` |
| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
| `persistence.annotations` | Annotations for the PVC | `{}` |
| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` |
| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` |
| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` |
| `master.annotations`                           | Map of annotations to add to the statefulset (postgresql master)                                                                                                              | `{}`                                                            |
| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` |
| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` |
| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` |
| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` |
| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` |
| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` |
| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` |
| `master.sidecars` | Add additional containers to the pod | `[]` |
| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` |
| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` |
| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` |
| `slave.annotations`                            | Map of annotations to add to the statefulsets (postgresql slave)                                                                                                              | `{}`                                                            |
| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` |
| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` |
| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` |
| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` |
| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` |
| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` |
| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` |
| `slave.sidecars` | Add additional containers to the pod | `[]` |
| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` |
| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` |
| `securityContext.enabled` | Enable security context | `true` |
| `securityContext.fsGroup` | Group ID for the container | `1001` |
| `securityContext.runAsUser` | User ID for the container | `1001` |
| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` |
| `serviceAccount.name`                          | Name of existing service account                                                                                                                                              | `nil`                                                           |
| `livenessProbe.enabled`                        | Enable the liveness probe                                                                                                                                                     | `true`                                                          |
| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `nil` |
| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
| `livenessProbe.periodSeconds` | How often to perform the probe | 10 |
| `livenessProbe.timeoutSeconds` | When the probe times out | 5 |
| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
| `readinessProbe.enabled`                       | Enable the readiness probe                                                                                                                                                    | `true`                                                          |
| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 |
| `readinessProbe.periodSeconds` | How often to perform the probe | 10 |
| `readinessProbe.timeoutSeconds` | When the probe times out | 5 |
| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
| `metrics.enabled` | Start a prometheus exporter | `false` |
| `metrics.service.type` | Kubernetes Service type | `ClusterIP` |
| `service.clusterIP` | Static clusterIP or None for headless services | `nil` |
| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` |
| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` |
| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` |
| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` |
| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` |
| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql |
| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` |
| `metrics.image.registry` | PostgreSQL Image registry | `docker.io` |
| `metrics.image.repository` | PostgreSQL Image name | `bitnami/postgres-exporter` |
| `metrics.image.tag` | PostgreSQL Image tag | `{TAG_NAME}` |
| `metrics.image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
| `metrics.customMetrics` | Additional custom metrics | `nil` |
| `metrics.securityContext.enabled` | Enable security context for metrics | `false` |
| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` |
| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 |
| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 |
| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
| `metrics.readinessProbe.enabled`               | Enable the readiness probe                                                                                                                                                    | `true`                                                          |
| `metrics.readinessProbe.initialDelaySeconds`   | Delay before readiness probe is initiated                                                                                                                                     | 5                                                               |
| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 |
| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 |
| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install my-release \
--set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \
stable/postgresql
```
The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally, it creates a database named `my-database`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
$ helm install my-release -f values.yaml stable/postgresql
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Configuration and installation details
### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
### Production configuration and horizontal scaling
This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
- Enable replication:
```diff
- replication.enabled: false
+ replication.enabled: true
```
- Number of slaves replicas:
```diff
- replication.slaveReplicas: 1
+ replication.slaveReplicas: 2
```
- Set synchronous commit mode:
```diff
- replication.synchronousCommit: "off"
+ replication.synchronousCommit: "on"
```
- Number of replicas that will have synchronous replication:
```diff
- replication.numSynchronousReplicas: 0
+ replication.numSynchronousReplicas: 1
```
- Start a prometheus exporter:
```diff
- metrics.enabled: false
+ metrics.enabled: true
```
To horizontally scale this chart, modify the `replication.slaveReplicas` parameter to change the number of nodes in your PostgreSQL deployment. You can also use the `values-production.yaml` file or modify the parameters shown above.
### Change PostgreSQL version
To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=12.0.0`
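For instance, a sketch using the install command shown earlier:
```console
$ helm install my-release --set image.tag=12.0.0 stable/postgresql
```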
### postgresql.conf / pg_hba.conf files as configMap
This Helm chart also supports customizing the whole configuration file.
Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as a ConfigMap in the containers and will be used to configure the PostgreSQL server.
Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. `{"sharedBuffers": "500MB"}`.
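For instance, in a values file (the `maxConnections` entry is an illustrative assumption, not a chart default):
```yaml
postgresqlConfiguration:
  sharedBuffers: "500MB"
  maxConnections: "200"
```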
In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options.
### Allow settings to be loaded from files other than the default `postgresql.conf`
If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory.
Those files will be mounted as a ConfigMap in the containers, adding to or overwriting the default configuration via the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`.
Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option.
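A minimal sketch of such an extended `.conf` file, assuming it lives at `files/conf.d/extended.conf` (hypothetical name and values):
```
log_connections = on
max_connections = 250
```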
### Initialize a fresh instance
The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
Alternatively, you can specify custom scripts using the `initdbScripts` parameter as a dict.
In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter.
The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
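As a sketch, an inline init script via `initdbScripts` (the script name and SQL are illustrative):
```yaml
initdbScripts:
  init-db.sql: |
    CREATE SCHEMA IF NOT EXISTS app;
```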
### Sidecars
If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
```yaml
# For the PostgreSQL master
master:
sidecars:
- name: your-image-name
image: your-image
imagePullPolicy: Always
ports:
- name: portname
containerPort: 1234
# For the PostgreSQL replicas
slave:
sidecars:
- name: your-image-name
image: your-image
imagePullPolicy: Always
ports:
- name: portname
containerPort: 1234
```
### Metrics
The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed, and it is expected that the metrics are collected from inside the k8s cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
The exporter allows you to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
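For example, a hedged values sketch enabling the exporter together with a Prometheus Operator ServiceMonitor (the `release: prometheus` label is an assumption about your Prometheus instance's selector):
```yaml
metrics:
  enabled: true
  serviceMonitor:
    enabled: true            # requires the Prometheus Operator CRDs
    additionalLabels:
      release: prometheus    # hypothetical label your Prometheus selects on
```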
### Use of global variables
In more complex scenarios, we may have the following tree of dependencies
```
+--------------+
| |
+------------+ Chart 1 +-----------+
| | | |
| --------+------+ |
| | |
| | |
| | |
| | |
v v v
+-------+------+ +--------+------+ +--------+------+
| | | | | |
| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 |
| | | | | |
+--------------+ +---------------+ +---------------+
```
In the diagram, PostgreSQL and the two sub-charts depend on the parent chart, Chart 1. However, sub-charts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, they need to know the PostgreSQL credentials, so one option could be to deploy Chart 1 with the following parameters:
```
postgresql.postgresqlPassword=testtest
subchart1.postgresql.postgresqlPassword=testtest
subchart2.postgresql.postgresqlPassword=testtest
postgresql.postgresqlDatabase=db1
subchart1.postgresql.postgresqlDatabase=db1
subchart2.postgresql.postgresqlDatabase=db1
```
If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
```
global.postgresql.postgresqlPassword=testtest
global.postgresql.postgresqlDatabase=db1
```
This way, the credentials will be available in all of the subcharts.
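Equivalently, in a values file:
```yaml
global:
  postgresql:
    postgresqlPassword: testtest
    postgresqlDatabase: db1
```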
## Persistence
The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
If the volume already contains data, synchronization to the standby nodes will fail for all commits; see the [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556) for details. If you need to keep that data, convert it to SQL and import it after `helm install` finishes.
## NetworkPolicy
To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
```console
$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
```
With NetworkPolicy enabled, traffic will be limited to just port 5432.
For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
This label will be displayed in the output of a successful install.
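For example, a sketch of an install with the stricter policy:
```console
$ helm install my-release \
  --set networkPolicy.enabled=true \
  --set networkPolicy.allowExternal=false \
  stable/postgresql
```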
## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
- For OpenShift, one may either define `runAsUser` and `fsGroup` accordingly, or try the more dynamic option: `volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false` (see the sketch below).
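A sketch of that dynamic OpenShift option as an install command:
```console
$ helm install my-release \
  --set volumePermissions.securityContext.runAsUser=auto \
  --set securityContext.enabled=false \
  --set shmVolume.chmod.enabled=false \
  stable/postgresql
```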
### Deploy chart using Docker Official PostgreSQL Image
From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image.
Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. The PostgreSQL data dir cannot be the mount point directly; it has to be a subdirectory.
```
helm install postgres \
--set image.repository=postgres \
--set image.tag=10.6 \
--set postgresqlDataDir=/data/pgdata \
--set persistence.mountPath=/data/ \
stable/postgresql
```
## Upgrade
It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart:
```bash
$ helm upgrade my-release stable/postgresql \
--set postgresqlPassword=[POSTGRESQL_PASSWORD] \
--set replication.password=[REPLICATION_PASSWORD]
```
> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes.
## 8.0.0
Prefixes the port names with their protocols to comply with Istio conventions.
If you depend on the port names in your setup, make sure to update them to reflect this change.
## 7.1.0
Adds support for LDAP configuration.
## 7.0.0
Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec.
In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in line with the API deprecations, resulting in a compatibility break.
This major version bump signifies this change.
## 6.5.7
In this version, the chart will use PostgreSQL with the PostGIS extension included. The version used with PostgreSQL 10, 11 and 12 is PostGIS 2.5. It has been compiled with the following dependencies:
- protobuf
- protobuf-c
- json-c
- geos
- proj
## 5.0.0
In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/).
For major releases of PostgreSQL, the internal data storage format is subject to change, which complicates upgrades. You may see errors like the following in the logs:
```bash
Welcome to the Bitnami postgresql container
Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql
Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues
Send us your feedback at containers@bitnami.com
INFO ==> ** Starting PostgreSQL setup **
NFO ==> Validating settings in POSTGRESQL_* env vars..
INFO ==> Initializing PostgreSQL database...
INFO ==> postgresql.conf file not detected. Generating it...
INFO ==> pg_hba.conf file not detected. Generating it...
INFO ==> Deploying PostgreSQL with persisted data...
INFO ==> Configuring replication parameters
INFO ==> Loading custom scripts...
INFO ==> Enabling remote connections
INFO ==> Stopping PostgreSQL...
INFO ==> ** PostgreSQL setup finished! **
INFO ==> ** Starting PostgreSQL **
[1] FATAL: database files are incompatible with server
[1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3.
```
In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, move and restore it in the new one.
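A sketch of that approach, with `OLD_SERVICE_NAME` and `NEW_SERVICE_NAME` as placeholders for the two releases' services:
```console
$ pg_dumpall -h OLD_SERVICE_NAME -U postgres > /tmp/all.sql
$ psql -h NEW_SERVICE_NAME -U postgres -f /tmp/all.sql
```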
## 4.0.0
This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately.
IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error
```
The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development
```
## 3.0.0
This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods.
It also fixes an issue with the `postgresql.master.fullname` helper template not obeying fullnameOverride.
### Breaking changes
- `affinity` has been renamed to `master.affinity` and `slave.affinity`.
- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`.
- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`.
## 2.0.0
In order to upgrade from the `0.X.X` branch to `1.X.X`, follow the steps below:
- Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running
```console
$ kubectl get svc
```
- Install (not upgrade) the new version
```console
$ helm repo update
$ helm install my-release stable/postgresql
```
- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
```console
$ kubectl exec -it NAME bash
```
- Once logged in, create a dump file from the previous database using `pg_dump`; for that, connect to the previous PostgreSQL chart:
```console
$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
```
After running the above command you should be prompted for a password; this password is the previous chart password (`OLD_PASSWORD`).
This operation could take some time depending on the database size.
- Once you have the backup file, you can restore it with a command like the one below:
```console
$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
```
In this case, you are accessing the local PostgreSQL instance, so the password should be the new one (you can find it in NOTES.txt).
If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below.
```console
$ psql -U postgres
postgres=# drop database DATABASE_NAME;
postgres=# create database DATABASE_NAME;
postgres=# create user USER_NAME;
postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD';
postgres=# grant all privileges on database DATABASE_NAME to USER_NAME;
postgres=# alter database DATABASE_NAME owner to USER_NAME;
```

View File

@@ -0,0 +1 @@
# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml.

View File

@@ -0,0 +1,2 @@
shmVolume:
enabled: false

View File

@@ -0,0 +1 @@
Copy your postgresql.conf and/or pg_hba.conf files here to use them as a config map.

View File

@@ -0,0 +1,4 @@
If you don't want to provide the whole configuration file and only specify certain parameters, you can copy your extended `.conf` files here.
These files will be injected as a config map and will add to/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`.
More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file).

View File

@@ -0,0 +1,3 @@
You can copy your custom `.sh`, `.sql` or `.sql.gz` files here so they are executed during the first boot of the image.
More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository.

View File

@@ -0,0 +1,81 @@
This Helm chart is deprecated
Given the `stable` deprecation timeline (https://github.com/helm/charts#deprecation-timeline), the Bitnami maintained Helm chart is now located at bitnami/charts (https://github.com/bitnami/charts/).
The Bitnami repository is already included in the Hubs and we will continue providing the same cadence of updates, support, etc. that we have been providing here over the years. Installation instructions are very similar: just add the _bitnami_ repo and use it during the installation (`bitnami/<chart>` instead of `stable/<chart>`)
```bash
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install my-release bitnami/<chart> # Helm 3
$ helm install --name my-release bitnami/<chart> # Helm 2
```
To update an existing _stable_ deployment with a chart hosted in the bitnami repository you can execute
```bash
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm upgrade my-release bitnami/<chart>
```
Issues and PRs related to the chart itself will be redirected to the `bitnami/charts` GitHub repository. In the same way, we'll be happy to answer questions related to this migration process in this issue (https://github.com/helm/charts/issues/20969) created as a common place for discussion.
** Please be patient while the chart is being deployed **
PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster:
{{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection
{{- if .Values.replication.enabled }}
{{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection
{{- end }}
{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }}
To get the password for "postgres" run:
export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode)
{{- end }}
To get the password for "{{ template "postgresql.username" . }}" run:
export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode)
To connect to your database run the following command:
kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
--labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }}
{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
Note: Since NetworkPolicy is enabled, only pods with label "{{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster.
{{- end }}
To connect to your database from outside the cluster execute the following commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }})
{{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }}
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
{{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }}
{{- else if contains "ClusterIP" .Values.service.type }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} &
{{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }}
{{- end }}
{{- include "postgresql.validateValues" . -}}
{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }}
WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
{{- end }}

View File

@@ -0,0 +1,420 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "postgresql.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "postgresql.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "postgresql.master.fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}}
{{- if .Values.replication.enabled -}}
{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for networkpolicy.
*/}}
{{- define "postgresql.networkPolicy.apiVersion" -}}
{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
"extensions/v1beta1"
{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}}
"networking.k8s.io/v1"
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "postgresql.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the proper PostgreSQL image name
*/}}
{{- define "postgresql.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*
Return PostgreSQL postgres user password
*/}}
{{- define "postgresql.postgres.password" -}}
{{- if .Values.global.postgresql.postgresqlPostgresPassword }}
{{- .Values.global.postgresql.postgresqlPostgresPassword -}}
{{- else if .Values.postgresqlPostgresPassword -}}
{{- .Values.postgresqlPostgresPassword -}}
{{- else -}}
{{- randAlphaNum 10 -}}
{{- end -}}
{{- end -}}
{{/*
Return PostgreSQL password
*/}}
{{- define "postgresql.password" -}}
{{- if .Values.global.postgresql.postgresqlPassword }}
{{- .Values.global.postgresql.postgresqlPassword -}}
{{- else if .Values.postgresqlPassword -}}
{{- .Values.postgresqlPassword -}}
{{- else -}}
{{- randAlphaNum 10 -}}
{{- end -}}
{{- end -}}
{{/*
Return PostgreSQL replication password
*/}}
{{- define "postgresql.replication.password" -}}
{{- if .Values.global.postgresql.replicationPassword }}
{{- .Values.global.postgresql.replicationPassword -}}
{{- else if .Values.replication.password -}}
{{- .Values.replication.password -}}
{{- else -}}
{{- randAlphaNum 10 -}}
{{- end -}}
{{- end -}}
{{/*
Return PostgreSQL username
*/}}
{{- define "postgresql.username" -}}
{{- if .Values.global.postgresql.postgresqlUsername }}
{{- .Values.global.postgresql.postgresqlUsername -}}
{{- else -}}
{{- .Values.postgresqlUsername -}}
{{- end -}}
{{- end -}}
{{/*
Return PostgreSQL replication username
*/}}
{{- define "postgresql.replication.username" -}}
{{- if .Values.global.postgresql.replicationUser }}
{{- .Values.global.postgresql.replicationUser -}}
{{- else -}}
{{- .Values.replication.user -}}
{{- end -}}
{{- end -}}
{{/*
Return PostgreSQL port
*/}}
{{- define "postgresql.port" -}}
{{- if .Values.global.postgresql.servicePort }}
{{- .Values.global.postgresql.servicePort -}}
{{- else -}}
{{- .Values.service.port -}}
{{- end -}}
{{- end -}}
{{/*
Return PostgreSQL created database
*/}}
{{- define "postgresql.database" -}}
{{- if .Values.global.postgresql.postgresqlDatabase }}
{{- .Values.global.postgresql.postgresqlDatabase -}}
{{- else if .Values.postgresqlDatabase -}}
{{- .Values.postgresqlDatabase -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper image name to change the volume permissions
*/}}
{{- define "postgresql.volumePermissions.image" -}}
{{- $registryName := .Values.volumePermissions.image.registry -}}
{{- $repositoryName := .Values.volumePermissions.image.repository -}}
{{- $tag := .Values.volumePermissions.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper PostgreSQL metrics image name
*/}}
{{- define "postgresql.metrics.image" -}}
{{- $registryName := default "docker.io" .Values.metrics.image.registry -}}
{{- $repositoryName := .Values.metrics.image.repository -}}
{{- $tag := default "latest" .Values.metrics.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*
Get the password secret.
*/}}
{{- define "postgresql.secretName" -}}
{{- if .Values.global.postgresql.existingSecret }}
{{- printf "%s" .Values.global.postgresql.existingSecret -}}
{{- else if .Values.existingSecret -}}
{{- printf "%s" .Values.existingSecret -}}
{{- else -}}
{{- printf "%s" (include "postgresql.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a secret object should be created
*/}}
{{- define "postgresql.createSecret" -}}
{{- if .Values.global.postgresql.existingSecret }}
{{- else if .Values.existingSecret -}}
{{- else -}}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Get the configuration ConfigMap name.
*/}}
{{- define "postgresql.configurationCM" -}}
{{- if .Values.configurationConfigMap -}}
{{- printf "%s" (tpl .Values.configurationConfigMap $) -}}
{{- else -}}
{{- printf "%s-configuration" (include "postgresql.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Get the extended configuration ConfigMap name.
*/}}
{{- define "postgresql.extendedConfigurationCM" -}}
{{- if .Values.extendedConfConfigMap -}}
{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}}
{{- else -}}
{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Get the initialization scripts ConfigMap name.
*/}}
{{- define "postgresql.initdbScriptsCM" -}}
{{- if .Values.initdbScriptsConfigMap -}}
{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}}
{{- else -}}
{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Get the initialization scripts Secret name.
*/}}
{{- define "postgresql.initdbScriptsSecret" -}}
{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}}
{{- end -}}
{{/*
Get the metrics ConfigMap name.
*/}}
{{- define "postgresql.metricsCM" -}}
{{- printf "%s-metrics" (include "postgresql.fullname" .) -}}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names
*/}}
{{- define "postgresql.imagePullSecrets" -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
Also, we can not use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.global.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.metrics.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.volumePermissions.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end -}}
{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.metrics.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.volumePermissions.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end -}}
{{- end -}}
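{{/*
A sketch of the rendered output, assuming a hypothetical values file that sets
global.imagePullSecrets to [my-registry-secret]:
  imagePullSecrets:
  - name: my-registry-secret
*/}}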
{{/*
Get the readiness probe command
*/}}
{{- define "postgresql.readinessProbeCommand" -}}
- |
{{- if (include "postgresql.database" .) }}
exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
{{- else }}
exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
{{- end }}
{{- if contains "bitnami/" .Values.image.repository }}
[ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
{{- end -}}
{{- end -}}
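{{/*
Example rendering, assuming the chart defaults (postgresqlUsername=postgres,
service.port=5432, a bitnami/ repository, and no postgresqlDatabase set):
  - |
    exec pg_isready -U "postgres" -h 127.0.0.1 -p 5432
    [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
*/}}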
{{/*
Return the proper Storage Class
*/}}
{{- define "postgresql.storageClass" -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
*/}}
{{- if .Values.global -}}
{{- if .Values.global.storageClass -}}
{{- if (eq "-" .Values.global.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.global.storageClass -}}
{{- end -}}
{{- else -}}
{{- if .Values.persistence.storageClass -}}
{{- if (eq "-" .Values.persistence.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- else -}}
{{- if .Values.persistence.storageClass -}}
{{- if (eq "-" .Values.persistence.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
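{{/*
For example, persistence.storageClass: "standard" renders
"storageClassName: standard", the special value "-" renders
storageClassName: "" (disabling dynamic provisioning), and leaving the value
unset emits nothing, so the default provisioner is used.
*/}}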
{{/*
Renders a value that contains template.
Usage:
{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "postgresql.tplValue" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}
{{/*
Return the appropriate apiVersion for statefulset.
*/}}
{{- define "postgresql.statefulset.apiVersion" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "apps/v1beta2" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Compile all warnings into a single message, and call fail.
*/}}
{{- define "postgresql.validateValues" -}}
{{- $messages := list -}}
{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}}
{{- $messages := without $messages "" -}}
{{- $message := join "\n" $messages -}}
{{- if $message -}}
{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
{{- end -}}
{{- end -}}
{{/*
Validate values of PostgreSQL - if ldap.url is set, the other LDAP settings are not needed
*/}}
{{- define "postgresql.validateValues.ldapConfigurationMethod" -}}
{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }}
postgresql: ldap.url, ldap.server
You cannot set both `ldap.url` and `ldap.server` at the same time.
Please provide only one way to configure LDAP.
More info at https://www.postgresql.org/docs/current/auth-ldap.html
{{- end -}}
{{- end -}}
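{{/*
A hypothetical values snippet that would trigger the failure above:
  ldap:
    enabled: true
    url: "ldap://ldap.example.org:389/dc=example,dc=org"
    server: "ldap.example.org"
*/}}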

View File

@@ -0,0 +1,26 @@
{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "postgresql.fullname" . }}-configuration
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
data:
{{- if (.Files.Glob "files/postgresql.conf") }}
{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }}
{{- else if .Values.postgresqlConfiguration }}
postgresql.conf: |
{{- range $key, $value := default dict .Values.postgresqlConfiguration }}
{{ $key | snakecase }}={{ $value }}
{{- end }}
{{- end }}
{{- if (.Files.Glob "files/pg_hba.conf") }}
{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }}
{{- else if .Values.pgHbaConfiguration }}
pg_hba.conf: |
{{ .Values.pgHbaConfiguration | indent 4 }}
{{- end }}
{{ end }}
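{{/*
For example, a hypothetical postgresqlConfiguration of
{"sharedBuffers": "500MB", "maxConnections": "200"} renders into
postgresql.conf as:
  shared_buffers=500MB
  max_connections=200
*/}}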

View File

@@ -0,0 +1,21 @@
{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "postgresql.fullname" . }}-extended-configuration
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
data:
{{- with .Files.Glob "files/conf.d/*.conf" }}
{{ .AsConfig | indent 2 }}
{{- end }}
{{ with .Values.postgresqlExtendedConf }}
override.conf: |
{{- range $key, $value := . }}
{{ $key | snakecase }}={{ $value }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,24 @@
{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "postgresql.fullname" . }}-init-scripts
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }}
binaryData:
{{- range $path, $bytes := . }}
{{ base $path }}: {{ $.Files.Get $path | b64enc | quote }}
{{- end }}
{{- end }}
data:
{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }}
{{ .AsConfig | indent 2 }}
{{- end }}
{{- with .Values.initdbScripts }}
{{ toYaml . | indent 2 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,13 @@
{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "postgresql.metricsCM" . }}
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
data:
custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }}
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- if .Values.metrics.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "postgresql.fullname" . }}-metrics
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
annotations:
{{ toYaml .Values.metrics.service.annotations | indent 4 }}
spec:
type: {{ .Values.metrics.service.type }}
{{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }}
{{- end }}
ports:
- name: http-metrics
port: 9187
targetPort: http-metrics
selector:
app: {{ template "postgresql.name" . }}
release: {{ .Release.Name }}
role: master
{{- end }}

View File

@@ -0,0 +1,38 @@
{{- if .Values.networkPolicy.enabled }}
kind: NetworkPolicy
apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }}
metadata:
name: {{ template "postgresql.fullname" . }}
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
spec:
podSelector:
matchLabels:
app: {{ template "postgresql.name" . }}
release: {{ .Release.Name | quote }}
ingress:
# Allow inbound connections
- ports:
- port: {{ template "postgresql.port" . }}
{{- if not .Values.networkPolicy.allowExternal }}
from:
- podSelector:
matchLabels:
{{ template "postgresql.fullname" . }}-client: "true"
{{- if .Values.networkPolicy.explicitNamespacesSelector }}
namespaceSelector:
{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }}
{{- end }}
- podSelector:
matchLabels:
app: {{ template "postgresql.name" . }}
release: {{ .Release.Name | quote }}
role: slave
{{- end }}
# Allow prometheus scrapes
- ports:
- port: 9187
{{- end }}

View File

@@ -0,0 +1,23 @@
{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "postgresql.fullname" . }}
{{- with .Values.metrics.prometheusRule.namespace }}
namespace: {{ . }}
{{- end }}
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
{{- with .Values.metrics.prometheusRule.additionalLabels }}
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- with .Values.metrics.prometheusRule.rules }}
groups:
- name: {{ template "postgresql.name" $ }}
rules: {{ tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,23 @@
{{- if (include "postgresql.createSecret" .) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "postgresql.fullname" . }}
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
type: Opaque
data:
{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }}
postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }}
{{- end }}
postgresql-password: {{ include "postgresql.password" . | b64enc | quote }}
{{- if .Values.replication.enabled }}
postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }}
{{- end }}
{{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}}
postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,11 @@
{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
name: {{ template "postgresql.fullname" . }}
{{- end }}

View File

@@ -0,0 +1,33 @@
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "postgresql.fullname" . }}
{{- if .Values.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.metrics.serviceMonitor.namespace }}
{{- end }}
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
{{- if .Values.metrics.serviceMonitor.additionalLabels }}
{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
spec:
endpoints:
- port: http-metrics
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ template "postgresql.name" . }}
release: {{ .Release.Name }}
{{- end }}

View File

@@ -0,0 +1,299 @@
{{- if .Values.replication.enabled }}
apiVersion: {{ template "postgresql.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: "{{ template "postgresql.fullname" . }}-slave"
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
{{- with .Values.slave.labels }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- with .Values.slave.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
serviceName: {{ template "postgresql.fullname" . }}-headless
replicas: {{ .Values.replication.slaveReplicas }}
selector:
matchLabels:
app: {{ template "postgresql.name" . }}
release: {{ .Release.Name | quote }}
role: slave
template:
metadata:
name: {{ template "postgresql.fullname" . }}
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
role: slave
{{- with .Values.slave.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.slave.podAnnotations }}
annotations:
{{ toYaml . | indent 8 }}
{{- end }}
spec:
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
{{- include "postgresql.imagePullSecrets" . | indent 6 }}
{{- if .Values.slave.nodeSelector }}
nodeSelector:
{{ toYaml .Values.slave.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.slave.affinity }}
affinity:
{{ toYaml .Values.slave.affinity | indent 8 }}
{{- end }}
{{- if .Values.slave.tolerations }}
tolerations:
{{ toYaml .Values.slave.tolerations | indent 8 }}
{{- end }}
{{- if .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
{{- end }}
{{- if .Values.securityContext.enabled }}
securityContext:
fsGroup: {{ .Values.securityContext.fsGroup }}
{{- end }}
{{- if .Values.serviceAccount.enabled }}
serviceAccountName: {{ default (include "postgresql.fullname" .) .Values.serviceAccount.name }}
{{- end }}
{{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }}
initContainers:
{{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }}
- name: init-chmod-data
image: {{ template "postgresql.volumePermissions.image" . }}
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 12 }}
{{- end }}
command:
- /bin/sh
- -cx
- |
{{ if .Values.persistence.enabled }}
mkdir -p {{ .Values.persistence.mountPath }}/data
chmod 700 {{ .Values.persistence.mountPath }}/data
find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \
{{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
xargs chown -R `id -u`:`id -G | cut -d " " -f2`
{{- else }}
xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}
{{- end }}
{{- end }}
{{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }}
chmod -R 777 /dev/shm
{{- end }}
{{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
securityContext:
{{- else }}
securityContext:
runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}
{{- end }}
volumeMounts:
{{ if .Values.persistence.enabled }}
- name: data
mountPath: {{ .Values.persistence.mountPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- if .Values.shmVolume.enabled }}
- name: dshm
mountPath: /dev/shm
{{- end }}
{{- end }}
{{- if .Values.slave.extraInitContainers }}
{{ tpl .Values.slave.extraInitContainers . | indent 8 }}
{{- end }}
{{- end }}
{{- if .Values.slave.priorityClassName }}
priorityClassName: {{ .Values.slave.priorityClassName }}
{{- end }}
containers:
- name: {{ template "postgresql.fullname" . }}
image: {{ template "postgresql.image" . }}
imagePullPolicy: "{{ .Values.image.pullPolicy }}"
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 12 }}
{{- end }}
{{- if .Values.securityContext.enabled }}
securityContext:
runAsUser: {{ .Values.securityContext.runAsUser }}
{{- end }}
env:
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" .Values.image.debug | quote }}
- name: POSTGRESQL_VOLUME_DIR
value: "{{ .Values.persistence.mountPath }}"
- name: POSTGRESQL_PORT_NUMBER
value: "{{ template "postgresql.port" . }}"
{{- if .Values.persistence.mountPath }}
- name: PGDATA
value: {{ .Values.postgresqlDataDir | quote }}
{{- end }}
- name: POSTGRES_REPLICATION_MODE
value: "slave"
- name: POSTGRES_REPLICATION_USER
value: {{ include "postgresql.replication.username" . | quote }}
{{- if .Values.usePasswordFile }}
- name: POSTGRES_REPLICATION_PASSWORD_FILE
value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password"
{{- else }}
- name: POSTGRES_REPLICATION_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "postgresql.secretName" . }}
key: postgresql-replication-password
{{- end }}
- name: POSTGRES_CLUSTER_APP_NAME
value: {{ .Values.replication.applicationName }}
- name: POSTGRES_MASTER_HOST
value: {{ template "postgresql.fullname" . }}
- name: POSTGRES_MASTER_PORT_NUMBER
value: {{ include "postgresql.port" . | quote }}
{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }}
{{- if .Values.usePasswordFile }}
- name: POSTGRES_POSTGRES_PASSWORD_FILE
value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password"
{{- else }}
- name: POSTGRES_POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "postgresql.secretName" . }}
key: postgresql-postgres-password
{{- end }}
{{- end }}
{{- if .Values.usePasswordFile }}
- name: POSTGRES_PASSWORD_FILE
value: "/opt/bitnami/postgresql/secrets/postgresql-password"
{{- else }}
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "postgresql.secretName" . }}
key: postgresql-password
{{- end }}
ports:
- name: tcp-postgresql
containerPort: {{ template "postgresql.port" . }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- /bin/sh
- -c
{{- if (include "postgresql.database" .) }}
- exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
{{- else }}
- exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
{{- end }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- /bin/sh
- -c
- -e
{{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
{{- if .Values.usePasswordFile }}
- name: postgresql-password
mountPath: /opt/bitnami/postgresql/secrets/
{{- end }}
{{- if .Values.shmVolume.enabled }}
- name: dshm
mountPath: /dev/shm
{{- end }}
{{- if .Values.persistence.enabled }}
- name: data
mountPath: {{ .Values.persistence.mountPath }}
subPath: {{ .Values.persistence.subPath }}
{{ end }}
{{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
- name: postgresql-extended-config
mountPath: /bitnami/postgresql/conf/conf.d/
{{- end }}
{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }}
- name: postgresql-config
mountPath: /bitnami/postgresql/conf
{{- end }}
{{- if .Values.slave.extraVolumeMounts }}
{{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.slave.sidecars }}
{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }}
{{- end }}
volumes:
{{- if .Values.usePasswordFile }}
- name: postgresql-password
secret:
secretName: {{ template "postgresql.secretName" . }}
{{- end }}
{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}}
- name: postgresql-config
configMap:
name: {{ template "postgresql.configurationCM" . }}
{{- end }}
{{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
- name: postgresql-extended-config
configMap:
name: {{ template "postgresql.extendedConfigurationCM" . }}
{{- end }}
{{- if .Values.shmVolume.enabled }}
- name: dshm
emptyDir:
medium: Memory
sizeLimit: 1Gi
{{- end }}
{{- if not .Values.persistence.enabled }}
- name: data
emptyDir: {}
{{- end }}
{{- if .Values.slave.extraVolumes }}
{{- toYaml .Values.slave.extraVolumes | nindent 8 }}
{{- end }}
updateStrategy:
type: {{ .Values.updateStrategy.type }}
{{- if (eq "Recreate" .Values.updateStrategy.type) }}
rollingUpdate: null
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- metadata:
name: data
{{- with .Values.persistence.annotations }}
annotations:
{{- range $key, $value := . }}
{{ $key }}: {{ $value }}
{{- end }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{ include "postgresql.storageClass" . }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,458 @@
apiVersion: {{ template "postgresql.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ template "postgresql.master.fullname" . }}
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
{{- with .Values.master.labels }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- with .Values.master.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
serviceName: {{ template "postgresql.fullname" . }}-headless
replicas: 1
updateStrategy:
type: {{ .Values.updateStrategy.type }}
{{- if (eq "Recreate" .Values.updateStrategy.type) }}
rollingUpdate: null
{{- end }}
selector:
matchLabels:
app: {{ template "postgresql.name" . }}
release: {{ .Release.Name | quote }}
role: master
template:
metadata:
name: {{ template "postgresql.fullname" . }}
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
role: master
{{- with .Values.master.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.master.podAnnotations }}
annotations:
{{ toYaml . | indent 8 }}
{{- end }}
spec:
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
{{- include "postgresql.imagePullSecrets" . | indent 6 }}
{{- if .Values.master.nodeSelector }}
nodeSelector:
{{ toYaml .Values.master.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.master.affinity }}
affinity:
{{ toYaml .Values.master.affinity | indent 8 }}
{{- end }}
{{- if .Values.master.tolerations }}
tolerations:
{{ toYaml .Values.master.tolerations | indent 8 }}
{{- end }}
{{- if .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
{{- end }}
{{- if .Values.securityContext.enabled }}
securityContext:
fsGroup: {{ .Values.securityContext.fsGroup }}
{{- end }}
{{- if .Values.serviceAccount.enabled }}
serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }}
{{- end }}
{{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }}
initContainers:
{{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }}
- name: init-chmod-data
image: {{ template "postgresql.volumePermissions.image" . }}
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 12 }}
{{- end }}
command:
- /bin/sh
- -cx
- |
{{ if .Values.persistence.enabled }}
mkdir -p {{ .Values.persistence.mountPath }}/data
chmod 700 {{ .Values.persistence.mountPath }}/data
find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \
{{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
xargs chown -R `id -u`:`id -G | cut -d " " -f2`
{{- else }}
xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}
{{- end }}
{{- end }}
{{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }}
chmod -R 777 /dev/shm
{{- end }}
{{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
securityContext:
{{- else }}
securityContext:
runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}
{{- end }}
volumeMounts:
{{ if .Values.persistence.enabled }}
- name: data
mountPath: {{ .Values.persistence.mountPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- if .Values.shmVolume.enabled }}
- name: dshm
mountPath: /dev/shm
{{- end }}
{{- end }}
{{- if .Values.master.extraInitContainers }}
{{ tpl .Values.master.extraInitContainers . | indent 8 }}
{{- end }}
{{- end }}
{{- if .Values.master.priorityClassName }}
priorityClassName: {{ .Values.master.priorityClassName }}
{{- end }}
containers:
- name: {{ template "postgresql.fullname" . }}
image: {{ template "postgresql.image" . }}
imagePullPolicy: "{{ .Values.image.pullPolicy }}"
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 12 }}
{{- end }}
{{- if .Values.securityContext.enabled }}
securityContext:
runAsUser: {{ .Values.securityContext.runAsUser }}
{{- end }}
env:
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" .Values.image.debug | quote }}
- name: POSTGRESQL_PORT_NUMBER
value: "{{ template "postgresql.port" . }}"
- name: POSTGRESQL_VOLUME_DIR
value: "{{ .Values.persistence.mountPath }}"
{{- if .Values.postgresqlInitdbArgs }}
- name: POSTGRES_INITDB_ARGS
value: {{ .Values.postgresqlInitdbArgs | quote }}
{{- end }}
{{- if .Values.postgresqlInitdbWalDir }}
- name: POSTGRES_INITDB_WALDIR
value: {{ .Values.postgresqlInitdbWalDir | quote }}
{{- end }}
{{- if .Values.initdbUser }}
- name: POSTGRESQL_INITSCRIPTS_USERNAME
value: {{ .Values.initdbUser }}
{{- end }}
{{- if .Values.initdbPassword }}
- name: POSTGRESQL_INITSCRIPTS_PASSWORD
value: {{ .Values.initdbPassword | quote }}
{{- end }}
{{- if .Values.persistence.mountPath }}
- name: PGDATA
value: {{ .Values.postgresqlDataDir | quote }}
{{- end }}
{{- if .Values.replication.enabled }}
- name: POSTGRES_REPLICATION_MODE
value: "master"
- name: POSTGRES_REPLICATION_USER
value: {{ include "postgresql.replication.username" . | quote }}
{{- if .Values.usePasswordFile }}
- name: POSTGRES_REPLICATION_PASSWORD_FILE
value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password"
{{- else }}
- name: POSTGRES_REPLICATION_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "postgresql.secretName" . }}
key: postgresql-replication-password
{{- end }}
{{- if not (eq .Values.replication.synchronousCommit "off")}}
- name: POSTGRES_SYNCHRONOUS_COMMIT_MODE
value: {{ .Values.replication.synchronousCommit | quote }}
- name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS
value: {{ .Values.replication.numSynchronousReplicas | quote }}
{{- end }}
- name: POSTGRES_CLUSTER_APP_NAME
value: {{ .Values.replication.applicationName }}
{{- end }}
{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }}
{{- if .Values.usePasswordFile }}
- name: POSTGRES_POSTGRES_PASSWORD_FILE
value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password"
{{- else }}
- name: POSTGRES_POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "postgresql.secretName" . }}
key: postgresql-postgres-password
{{- end }}
{{- end }}
- name: POSTGRES_USER
value: {{ include "postgresql.username" . | quote }}
{{- if .Values.usePasswordFile }}
- name: POSTGRES_PASSWORD_FILE
value: "/opt/bitnami/postgresql/secrets/postgresql-password"
{{- else }}
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "postgresql.secretName" . }}
key: postgresql-password
{{- end }}
{{- if (include "postgresql.database" .) }}
- name: POSTGRES_DB
value: {{ (include "postgresql.database" .) | quote }}
{{- end }}
{{- if .Values.extraEnv }}
{{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }}
{{- end }}
- name: POSTGRESQL_ENABLE_LDAP
value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }}
{{- if .Values.ldap.enabled }}
- name: POSTGRESQL_LDAP_SERVER
value: {{ .Values.ldap.server }}
- name: POSTGRESQL_LDAP_PORT
value: {{ .Values.ldap.port | quote }}
- name: POSTGRESQL_LDAP_SCHEME
value: {{ .Values.ldap.scheme }}
{{- if .Values.ldap.tls }}
- name: POSTGRESQL_LDAP_TLS
value: "1"
{{- end}}
- name: POSTGRESQL_LDAP_PREFIX
value: {{ .Values.ldap.prefix | quote }}
- name: POSTGRESQL_LDAP_SUFFIX
value: {{ .Values.ldap.suffix | quote}}
- name: POSTGRESQL_LDAP_BASE_DN
value: {{ .Values.ldap.baseDN }}
- name: POSTGRESQL_LDAP_BIND_DN
value: {{ .Values.ldap.bindDN }}
{{- if (not (empty .Values.ldap.bind_password)) }}
- name: POSTGRESQL_LDAP_BIND_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "postgresql.secretName" . }}
key: postgresql-ldap-password
{{- end}}
- name: POSTGRESQL_LDAP_SEARCH_ATTR
value: {{ .Values.ldap.search_attr }}
- name: POSTGRESQL_LDAP_SEARCH_FILTER
value: {{ .Values.ldap.search_filter }}
- name: POSTGRESQL_LDAP_URL
value: {{ .Values.ldap.url }}
{{- end}}
{{- if .Values.extraEnvVarsCM }}
envFrom:
- configMapRef:
name: {{ .Values.extraEnvVarsCM }}
{{- end }}
ports:
- name: tcp-postgresql
containerPort: {{ template "postgresql.port" . }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- /bin/sh
- -c
{{- if (include "postgresql.database" .) }}
- exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
{{- else }}
- exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
{{- end }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- /bin/sh
- -c
- -e
{{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
{{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d/
{{- end }}
{{- if .Values.initdbScriptsSecret }}
- name: custom-init-scripts-secret
mountPath: /docker-entrypoint-initdb.d/secret
{{- end }}
{{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
- name: postgresql-extended-config
mountPath: /bitnami/postgresql/conf/conf.d/
{{- end }}
{{- if .Values.usePasswordFile }}
- name: postgresql-password
mountPath: /opt/bitnami/postgresql/secrets/
{{- end }}
{{- if .Values.shmVolume.enabled }}
- name: dshm
mountPath: /dev/shm
{{- end }}
{{- if .Values.persistence.enabled }}
- name: data
mountPath: {{ .Values.persistence.mountPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }}
- name: postgresql-config
mountPath: /bitnami/postgresql/conf
{{- end }}
{{- if .Values.master.extraVolumeMounts }}
{{- toYaml .Values.master.extraVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.master.sidecars }}
{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.enabled }}
- name: metrics
image: {{ template "postgresql.metrics.image" . }}
imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
{{- if .Values.metrics.securityContext.enabled }}
securityContext:
runAsUser: {{ .Values.metrics.securityContext.runAsUser }}
{{- end }}
env:
{{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }}
- name: DATA_SOURCE_URI
value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.port" .)) $database | quote }}
{{- if .Values.usePasswordFile }}
- name: DATA_SOURCE_PASS_FILE
value: "/opt/bitnami/postgresql/secrets/postgresql-password"
{{- else }}
- name: DATA_SOURCE_PASS
valueFrom:
secretKeyRef:
name: {{ template "postgresql.secretName" . }}
key: postgresql-password
{{- end }}
- name: DATA_SOURCE_USER
value: {{ template "postgresql.username" . }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /
port: http-metrics
initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /
port: http-metrics
initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
{{- if .Values.usePasswordFile }}
- name: postgresql-password
mountPath: /opt/bitnami/postgresql/secrets/
{{- end }}
{{- if .Values.metrics.customMetrics }}
- name: custom-metrics
mountPath: /conf
readOnly: true
args: ["--extend.query-path", "/conf/custom-metrics.yaml"]
{{- end }}
ports:
- name: http-metrics
containerPort: 9187
{{- if .Values.metrics.resources }}
resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
{{- end }}
{{- end }}
volumes:
{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}}
- name: postgresql-config
configMap:
name: {{ template "postgresql.configurationCM" . }}
{{- end }}
{{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
- name: postgresql-extended-config
configMap:
name: {{ template "postgresql.extendedConfigurationCM" . }}
{{- end }}
{{- if .Values.usePasswordFile }}
- name: postgresql-password
secret:
secretName: {{ template "postgresql.secretName" . }}
{{- end }}
{{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "postgresql.initdbScriptsCM" . }}
{{- end }}
{{- if .Values.initdbScriptsSecret }}
- name: custom-init-scripts-secret
secret:
secretName: {{ template "postgresql.initdbScriptsSecret" . }}
{{- end }}
{{- if .Values.master.extraVolumes }}
{{- toYaml .Values.master.extraVolumes | nindent 8 }}
{{- end }}
{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
- name: custom-metrics
configMap:
name: {{ template "postgresql.metricsCM" . }}
{{- end }}
{{- if .Values.shmVolume.enabled }}
- name: dshm
emptyDir:
medium: Memory
sizeLimit: 1Gi
{{- end }}
{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }}
- name: data
persistentVolumeClaim:
{{- with .Values.persistence.existingClaim }}
claimName: {{ tpl . $ }}
{{- end }}
{{- else if not .Values.persistence.enabled }}
- name: data
emptyDir: {}
{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
volumeClaimTemplates:
- metadata:
name: data
{{- with .Values.persistence.annotations }}
annotations:
{{- range $key, $value := . }}
{{ $key }}: {{ $value }}
{{- end }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{ include "postgresql.storageClass" . }}
{{- end }}

View File

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "postgresql.fullname" . }}-headless
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-postgresql
port: {{ template "postgresql.port" . }}
targetPort: tcp-postgresql
selector:
app: {{ template "postgresql.name" . }}
release: {{ .Release.Name | quote }}

View File

@@ -0,0 +1,31 @@
{{- if .Values.replication.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "postgresql.fullname" . }}-read
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
{{- with .Values.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
ports:
- name: tcp-postgresql
port: {{ template "postgresql.port" . }}
targetPort: tcp-postgresql
{{- if .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
app: {{ template "postgresql.name" . }}
release: {{ .Release.Name | quote }}
role: slave
{{- end }}

View File

@@ -0,0 +1,38 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "postgresql.fullname" . }}
labels:
app: {{ template "postgresql.name" . }}
chart: {{ template "postgresql.chart" . }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
{{- with .Values.service.annotations }}
annotations:
{{ tpl (toYaml .) $ | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ with .Values.service.loadBalancerSourceRanges }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }}
{{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
ports:
- name: tcp-postgresql
port: {{ template "postgresql.port" . }}
targetPort: tcp-postgresql
{{- if .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
app: {{ template "postgresql.name" . }}
release: {{ .Release.Name | quote }}
role: master

View File

@@ -0,0 +1,520 @@
## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, that are configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
global:
postgresql: {}
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami PostgreSQL image version
## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
##
image:
registry: docker.io
repository: bitnami/postgresql
tag: 11.7.0-debian-10-r9
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Set to true if you would like to see extra information on logs
## It turns on BASH and NAMI debugging in minideb
## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
debug: false
## String to partially override postgresql.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override postgresql.fullname template
##
# fullnameOverride:
##
## Init containers parameters:
## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Init container Security Context
## Note: the chown of the data folder is done to securityContext.runAsUser
## and not the below volumePermissions.securityContext.runAsUser
## When runAsUser is set to the special value "auto", the init container will try to chown the
## data folder to an auto-determined user and group, using the commands: `id -u`:`id -G | cut -d" " -f2`
## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with
## pod securityContext.enabled=false and shmVolume.chmod.enabled=false
##
securityContext:
runAsUser: 0
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Pod Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
enabled: false
## Name of an already existing service account. Setting this value disables the automatic service account creation.
# name:
replication:
enabled: true
user: repl_user
password: repl_password
slaveReplicas: 2
## Set synchronous commit mode: on, off, remote_apply, remote_write and local
## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL
synchronousCommit: "on"
## Out of the `slaveReplicas` defined above, set the number that will use synchronous replication
## NOTE: It cannot be > slaveReplicas
numSynchronousReplicas: 1
## Replication Cluster application name. Useful for defining multiple replication policies
applicationName: my_application
## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`)
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!)
# postgresqlPostgresPassword:
## PostgreSQL user (has superuser privileges if username is `postgres`)
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
postgresqlUsername: postgres
## PostgreSQL password
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
##
# postgresqlPassword:
## PostgreSQL password using existing secret
## existingSecret: secret
## Mount PostgreSQL secret as a file instead of passing environment variable
# usePasswordFile: false
## Create a database
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
##
# postgresqlDatabase:
## PostgreSQL data dir
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
postgresqlDataDir: /bitnami/postgresql/data
## An array to add extra environment variables
## For example:
## extraEnv:
## - name: FOO
## value: "bar"
##
# extraEnv:
extraEnv: []
## Name of a ConfigMap containing extra env vars
##
# extraEnvVarsCM:
## Specify extra initdb args
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
# postgresqlInitdbArgs:
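# A hypothetical example that enables data checksums at initdb time:
# postgresqlInitdbArgs: "--data-checksums"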
## Specify a custom location for the PostgreSQL transaction log
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
# postgresqlInitdbWalDir:
## PostgreSQL configuration
## Specify runtime configuration parameters as a dict, using camelCase, e.g.
## {"sharedBuffers": "500MB"}
## Alternatively, you can put your postgresql.conf under the files/ directory
## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
##
# postgresqlConfiguration:
## PostgreSQL extended configuration
## As above, but _appended_ to the main configuration
## Alternatively, you can put your *.conf under the files/conf.d/ directory
## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
##
# postgresqlExtendedConf:
## PostgreSQL client authentication configuration
## Specify content for pg_hba.conf
## Default: do not create pg_hba.conf
## Alternatively, you can put your pg_hba.conf under the files/ directory
# pgHbaConfiguration: |-
# local all all trust
# host all all localhost trust
# host mydatabase mysuser 192.168.0.0/24 md5
## ConfigMap with PostgreSQL configuration
## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
# configurationConfigMap:
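# e.g. (hypothetical ConfigMap, must exist in the release namespace):
# configurationConfigMap: my-postgresql-configuration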
## ConfigMap with PostgreSQL extended configuration
# extendedConfConfigMap:
## initdb scripts
## Specify dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
##
# initdbScripts:
# my_init_script.sh: |
# #!/bin/sh
# echo "Do something."
## Specify the PostgreSQL username and password to execute the initdb scripts
# initdbUser:
# initdbPassword:
## ConfigMap with scripts to be run at first boot
## NOTE: This will override initdbScripts
# initdbScriptsConfigMap:
## Secret with scripts to be run at first boot (in case it contains sensitive information)
## NOTE: This can work along initdbScripts or initdbScriptsConfigMap
# initdbScriptsSecret:
## Optional duration in seconds the pod needs to terminate gracefully.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
##
# terminationGracePeriodSeconds: 30
## LDAP configuration
##
ldap:
enabled: false
url: ""
server: ""
port: ""
prefix: ""
suffix: ""
baseDN: ""
bindDN: ""
bind_password:
search_attr: ""
search_filter: ""
scheme: ""
tls: false
## PostgreSQL service configuration
service:
## PostgreSQL service type
type: ClusterIP
# clusterIP: None
port: 5432
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required.
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
annotations: {}
## Set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
# loadBalancerIP:
## Load Balancer sources
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges:
# - 10.10.10.0/24
## Start master and slave(s) pod(s) without limitations on shm memory.
## By default docker and containerd (and possibly other container runtimes)
## limit `/dev/shm` to `64M` (see e.g. the
## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
## [containerd issue](https://github.com/containerd/containerd/issues/3654)),
## which may not be enough if PostgreSQL uses parallel workers heavily.
##
shmVolume:
## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove
## this limitation.
##
enabled: true
## Set to `true` to `chmod 777 /dev/shm` in an init container.
## This option is ignored if `volumePermissions.enabled` is `false`
##
chmod:
enabled: true
## PostgreSQL data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
##
# existingClaim:
## The path the volume will be mounted at, useful when using different
## PostgreSQL images.
##
mountPath: /bitnami/postgresql
## The subdirectory of the volume to mount, useful in dev environments
## and when one PV is shared by multiple services.
##
subPath: ""
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: RollingUpdate
##
## PostgreSQL Master parameters
##
master:
## Node, affinity, tolerations, and priorityclass settings for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
nodeSelector: {}
affinity: {}
tolerations: []
labels: {}
annotations: {}
podLabels: {}
podAnnotations: {}
priorityClassName: ""
## Additional PostgreSQL Master Volume mounts
##
extraVolumeMounts: []
## Additional PostgreSQL Master Volumes
##
extraVolumes: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
##
## PostgreSQL Slave parameters
##
slave:
## Node, affinity, tolerations, and priorityclass settings for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
nodeSelector: {}
affinity: {}
tolerations: []
labels: {}
annotations: {}
podLabels: {}
podAnnotations: {}
priorityClassName: ""
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## Additional PostgreSQL Slave Volume mounts
##
extraVolumeMounts: []
## Additional PostgreSQL Slave Volumes
##
extraVolumes: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 256Mi
cpu: 250m
networkPolicy:
## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
##
enabled: false
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the port PostgreSQL is listening
## on. When true, PostgreSQL will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## If explicitNamespacesSelector is missing or set to {}, only client Pods in the networkPolicy's namespace
## that match the other criteria (i.e. carry the correct client label) can reach the DB.
## If the DB should also be accessible to clients from other namespaces, use this
## LabelSelector to select those namespaces; note that the networkPolicy's own namespace must then be explicitly added as well.
##
# explicitNamespacesSelector:
# matchLabels:
# role: frontend
# matchExpressions:
# - {key: role, operator: In, values: [frontend]}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure metrics exporter
##
metrics:
enabled: true
# resources: {}
service:
type: ClusterIP
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9187"
loadBalancerIP:
serviceMonitor:
enabled: false
additionalLabels: {}
# namespace: monitoring
# interval: 30s
# scrapeTimeout: 10s
## Custom PrometheusRule to be defined
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules: []
## These are just example rules; please adapt them to your needs.
## Make sure to constrain the rules to the current postgresql service.
# - alert: HugeReplicationLag
# expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
# for: 1m
# labels:
# severity: critical
# annotations:
# description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
# summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
image:
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.8.0-debian-10-r28
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Define additional custom metrics
## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
# customMetrics:
# pg_database:
# query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
# metrics:
# - name:
# usage: "LABEL"
# description: "Name of the database"
# - size_bytes:
# usage: "GAUGE"
# description: "Size of the database in bytes"
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: false
runAsUser: 1001
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1

View File

@@ -0,0 +1,103 @@
{
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"postgresqlUsername": {
"type": "string",
"title": "Admin user",
"form": true
},
"postgresqlPassword": {
"type": "string",
"title": "Password",
"form": true
},
"persistence": {
"type": "object",
"properties": {
"size": {
"type": "string",
"title": "Persistent Volume Size",
"form": true,
"render": "slider",
"sliderMin": 1,
"sliderMax": 100,
"sliderUnit": "Gi"
}
}
},
"resources": {
"type": "object",
"title": "Required Resources",
"description": "Configure resource requests",
"form": true,
"properties": {
"requests": {
"type": "object",
"properties": {
"memory": {
"type": "string",
"form": true,
"render": "slider",
"title": "Memory Request",
"sliderMin": 10,
"sliderMax": 2048,
"sliderUnit": "Mi"
},
"cpu": {
"type": "string",
"form": true,
"render": "slider",
"title": "CPU Request",
"sliderMin": 10,
"sliderMax": 2000,
"sliderUnit": "m"
}
}
}
}
},
"replication": {
"type": "object",
"form": true,
"title": "Replication Details",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enable Replication",
"form": true
},
"slaveReplicas": {
"type": "integer",
"title": "Slave Replicas",
"form": true,
"hidden": {
"condition": false,
"value": "replication.enabled"
}
}
}
},
"volumePermissions": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"form": true,
"title": "Enable Init Containers",
"description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup"
}
}
},
"metrics": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"title": "Configure metrics exporter",
"form": true
}
}
}
}
}

View File

@@ -0,0 +1,526 @@
## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
global:
postgresql: {}
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami PostgreSQL image version
## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
##
image:
registry: docker.io
repository: bitnami/postgresql
tag: 11.7.0-debian-10-r9
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Set to true if you would like to see extra information in the logs
## It turns on BASH and NAMI debugging in minideb
## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
debug: false
## String to partially override postgresql.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override postgresql.fullname template
##
# fullnameOverride:
##
## Init containers parameters:
## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Init container Security Context
## Note: the data folder is chowned to securityContext.runAsUser,
## not to the volumePermissions.securityContext.runAsUser below.
## When runAsUser is set to the special value "auto", the init container will try to chown the
## data folder to an auto-determined user and group, using the commands: `id -u`:`id -G | cut -d" " -f2`
## "auto" is especially useful for OpenShift, which uses SCCs with dynamic user IDs (and 0 is not allowed).
## You may want to use volumePermissions.securityContext.runAsUser="auto" in combination with
## pod securityContext.enabled=false and shmVolume.chmod.enabled=false
##
securityContext:
runAsUser: 0
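## For example, an illustrative sketch of the OpenShift-oriented combination
## described above (assumed values, not defaults):
# volumePermissions:
#   securityContext:
#     runAsUser: "auto"
# securityContext:
#   enabled: false
# shmVolume:
#   chmod:
#     enabled: false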
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Pod Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
enabled: false
## Name of an already existing service account. Setting this value disables the automatic service account creation.
# name:
replication:
enabled: false
user: repl_user
password: repl_password
slaveReplicas: 1
## Set synchronous commit mode: on, off, remote_apply, remote_write and local
## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL
synchronousCommit: "off"
## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
## NOTE: It cannot be > slaveReplicas
numSynchronousReplicas: 0
## Replication Cluster application name. Useful for defining multiple replication policies
applicationName: my_application
## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`)
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!)
# postgresqlPostgresPassword:
## PostgreSQL user (has superuser privileges if username is `postgres`)
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
postgresqlUsername: postgres
## PostgreSQL password
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
##
# postgresqlPassword:
## PostgreSQL password using existing secret
## existingSecret: secret
## Mount PostgreSQL secret as a file instead of passing environment variable
# usePasswordFile: false
## Create a database
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
##
# postgresqlDatabase:
## PostgreSQL data dir
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
postgresqlDataDir: /bitnami/postgresql/data
## An array to add extra environment variables
## For example:
## extraEnv:
## - name: FOO
## value: "bar"
##
# extraEnv:
extraEnv: []
## Name of a ConfigMap containing extra env vars
##
# extraEnvVarsCM:
## Specify extra initdb args
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
# postgresqlInitdbArgs:
## Specify a custom location for the PostgreSQL transaction log
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
# postgresqlInitdbWalDir:
## PostgreSQL configuration
## Specify runtime configuration parameters as a dict, using camelCase, e.g.
## {"sharedBuffers": "500MB"}
## Alternatively, you can put your postgresql.conf under the files/ directory
## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
##
# postgresqlConfiguration:
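## For example, an illustrative (assumed) snippet following the camelCase convention above:
# postgresqlConfiguration:
#   sharedBuffers: "500MB"
#   maxConnections: "100"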
## PostgreSQL extended configuration
## As above, but _appended_ to the main configuration
## Alternatively, you can put your *.conf under the files/conf.d/ directory
## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
##
# postgresqlExtendedConf:
## PostgreSQL client authentication configuration
## Specify content for pg_hba.conf
## Default: do not create pg_hba.conf
## Alternatively, you can put your pg_hba.conf under the files/ directory
# pgHbaConfiguration: |-
# local all all trust
# host all all localhost trust
# host mydatabase mysuser 192.168.0.0/24 md5
## ConfigMap with PostgreSQL configuration
## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
# configurationConfigMap:
## ConfigMap with PostgreSQL extended configuration
# extendedConfConfigMap:
## initdb scripts
## Specify dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
##
# initdbScripts:
# my_init_script.sh: |
# #!/bin/sh
# echo "Do something."
## ConfigMap with scripts to be run at first boot
## NOTE: This will override initdbScripts
# initdbScriptsConfigMap:
## Secret with scripts to be run at first boot (in case it contains sensitive information)
## NOTE: This can work along initdbScripts or initdbScriptsConfigMap
# initdbScriptsSecret:
## Specify the PostgreSQL username and password to execute the initdb scripts
# initdbUser:
# initdbPassword:
## Optional duration in seconds the pod needs to terminate gracefully.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
##
# terminationGracePeriodSeconds: 30
## LDAP configuration
##
ldap:
enabled: false
url: ""
server: ""
port: ""
prefix: ""
suffix: ""
baseDN: ""
bindDN: ""
bind_password:
search_attr: ""
search_filter: ""
scheme: ""
tls: false
## PostgreSQL service configuration
service:
## PostgreSQL service type
type: ClusterIP
# clusterIP: None
port: 5432
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required.
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
annotations: {}
## Set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
# loadBalancerIP:
## Load Balancer sources
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges:
# - 10.10.10.0/24
## Start master and slave(s) pod(s) without limitations on shm memory.
## By default docker and containerd (and possibly other container runtimes)
## limit `/dev/shm` to `64M` (see e.g. the
## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
## [containerd issue](https://github.com/containerd/containerd/issues/3654)),
## which may not be enough if PostgreSQL uses parallel workers heavily.
##
shmVolume:
## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove
## this limitation.
##
enabled: true
## Set to `true` to `chmod 777 /dev/shm` on an initContainer.
## This option is ignored if `volumePermissions.enabled` is `false`
##
chmod:
enabled: true
## PostgreSQL data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
##
# existingClaim:
## The path the volume will be mounted at, useful when using different
## PostgreSQL images.
##
mountPath: /bitnami/postgresql
## The subdirectory of the volume to mount to, useful in dev environments
## and one PV for multiple services.
##
subPath: ""
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: RollingUpdate
##
## PostgreSQL Master parameters
##
master:
## Node, affinity, tolerations, and priorityclass settings for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
nodeSelector: {}
affinity: {}
tolerations: []
labels: {}
annotations: {}
podLabels: {}
podAnnotations: {}
priorityClassName: ""
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## Additional PostgreSQL Master Volume mounts
##
extraVolumeMounts: []
## Additional PostgreSQL Master Volumes
##
extraVolumes: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
##
## PostgreSQL Slave parameters
##
slave:
## Node, affinity, tolerations, and priorityclass settings for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
nodeSelector: {}
affinity: {}
tolerations: []
labels: {}
annotations: {}
podLabels: {}
podAnnotations: {}
priorityClassName: ""
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## Additional PostgreSQL Slave Volume mounts
##
extraVolumeMounts: []
## Additional PostgreSQL Slave Volumes
##
extraVolumes: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 256Mi
cpu: 250m
networkPolicy:
## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
##
enabled: false
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the port PostgreSQL is listening
## on. When true, PostgreSQL will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## If explicitNamespacesSelector is missing or set to {}, only client Pods in the networkPolicy's namespace
## that match the other criteria (i.e. carry the correct client label) can reach the DB.
## If the DB should also be accessible to clients from other namespaces, use this
## LabelSelector to select those namespaces; note that the networkPolicy's own namespace must then be explicitly added as well.
##
# explicitNamespacesSelector:
# matchLabels:
# role: frontend
# matchExpressions:
# - {key: role, operator: In, values: [frontend]}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure metrics exporter
##
metrics:
enabled: false
# resources: {}
service:
type: ClusterIP
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9187"
loadBalancerIP:
serviceMonitor:
enabled: false
additionalLabels: {}
# namespace: monitoring
# interval: 30s
# scrapeTimeout: 10s
## Custom PrometheusRule to be defined
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules: []
## These are just example rules; please adapt them to your needs.
## Make sure to constrain the rules to the current postgresql service.
# - alert: HugeReplicationLag
# expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
# for: 1m
# labels:
# severity: critical
# annotations:
# description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
# summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
image:
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.8.0-debian-10-r28
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Define additional custom metrics
## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
# customMetrics:
# pg_database:
# query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
# metrics:
# - name:
# usage: "LABEL"
# description: "Name of the database"
# - size_bytes:
# usage: "GAUGE"
# description: "Size of the database in bytes"
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: false
runAsUser: 1001
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1

View File

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@@ -0,0 +1,17 @@
apiVersion: v1
appVersion: 3.5.5
description: Centralized service for maintaining configuration information, naming,
providing distributed synchronization, and providing group services.
home: https://zookeeper.apache.org/
icon: https://zookeeper.apache.org/images/zookeeper_small.gif
kubeVersion: ^1.10.0-0
maintainers:
- email: lachlan.evenson@microsoft.com
name: lachie83
- email: owensk@google.com
name: kow3ns
name: zookeeper
sources:
- https://github.com/apache/zookeeper
- https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper
version: 2.1.4

View File

@@ -0,0 +1,6 @@
approvers:
- lachie83
- kow3ns
reviewers:
- lachie83
- kow3ns

View File

@@ -0,0 +1,145 @@
# incubator/zookeeper
This helm chart provides an implementation of the ZooKeeper [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/) found in Kubernetes Contrib [Zookeeper StatefulSet](https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper).
## Prerequisites
* Kubernetes 1.10+
* PersistentVolume support on the underlying infrastructure
* A dynamic provisioner for the PersistentVolumes
* A familiarity with [Apache ZooKeeper 3.5.x](https://zookeeper.apache.org/doc/r3.5.5/)
## Chart Components
This chart will do the following:
* Create a fixed size ZooKeeper ensemble using a [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/).
* Create a [PodDisruptionBudget](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-disruption-budget/) so `kubectl drain` will respect the quorum size of the ensemble.
* Create a [Headless Service](https://kubernetes.io/docs/concepts/services-networking/service/) to control the domain of the ZooKeeper ensemble.
* Create a Service configured to connect to the available ZooKeeper instance on the configured client port.
* Optionally apply a [Pod Anti-Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) to spread the ZooKeeper ensemble across nodes.
* Optionally start JMX Exporter and Zookeeper Exporter containers inside Zookeeper pods.
* Optionally create a job which creates Zookeeper chroots (e.g. `/kafka1`); a sample values snippet follows this list.
* Optionally create a Prometheus ServiceMonitor for each enabled exporter container.
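For instance, the chroots job can be enabled with a values snippet like the following (an illustrative sketch using the `jobs.chroots` settings from [values.yaml](values.yaml)):
```yaml
jobs:
  chroots:
    enabled: true
    config:
      create:
        - /kafka1
```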
## Installing the Chart
You can install the chart with the release name `zookeeper` as below.
```console
$ helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator
$ helm install --name zookeeper incubator/zookeeper
```
If you do not specify a name, helm will select a name for you.
### Installed Components
You can use `kubectl get` to view all of the installed components.
```console
$ kubectl get all -l app=zookeeper
NAME: zookeeper
LAST DEPLOYED: Wed Apr 11 17:09:48 2018
NAMESPACE: default
STATUS: DEPLOYED
RESOURCES:
==> v1beta1/PodDisruptionBudget
NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE
zookeeper N/A 1 1 2m
==> v1/Service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
zookeeper-headless ClusterIP None <none> 2181/TCP,3888/TCP,2888/TCP 2m
zookeeper ClusterIP 10.98.179.165 <none> 2181/TCP 2m
==> v1beta1/StatefulSet
NAME DESIRED CURRENT AGE
zookeeper 3 3 2m
==> monitoring.coreos.com/v1/ServiceMonitor
NAME AGE
zookeeper 2m
zookeeper-exporter 2m
```
1. `statefulsets/zookeeper` is the StatefulSet created by the chart.
1. `po/zookeeper-<0|1|2>` are the Pods created by the StatefulSet. Each Pod has a single container running a ZooKeeper server.
1. `svc/zookeeper-headless` is the Headless Service used to control the network domain of the ZooKeeper ensemble.
1. `svc/zookeeper` is a Service that can be used by clients to connect to an available ZooKeeper server.
1. `servicemonitor/zookeeper` is a Prometheus ServiceMonitor which scrapes the jmx-exporter metrics endpoint
1. `servicemonitor/zookeeper-exporter` is a Prometheus ServiceMonitor which scrapes the zookeeper-exporter metrics endpoint
## Configuration
You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
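For example, to run a five-node ensemble (an illustrative value; see the replica-count guidance in [values.yaml](values.yaml)):
```console
$ helm install --name my-release --set replicaCount=5 incubator/zookeeper
```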
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
$ helm install --name my-release -f values.yaml incubator/zookeeper
```
## Default Values
- You can find all user-configurable settings, their defaults and commentary about them in [values.yaml](values.yaml).
## Deep Dive
## Image Details
The image used for this chart is based on Alpine 3.9.0.
## JVM Details
The Java Virtual Machine used for this chart is the OpenJDK JVM 8u192 JRE (headless).
## ZooKeeper Details
The chart defaults to ZooKeeper 3.5 (latest released version).
## Failover
You can test failover by killing the leader. Insert a key:
```console
$ kubectl exec zookeeper-0 -- bin/zkCli.sh create /foo bar;
$ kubectl exec zookeeper-2 -- bin/zkCli.sh get /foo;
```
Watch existing members:
```console
$ kubectl run --attach bbox --image=busybox --restart=Never -- sh -c 'while true; do for i in 0 1 2; do echo zk-${i} $(echo stats | nc <pod-name>-${i}.<headless-service-name> 2181 | grep Mode); sleep 1; done; done';
zk-2 Mode: follower
zk-0 Mode: follower
zk-1 Mode: leader
zk-2 Mode: follower
```
Delete Pods and wait for the StatefulSet controller to bring them back up:
```console
$ kubectl delete po -l app=zookeeper
$ kubectl get po --watch-only
NAME READY STATUS RESTARTS AGE
zookeeper-0 0/1 Running 0 35s
zookeeper-0 1/1 Running 0 50s
zookeeper-1 0/1 Pending 0 0s
zookeeper-1 0/1 Pending 0 0s
zookeeper-1 0/1 ContainerCreating 0 0s
zookeeper-1 0/1 Running 0 19s
zookeeper-1 1/1 Running 0 40s
zookeeper-2 0/1 Pending 0 0s
zookeeper-2 0/1 Pending 0 0s
zookeeper-2 0/1 ContainerCreating 0 0s
zookeeper-2 0/1 Running 0 19s
zookeeper-2 1/1 Running 0 41s
```
Check the previously inserted key:
```console
$ kubectl exec zookeeper-1 -- bin/zkCli.sh get /foo
ionid = 0x354887858e80035, negotiated timeout = 30000
WATCHER::
WatchedEvent state:SyncConnected type:None path:null
bar
```
## Scaling
ZooKeeper cannot be safely scaled in versions prior to 3.5.x.
## Limitations
* Only supports storage options that have backends for persistent volume claims.

View File

@@ -0,0 +1,7 @@
Thank you for installing ZooKeeper on your Kubernetes cluster. More information
about ZooKeeper can be found at https://zookeeper.apache.org/doc/current/
Your connection string should look like:
{{ template "zookeeper.fullname" . }}-0.{{ template "zookeeper.fullname" . }}-headless:{{ .Values.service.ports.client.port }},{{ template "zookeeper.fullname" . }}-1.{{ template "zookeeper.fullname" . }}-headless:{{ .Values.service.ports.client.port }},...
You can also use the client service {{ template "zookeeper.fullname" . }}:{{ .Values.service.ports.client.port }} to connect to an available ZooKeeper server.

View File

@@ -0,0 +1,46 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "zookeeper.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "zookeeper.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "zookeeper.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
The name of the zookeeper headless service.
*/}}
{{- define "zookeeper.headless" -}}
{{- printf "%s-headless" (include "zookeeper.fullname" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
The name of the zookeeper chroots job.
*/}}
{{- define "zookeeper.chroots" -}}
{{- printf "%s-chroots" (include "zookeeper.fullname" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}

View File

@@ -0,0 +1,19 @@
{{- if .Values.exporters.jmx.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-jmx-exporter
labels:
app: {{ template "zookeeper.name" . }}
chart: {{ template "zookeeper.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
config.yml: |-
hostPort: 127.0.0.1:{{ .Values.env.JMXPORT }}
lowercaseOutputName: {{ .Values.exporters.jmx.config.lowercaseOutputName }}
rules:
{{ .Values.exporters.jmx.config.rules | toYaml | indent 6 }}
ssl: false
startDelaySeconds: {{ .Values.exporters.jmx.config.startDelaySeconds }}
{{- end }}

View File

@@ -0,0 +1,110 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "zookeeper.fullname" . }}
labels:
app: {{ template "zookeeper.name" . }}
chart: {{ template "zookeeper.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: server
data:
ok: |
#!/bin/sh
zkServer.sh status
ready: |
#!/bin/sh
echo ruok | nc 127.0.0.1 ${1:-2181}
run: |
#!/bin/bash
set -a
ROOT=$(echo /apache-zookeeper-*)
ZK_USER=${ZK_USER:-"zookeeper"}
ZK_LOG_LEVEL=${ZK_LOG_LEVEL:-"INFO"}
ZK_DATA_DIR=${ZK_DATA_DIR:-"/data"}
ZK_DATA_LOG_DIR=${ZK_DATA_LOG_DIR:-"/data/log"}
ZK_CONF_DIR=${ZK_CONF_DIR:-"/conf"}
ZK_CLIENT_PORT=${ZK_CLIENT_PORT:-2181}
ZK_SERVER_PORT=${ZK_SERVER_PORT:-2888}
ZK_ELECTION_PORT=${ZK_ELECTION_PORT:-3888}
ZK_TICK_TIME=${ZK_TICK_TIME:-2000}
ZK_INIT_LIMIT=${ZK_INIT_LIMIT:-10}
ZK_SYNC_LIMIT=${ZK_SYNC_LIMIT:-5}
ZK_HEAP_SIZE=${ZK_HEAP_SIZE:-2G}
ZK_MAX_CLIENT_CNXNS=${ZK_MAX_CLIENT_CNXNS:-60}
ZK_MIN_SESSION_TIMEOUT=${ZK_MIN_SESSION_TIMEOUT:- $((ZK_TICK_TIME*2))}
ZK_MAX_SESSION_TIMEOUT=${ZK_MAX_SESSION_TIMEOUT:- $((ZK_TICK_TIME*20))}
ZK_SNAP_RETAIN_COUNT=${ZK_SNAP_RETAIN_COUNT:-3}
ZK_PURGE_INTERVAL=${ZK_PURGE_INTERVAL:-0}
ID_FILE="$ZK_DATA_DIR/myid"
ZK_CONFIG_FILE="$ZK_CONF_DIR/zoo.cfg"
LOG4J_PROPERTIES="$ZK_CONF_DIR/log4j.properties"
HOST=$(hostname)
DOMAIN=$(hostname -d)
JVMFLAGS="-Xmx$ZK_HEAP_SIZE -Xms$ZK_HEAP_SIZE"
APPJAR=$(echo $ROOT/*jar)
CLASSPATH="${ROOT}/lib/*:${APPJAR}:${ZK_CONF_DIR}:"
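# Derive this replica's ZooKeeper server ID from the ordinal suffix of the
# StatefulSet hostname (e.g. zookeeper-2 -> MY_ID 3); ZooKeeper IDs are 1-based.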
if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then
NAME=${BASH_REMATCH[1]}
ORD=${BASH_REMATCH[2]}
MY_ID=$((ORD+1))
else
echo "Failed to extract ordinal from hostname $HOST"
exit 1
fi
mkdir -p $ZK_DATA_DIR
mkdir -p $ZK_DATA_LOG_DIR
echo $MY_ID > $ID_FILE  # overwrite (not append) so restarts with persistent data stay idempotent
echo "clientPort=$ZK_CLIENT_PORT" >> $ZK_CONFIG_FILE
echo "dataDir=$ZK_DATA_DIR" >> $ZK_CONFIG_FILE
echo "dataLogDir=$ZK_DATA_LOG_DIR" >> $ZK_CONFIG_FILE
echo "tickTime=$ZK_TICK_TIME" >> $ZK_CONFIG_FILE
echo "initLimit=$ZK_INIT_LIMIT" >> $ZK_CONFIG_FILE
echo "syncLimit=$ZK_SYNC_LIMIT" >> $ZK_CONFIG_FILE
echo "maxClientCnxns=$ZK_MAX_CLIENT_CNXNS" >> $ZK_CONFIG_FILE
echo "minSessionTimeout=$ZK_MIN_SESSION_TIMEOUT" >> $ZK_CONFIG_FILE
echo "maxSessionTimeout=$ZK_MAX_SESSION_TIMEOUT" >> $ZK_CONFIG_FILE
echo "autopurge.snapRetainCount=$ZK_SNAP_RETAIN_COUNT" >> $ZK_CONFIG_FILE
echo "autopurge.purgeInterval=$ZK_PURGE_INTERVAL" >> $ZK_CONFIG_FILE
echo "4lw.commands.whitelist=*" >> $ZK_CONFIG_FILE
for (( i=1; i<=$ZK_REPLICAS; i++ ))
do
echo "server.$i=$NAME-$((i-1)).$DOMAIN:$ZK_SERVER_PORT:$ZK_ELECTION_PORT" >> $ZK_CONFIG_FILE
done
rm -f $LOG4J_PROPERTIES
echo "zookeeper.root.logger=$ZK_LOG_LEVEL, CONSOLE" >> $LOG4J_PROPERTIES
echo "zookeeper.console.threshold=$ZK_LOG_LEVEL" >> $LOG4J_PROPERTIES
echo "zookeeper.log.threshold=$ZK_LOG_LEVEL" >> $LOG4J_PROPERTIES
echo "zookeeper.log.dir=$ZK_DATA_LOG_DIR" >> $LOG4J_PROPERTIES
echo "zookeeper.log.file=zookeeper.log" >> $LOG4J_PROPERTIES
echo "zookeeper.log.maxfilesize=256MB" >> $LOG4J_PROPERTIES
echo "zookeeper.log.maxbackupindex=10" >> $LOG4J_PROPERTIES
echo "zookeeper.tracelog.dir=$ZK_DATA_LOG_DIR" >> $LOG4J_PROPERTIES
echo "zookeeper.tracelog.file=zookeeper_trace.log" >> $LOG4J_PROPERTIES
echo "log4j.rootLogger=\${zookeeper.root.logger}" >> $LOG4J_PROPERTIES
echo "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender" >> $LOG4J_PROPERTIES
echo "log4j.appender.CONSOLE.Threshold=\${zookeeper.console.threshold}" >> $LOG4J_PROPERTIES
echo "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout" >> $LOG4J_PROPERTIES
echo "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n" >> $LOG4J_PROPERTIES
if [ -n "$JMXDISABLE" ]
then
MAIN=org.apache.zookeeper.server.quorum.QuorumPeerMain
else
MAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMXPORT -Dcom.sun.management.jmxremote.authenticate=$JMXAUTH -Dcom.sun.management.jmxremote.ssl=$JMXSSL -Dzookeeper.jmx.log4j.disable=$JMXLOG4J org.apache.zookeeper.server.quorum.QuorumPeerMain"
fi
set -x
exec java -cp "$CLASSPATH" $JVMFLAGS $MAIN $ZK_CONFIG_FILE

View File

@@ -0,0 +1,65 @@
{{- if .Values.jobs.chroots.enabled }}
{{- $root := . }}
{{- $job := .Values.jobs.chroots }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ template "zookeeper.chroots" . }}
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded
labels:
app: {{ template "zookeeper.name" . }}
chart: {{ template "zookeeper.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: jobs
job: chroots
spec:
activeDeadlineSeconds: {{ $job.activeDeadlineSeconds }}
backoffLimit: {{ $job.backoffLimit }}
completions: {{ $job.completions }}
parallelism: {{ $job.parallelism }}
template:
metadata:
labels:
app: {{ template "zookeeper.name" . }}
release: {{ .Release.Name }}
component: jobs
job: chroots
spec:
restartPolicy: {{ $job.restartPolicy }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
containers:
- name: main
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- /bin/bash
- -o
- pipefail
- -euc
{{- $port := .Values.service.ports.client.port }}
- >
sleep 15;
export SERVER={{ template "zookeeper.fullname" $root }}:{{ $port }};
{{- range $job.config.create }}
echo '==> {{ . }}';
echo '====> Create chroot if does not exist.';
zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} get {{ . }} 2>&1 >/dev/null | grep 'cZxid'
|| zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} create {{ . }} "";
echo '====> Confirm chroot exists.';
zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} get {{ . }} 2>&1 >/dev/null | grep 'cZxid';
echo '====> Chroot exists.';
{{- end }}
env:
{{- range $key, $value := $job.env }}
- name: {{ $key | upper | replace "." "_" }}
value: {{ $value | quote }}
{{- end }}
resources:
{{ toYaml $job.resources | indent 12 }}
{{- end -}}

View File

@@ -0,0 +1,17 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ template "zookeeper.fullname" . }}
labels:
app: {{ template "zookeeper.name" . }}
chart: {{ template "zookeeper.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: server
spec:
selector:
matchLabels:
app: {{ template "zookeeper.name" . }}
release: {{ .Release.Name }}
component: server
{{ toYaml .Values.podDisruptionBudget | indent 2 }}

View File

@@ -0,0 +1,28 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "zookeeper.headless" . }}
labels:
app: {{ template "zookeeper.name" . }}
chart: {{ template "zookeeper.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.headless.annotations }}
annotations:
{{ .Values.headless.annotations | toYaml | trimSuffix "\n" | indent 4 }}
{{- end }}
spec:
clusterIP: None
{{- if .Values.headless.publishNotReadyAddresses }}
publishNotReadyAddresses: true
{{- end }}
ports:
{{- range $key, $port := .Values.ports }}
- name: {{ $key }}
port: {{ $port.containerPort }}
targetPort: {{ $key }}
protocol: {{ $port.protocol }}
{{- end }}
selector:
app: {{ template "zookeeper.name" . }}
release: {{ .Release.Name }}

View File

@@ -0,0 +1,41 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "zookeeper.fullname" . }}
labels:
app: {{ template "zookeeper.name" . }}
chart: {{ template "zookeeper.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.service.annotations }}
annotations:
{{- with .Values.service.annotations }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
{{- range $key, $value := .Values.service.ports }}
- name: {{ $key }}
{{ toYaml $value | indent 6 }}
{{- end }}
{{- if .Values.exporters.jmx.enabled }}
{{- range $key, $port := .Values.exporters.jmx.ports }}
- name: {{ $key }}
port: {{ $port.containerPort }}
targetPort: {{ $key }}
protocol: {{ $port.protocol }}
{{- end }}
{{- end}}
{{- if .Values.exporters.zookeeper.enabled }}
{{- range $key, $port := .Values.exporters.zookeeper.ports }}
- name: {{ $key }}
port: {{ $port.containerPort }}
targetPort: {{ $key }}
protocol: {{ $port.protocol }}
{{- end }}
{{- end}}
selector:
app: {{ template "zookeeper.name" . }}
release: {{ .Release.Name }}

View File

@@ -0,0 +1,56 @@
{{- if and .Values.exporters.jmx.enabled .Values.prometheus.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "zookeeper.fullname" . }}
{{- if .Values.prometheus.serviceMonitor.namespace }}
namespace: {{ .Values.prometheus.serviceMonitor.namespace }}
{{- end }}
labels:
{{ toYaml .Values.prometheus.serviceMonitor.selector | indent 4 }}
spec:
endpoints:
{{- range $key, $port := .Values.exporters.jmx.ports }}
- port: {{ $key }}
path: {{ $.Values.exporters.jmx.path }}
interval: {{ $.Values.exporters.jmx.serviceMonitor.interval }}
scrapeTimeout: {{ $.Values.exporters.jmx.serviceMonitor.scrapeTimeout }}
scheme: {{ $.Values.exporters.jmx.serviceMonitor.scheme }}
{{- end }}
selector:
matchLabels:
app: {{ include "zookeeper.name" . }}
release: {{ .Release.Name }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}
---
{{- if and .Values.exporters.zookeeper.enabled .Values.prometheus.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "zookeeper.fullname" . }}-exporter
{{- if .Values.prometheus.serviceMonitor.namespace }}
namespace: {{ .Values.prometheus.serviceMonitor.namespace }}
{{- end }}
labels:
{{ toYaml .Values.prometheus.serviceMonitor.selector | indent 4 }}
spec:
endpoints:
{{- range $key, $port := .Values.exporters.zookeeper.ports }}
- port: {{ $key }}
path: {{ $.Values.exporters.zookeeper.path }}
interval: {{ $.Values.exporters.zookeeper.serviceMonitor.interval }}
scrapeTimeout: {{ $.Values.exporters.zookeeper.serviceMonitor.scrapeTimeout }}
scheme: {{ $.Values.exporters.zookeeper.serviceMonitor.scheme }}
{{- end }}
selector:
matchLabels:
app: {{ include "zookeeper.name" . }}
release: {{ .Release.Name }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}

View File

@@ -0,0 +1,226 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "zookeeper.fullname" . }}
labels:
app: {{ template "zookeeper.name" . }}
chart: {{ template "zookeeper.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: server
spec:
serviceName: {{ template "zookeeper.headless" . }}
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ template "zookeeper.name" . }}
release: {{ .Release.Name }}
component: server
updateStrategy:
{{ toYaml .Values.updateStrategy | indent 4 }}
template:
metadata:
labels:
app: {{ template "zookeeper.name" . }}
release: {{ .Release.Name }}
component: server
{{- if .Values.podLabels }}
## Custom pod labels
{{- range $key, $value := .Values.podLabels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.podAnnotations }}
annotations:
## Custom pod annotations
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
securityContext:
{{ toYaml .Values.securityContext | indent 8 }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
containers:
- name: zookeeper
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.command }}
command: {{ range . }}
- {{ . | quote }}
{{- end }}
{{- end }}
ports:
{{- range $key, $port := .Values.ports }}
- name: {{ $key }}
{{ toYaml $port | indent 14 }}
{{- end }}
livenessProbe:
exec:
command:
- sh
- /config-scripts/ok
initialDelaySeconds: 20
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 2
successThreshold: 1
readinessProbe:
exec:
command:
- sh
- /config-scripts/ready
initialDelaySeconds: 20
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 2
successThreshold: 1
env:
- name: ZK_REPLICAS
value: {{ .Values.replicaCount | quote }}
{{- range $key, $value := .Values.env }}
- name: {{ $key | upper | replace "." "_" }}
value: {{ $value | quote }}
{{- end }}
{{- range $secret := .Values.secrets }}
{{- range $key := $secret.keys }}
- name: {{ (print $secret.name "_" $key) | upper }}
valueFrom:
secretKeyRef:
name: {{ $secret.name }}
key: {{ $key }}
{{- end }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 12 }}
volumeMounts:
- name: data
mountPath: /data
{{- range $secret := .Values.secrets }}
{{- if $secret.mountPath }}
{{- range $key := $secret.keys }}
- name: {{ $.Release.Name }}-{{ $secret.name }}
mountPath: {{ $secret.mountPath }}/{{ $key }}
subPath: {{ $key }}
readOnly: true
{{- end }}
{{- end }}
{{- end }}
- name: config
mountPath: /config-scripts
{{- if .Values.exporters.jmx.enabled }}
- name: jmx-exporter
image: "{{ .Values.exporters.jmx.image.repository }}:{{ .Values.exporters.jmx.image.tag }}"
imagePullPolicy: {{ .Values.exporters.jmx.image.pullPolicy }}
ports:
{{- range $key, $port := .Values.exporters.jmx.ports }}
- name: {{ $key }}
{{ toYaml $port | indent 14 }}
{{- end }}
livenessProbe:
{{ toYaml .Values.exporters.jmx.livenessProbe | indent 12 }}
readinessProbe:
{{ toYaml .Values.exporters.jmx.readinessProbe | indent 12 }}
env:
- name: SERVICE_PORT
value: {{ .Values.exporters.jmx.ports.jmxxp.containerPort | quote }}
{{- with .Values.exporters.jmx.env }}
{{- range $key, $value := . }}
- name: {{ $key | upper | replace "." "_" }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
resources:
{{ toYaml .Values.exporters.jmx.resources | indent 12 }}
volumeMounts:
- name: config-jmx-exporter
mountPath: /opt/jmx_exporter/config.yml
subPath: config.yml
{{- end }}
{{- if .Values.exporters.zookeeper.enabled }}
- name: zookeeper-exporter
image: "{{ .Values.exporters.zookeeper.image.repository }}:{{ .Values.exporters.zookeeper.image.tag }}"
imagePullPolicy: {{ .Values.exporters.zookeeper.image.pullPolicy }}
args:
- -bind-addr=:{{ .Values.exporters.zookeeper.ports.zookeeperxp.containerPort }}
- -metrics-path={{ .Values.exporters.zookeeper.path }}
- -zookeeper=localhost:{{ .Values.ports.client.containerPort }}
- -log-level={{ .Values.exporters.zookeeper.config.logLevel }}
- -reset-on-scrape={{ .Values.exporters.zookeeper.config.resetOnScrape }}
ports:
{{- range $key, $port := .Values.exporters.zookeeper.ports }}
- name: {{ $key }}
{{ toYaml $port | indent 14 }}
{{- end }}
livenessProbe:
{{ toYaml .Values.exporters.zookeeper.livenessProbe | indent 12 }}
readinessProbe:
{{ toYaml .Values.exporters.zookeeper.readinessProbe | indent 12 }}
env:
{{- range $key, $value := .Values.exporters.zookeeper.env }}
- name: {{ $key | upper | replace "." "_" }}
value: {{ $value | quote }}
{{- end }}
resources:
{{ toYaml .Values.exporters.zookeeper.resources | indent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "zookeeper.fullname" . }}
defaultMode: 0555
{{- range .Values.secrets }}
- name: {{ $.Release.Name }}-{{ .name }}
secret:
secretName: {{ .name }}
{{- end }}
{{- if .Values.exporters.jmx.enabled }}
- name: config-jmx-exporter
configMap:
name: {{ .Release.Name }}-jmx-exporter
{{- end }}
{{- if not .Values.persistence.enabled }}
- name: data
emptyDir: {}
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,300 @@
## As weighted quorums are not supported, it is imperative that an odd number of replicas
## be chosen. Moreover, the number of replicas should be either 1, 3, 5, or 7.
##
## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper#stateful-set
replicaCount: 3 # Desired quantity of ZooKeeper pods. This should always be 1, 3, 5, or 7.
podDisruptionBudget:
  maxUnavailable: 1 # Limits how many ZooKeeper pods may be unavailable due to voluntary disruptions.
terminationGracePeriodSeconds: 1800 # Duration in seconds a ZooKeeper pod needs to terminate gracefully.
updateStrategy:
type: RollingUpdate
## refs:
## - https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper
## - https://github.com/kubernetes/contrib/blob/master/statefulsets/zookeeper/Makefile#L1
image:
repository: zookeeper # Container image repository for zookeeper container.
tag: 3.5.5 # Container image tag for zookeeper container.
pullPolicy: IfNotPresent # Image pull criteria for zookeeper container.
service:
type: ClusterIP # Exposes zookeeper on a cluster-internal IP.
annotations: {} # Arbitrary non-identifying metadata for zookeeper service.
## AWS example for use with LoadBalancer service type.
# external-dns.alpha.kubernetes.io/hostname: zookeeper.cluster.local
# service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
# service.beta.kubernetes.io/aws-load-balancer-internal: "true"
ports:
client:
port: 2181 # Service port number for client port.
targetPort: client # Service target port for client port.
protocol: TCP # Service port protocol for client port.
## Headless service.
##
headless:
annotations: {}
# publishNotReadyAddresses defaults to false for backward compatibility.
# Set it to true to register DNS entries for unready pods, which helps in rare
# cases when the ensemble cannot form, DNS caching is enforced,
# or pods are stuck in a persistent crash loop.
publishNotReadyAddresses: false
ports:
client:
containerPort: 2181 # Port number for zookeeper container client port.
protocol: TCP # Protocol for zookeeper container client port.
election:
containerPort: 3888 # Port number for zookeeper container election port.
protocol: TCP # Protocol for zookeeper container election port.
server:
containerPort: 2888 # Port number for zookeeper container server port.
protocol: TCP # Protocol for zookeeper container server port.
resources: {} # Optionally specify how much CPU and memory (RAM) each zookeeper container needs.
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances that charts run on environments with limited
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
priorityClassName: ""
nodeSelector: {} # Node label-values required to run zookeeper pods.
tolerations: [] # Node taint overrides for zookeeper pods.
affinity: {} # Criteria by which pod label-values influence scheduling for zookeeper pods.
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# release: zookeeper
podAnnotations: {} # Arbitrary non-identifying metadata for zookeeper pods.
# prometheus.io/scrape: "true"
# prometheus.io/path: "/metrics"
# prometheus.io/port: "9141"
podLabels: {} # Key/value pairs that are attached to zookeeper pods.
# team: "developers"
# service: "zookeeper"
securityContext:
fsGroup: 1000
runAsUser: 1000
## Useful, if you want to use an alternate image.
command:
- /bin/bash
- -xec
- /config-scripts/run
## Useful if using any custom authorizer.
## Pass any secrets to the zookeeper pods. Each secret will be passed as an
## environment variable by default. The secret can also be mounted to a
## specific path (in addition to the environment variable) if required. Environment
## variable names are generated as: `<secretName>_<secretKey>` (all upper case)
# secrets:
# - name: myKafkaSecret
# keys:
# - username
# - password
# # mountPath: /opt/kafka/secret
# - name: myZkSecret
# keys:
# - user
# - pass
# mountPath: /opt/zookeeper/secret
persistence:
enabled: true
## zookeeper data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 5Gi
## Exporters query apps for metrics and make those metrics available for
## Prometheus to scrape.
exporters:
jmx:
enabled: false
image:
repository: sscaling/jmx-prometheus-exporter
tag: 0.3.0
pullPolicy: IfNotPresent
config:
lowercaseOutputName: false
## ref: https://github.com/prometheus/jmx_exporter/blob/master/example_configs/zookeeper.yaml
rules:
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)"
name: "zookeeper_$2"
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)"
name: "zookeeper_$3"
labels:
replicaId: "$2"
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)"
name: "zookeeper_$4"
labels:
replicaId: "$2"
memberType: "$3"
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)"
name: "zookeeper_$4_$5"
labels:
replicaId: "$2"
memberType: "$3"
startDelaySeconds: 30
env: {}
resources: {}
path: /metrics
ports:
jmxxp:
containerPort: 9404
protocol: TCP
livenessProbe:
httpGet:
path: /metrics
port: jmxxp
initialDelaySeconds: 30
periodSeconds: 15
timeoutSeconds: 60
failureThreshold: 8
successThreshold: 1
readinessProbe:
httpGet:
path: /metrics
port: jmxxp
initialDelaySeconds: 30
periodSeconds: 15
timeoutSeconds: 60
failureThreshold: 8
successThreshold: 1
serviceMonitor:
interval: 30s
scrapeTimeout: 30s
scheme: http
zookeeper:
## refs:
## - https://github.com/carlpett/zookeeper_exporter
## - https://hub.docker.com/r/josdotso/zookeeper-exporter/
## - https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/#zookeeper-metrics
enabled: false
image:
repository: josdotso/zookeeper-exporter
tag: v1.1.2
pullPolicy: IfNotPresent
config:
logLevel: info
resetOnScrape: "true"
env: {}
resources: {}
path: /metrics
ports:
zookeeperxp:
containerPort: 9141
protocol: TCP
livenessProbe:
httpGet:
path: /metrics
port: zookeeperxp
initialDelaySeconds: 30
periodSeconds: 15
timeoutSeconds: 60
failureThreshold: 8
successThreshold: 1
readinessProbe:
httpGet:
path: /metrics
port: zookeeperxp
initialDelaySeconds: 30
periodSeconds: 15
timeoutSeconds: 60
failureThreshold: 8
successThreshold: 1
serviceMonitor:
interval: 30s
scrapeTimeout: 30s
scheme: http
## ServiceMonitor configuration in case you are using Prometheus Operator
prometheus:
serviceMonitor:
## If true a ServiceMonitor for each enabled exporter will be installed
enabled: false
## The namespace where the ServiceMonitor(s) will be installed
# namespace: monitoring
## The selector the Prometheus instance is searching for
## [Default Prometheus Operator selector] (https://github.com/helm/charts/blob/f5a751f174263971fafd21eee4e35416d6612a3d/stable/prometheus-operator/templates/prometheus/prometheus.yaml#L74)
selector: {}
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper
env:
## Options related to JMX exporter.
## ref: https://github.com/apache/zookeeper/blob/master/bin/zkServer.sh#L36
JMXAUTH: "false"
JMXDISABLE: "false"
JMXPORT: 1099
JMXSSL: "false"
## The port on which the server will accept client requests.
ZOO_PORT: 2181
## The number of Ticks that an ensemble member is allowed to take to perform
## leader election.
ZOO_INIT_LIMIT: 5
ZOO_TICK_TIME: 2000
## The maximum number of concurrent client connections that
## a server in the ensemble will accept.
ZOO_MAX_CLIENT_CNXNS: 60
## The number of Ticks by which a follower may lag behind the ensemble's leader.
ZK_SYNC_LIMIT: 10
## The number of wall clock ms that corresponds to a Tick for the ensemble's
## internal time.
ZK_TICK_TIME: 2000
ZOO_AUTOPURGE_PURGEINTERVAL: 0
ZOO_AUTOPURGE_SNAPRETAINCOUNT: 3
ZOO_STANDALONE_ENABLED: false
jobs:
## ref: http://zookeeper.apache.org/doc/r3.4.10/zookeeperProgrammers.html#ch_zkSessions
chroots:
enabled: false
activeDeadlineSeconds: 300
backoffLimit: 5
completions: 1
config:
create: []
# - /kafka
# - /ureplicator
env: []
parallelism: 1
resources: {}
restartPolicy: Never

View File

@@ -0,0 +1,3 @@
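# Helper commands for the Druid release in the dsk-middle namespace:
# remove an existing release, install it fresh, or upgrade it in place with the override values.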
helm uninstall druid -n dsk-middle
helm install druid . -n dsk-middle -f override-values.yaml --create-namespace
helm upgrade druid . -n dsk-middle -f override-values.yaml
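# Typical lifecycle sketch: uninstall the old release, install fresh with the
# environment override file, or upgrade in place. Note that --create-namespace
# is honored by `helm install`; a plain `helm upgrade` assumes the namespace
# already exists.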

View File

@@ -0,0 +1,225 @@
configVars:
druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service", "prometheus-emitter","druid-s3-extensions"]'
druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid
# integration druid exporter configuration
druid_emitter: prometheus
druid_emitter_prometheus_strategy: exporter
druid_emitter_prometheus_port: "9000"
druid_monitoring_monitors: '["org.apache.druid.java.util.metrics.JvmMonitor", "org.apache.druid.java.util.metrics.JvmThreadsMonitor"]'
# Folder creation behaves oddly; revisit this option later.
druid_storage_type: s3
druid_storage_bucket: druid.dev.datasaker.io
druid_storage_baseKey: druid-data/segments
druid_s3_accessKey:
druid_s3_secretKey:
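# Left empty here, presumably on purpose: with druid-s3-extensions, blank
# credentials typically fall back to the default AWS credentials chain, e.g.
# an attached instance profile or an IRSA role.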
AWS_REGION: "ap-northeast-2"
druid_s3_forceGlobalBucketAccessEnabled: "false"
druid_storage_disableAcl: "true"
druid_indexer_logs_type: s3
druid_indexer_logs_s3Bucket: druid.dev.datasaker.io
druid_indexer_logs_s3Prefix: druid-data/logs
druid_indexer_logs_disableAcl: "true"
druid_s3_endpoint_signingRegion: "ap-northeast-2"
druid_s3_endpoint_url: "https://s3.ap-northeast-2.amazonaws.com/druid.dev.datasaker.io/druid-data"
druid_s3_protocol: "https"
druid_s3_enablePathStyleAccess: "true"
broker:
config:
DRUID_XMX: 8g
DRUID_XMS: 8g
DRUID_MAXDIRECTMEMORYSIZE: 12g
JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof"
druid_server_http_maxSubqueryRows: "1000000"
druid_server_http_numThreads: 60
druid_broker_http_numConnections: 50
druid_broker_http_maxQueuedBytes: '10MiB'
druid_processing_numMergeBuffers: 6
druid_processing_buffer_sizeBytes: '500MiB'
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/druid-size
operator: In
values:
- middle
coordinator:
config:
DRUID_XMX: 8g
DRUID_XMS: 8g
JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/druid-size
operator: In
values:
- middle
overlord:
javaOpts: "-Xms4G -Xmx4G"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/druid-size
operator: In
values:
- middle
historical:
config:
DRUID_XMX: 8g
DRUID_XMS: 8g
DRUID_MAXDIRECTMEMORYSIZE: 12g
JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof"
druid_server_http_numThreads: 60
druid_processing_numThreads: 16
druid_processing_numMergeBuffers: 4
druid_processing_buffer_sizeBytes: '500MiB'
druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"300g"}]'
druid_server_maxSize: '800g'
druid_historical_cache_useCache: true
druid_historical_cache_populateCache: true
druid_cache_type: 'caffeine'
druid_cache_sizeInBytes: '256MiB'
persistence:
enabled: false
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/druid-size
operator: In
values:
- large
middleManager:
config:
DRUID_XMX: 128m
DRUID_XMS: 128m
JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof"
druid_indexer_runner_javaOptsArray: '["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=4g", "-XX:+UseG1GC", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]'
druid_indexer_task_baseTaskDir: var/druid/task
druid_worker_capacity: 20
druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '500MiB'
druid_indexer_fork_property_druid_processing_numThreads: 4
druid_indexer_fork_property_druid_processing_numMergeBuffers: 2
persistence:
enabled: false
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/druid-size
operator: In
values:
- large
router:
config:
DRUID_XMX: 1g
DRUID_XMS: 1g
DRUID_MAXDIRECTMEMORYSIZE: 3g
JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof"
serviceType: NodePort
# nodePort attribute added in templates/router/service.yaml
nodePort: 30888
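# 30888 sits inside the default Kubernetes NodePort range (30000-32767), so no
# apiserver flag changes should be needed.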
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/druid-size
operator: In
values:
- small
# ------------------------------------------------------------------------------
# Zookeeper:
# ------------------------------------------------------------------------------
zookeeper:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
# ------------------------------------------------------------------------------
# postgres:
# ------------------------------------------------------------------------------
postgresql:
master:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
slave:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid

View File

@@ -0,0 +1,175 @@
configVars:
druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service"]'
druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid
# integration druid exporter configuration
druid_emitter: http
druid_emitter_http_recipientBaseUrl: http://prometheus-druid-exporter:8080/druid
broker:
config:
DRUID_XMX: 4g
DRUID_XMS: 4g
DRUID_MAXDIRECTMEMORYSIZE: 1g
druid_server_http_maxSubqueryRows: "1000000"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
coordinator:
config:
DRUID_XMX: 4g
DRUID_XMS: 4g
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
overlord:
javaOpts: "-Xms4G -Xmx4G"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
historical:
config:
DRUID_XMX: 10g
DRUID_XMS: 10g
druid_processing_numThreads: 3
druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"500g"}]'
druid_server_maxSize: '500g'
persistence:
size: "500Gi"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
middleManager:
config:
DRUID_XMX: 1g
DRUID_XMS: 1g
druid_indexer_runner_javaOptsArray: '["-server", "-Xms4g", "-Xmx4g", "-XX:MaxDirectMemorySize=6g", "-XX:+UseStringDeduplication", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]'
druid_worker_capacity: 8
druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '330MiB'
druid_indexer_fork_property_druid_processing_numThreads: 1
druid_indexer_fork_property_druid_processing_numMergeBuffers: 2
persistence:
size: "500Gi"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
router:
serviceType: NodePort
# nodePort attribute added in templates/router/service.yaml
nodePort: 30888
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
# ------------------------------------------------------------------------------
# Zookeeper:
# ------------------------------------------------------------------------------
zookeeper:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
# ------------------------------------------------------------------------------
# postgres:
# ------------------------------------------------------------------------------
postgresql:
master:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
slave:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid

View File

@@ -0,0 +1,194 @@
configVars:
druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service", "druid-s3-extensions"]'
druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid
# integration druid exporter configuration
druid_emitter: http
druid_emitter_http_recipientBaseUrl: http://prometheus-druid-exporter:8080/druid
# Folder creation behaves oddly; revisit this option later.
druid_storage_type: s3
druid_storage_bucket: druid.dev.datasaker.io
druid_storage_baseKey: druid-data/segments
druid_s3_accessKey:
druid_s3_secretKey:
AWS_REGION: "ap-northeast-2"
druid_s3_forceGlobalBucketAccessEnabled: "false"
druid_storage_disableAcl: "true"
druid_indexer_logs_type: s3
druid_indexer_logs_s3Bucket: druid.dev.datasaker.io
druid_indexer_logs_s3Prefix: druid-data/logs
druid_indexer_logs_disableAcl: "true"
druid_s3_endpoint_signingRegion: "ap-northeast-2"
druid_s3_endpoint_url: "https://s3.ap-northeast-2.amazonaws.com/druid.dev.datasaker.io/druid-data"
druid_s3_protocol: "https"
druid_s3_enablePathStyleAccess: "true"
broker:
config:
DRUID_XMX: 4g
DRUID_XMS: 4g
DRUID_MAXDIRECTMEMORYSIZE: 1g
druid_server_http_maxSubqueryRows: "1000000"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kops.k8s.io/instancegroup
operator: In
values:
- dev-data-druid-b
coordinator:
config:
DRUID_XMX: 4g
DRUID_XMS: 4g
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kops.k8s.io/instancegroup
operator: In
values:
- dev-data-druid-b
overlord:
javaOpts: "-Xms4G -Xmx4G"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kops.k8s.io/instancegroup
operator: In
values:
- dev-data-druid-b
historical:
config:
DRUID_XMX: 10g
DRUID_XMS: 10g
druid_processing_numThreads: 3
druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"500g"}]'
druid_server_maxSize: '500g'
persistence:
enabled: false
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kops.k8s.io/instancegroup
operator: In
values:
- dev-data-druid-c
middleManager:
config:
DRUID_XMX: 1g
DRUID_XMS: 1g
druid_indexer_runner_javaOptsArray: '["-server", "-Xms4g", "-Xmx4g", "-XX:MaxDirectMemorySize=6g", "-XX:+UseStringDeduplication", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]'
druid_worker_capacity: 8
druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '330MiB'
druid_indexer_fork_property_druid_processing_numThreads: 1
druid_indexer_fork_property_druid_processing_numMergeBuffers: 2
persistence:
enabled: false
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kops.k8s.io/instancegroup
operator: In
values:
- dev-data-druid-c
router:
serviceType: NodePort
# nodePort attribute added in templates/router/service.yaml
nodePort: 30888
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kops.k8s.io/instancegroup
operator: In
values:
- dev-data-druid-b
# ------------------------------------------------------------------------------
# Zookeeper:
# ------------------------------------------------------------------------------
zookeeper:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
# ------------------------------------------------------------------------------
# postgres:
# ------------------------------------------------------------------------------
postgresql:
master:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
slave:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid

View File

@@ -0,0 +1,171 @@
configVars:
druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service"]'
druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid
broker:
config:
DRUID_XMX: 4g
DRUID_XMS: 4g
DRUID_MAXDIRECTMEMORYSIZE: 1g
druid_server_http_maxSubqueryRows: "1000000"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
coordinator:
config:
DRUID_XMX: 4g
DRUID_XMS: 4g
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
overlord:
javaOpts: "-Xms4G -Xmx4G"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
historical:
config:
DRUID_XMX: 2g
DRUID_XMS: 2g
druid_processing_numThreads: 3
druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"500g"}]'
druid_server_maxSize: '500g'
persistence:
size: "500Gi"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
middleManager:
config:
DRUID_XMX: 9g
DRUID_XMS: 9g
druid_indexer_runner_javaOptsArray: '["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=3g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]'
druid_worker_capacity: 8
druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '330MiB'
druid_indexer_fork_property_druid_processing_numThreads: 1
druid_indexer_fork_property_druid_processing_numMergeBuffers: 2
persistence:
size: "500Gi"
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
router:
serviceType: NodePort
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
# ------------------------------------------------------------------------------
# Zookeeper:
# ------------------------------------------------------------------------------
zookeeper:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
# ------------------------------------------------------------------------------
# postgres:
# ------------------------------------------------------------------------------
postgresql:
master:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
slave:
tolerations:
- key: "dev/data-druid"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-druid
# Secrets

View File

@@ -0,0 +1,38 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
1. Get the router URL by running these commands:
{{- if .Values.router.ingress.enabled }}
{{- range .Values.router.ingress.hosts }}
http{{ if $.Values.router.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.router.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.router.serviceType }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "druid.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.router.serviceType }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc -w {{ include "druid.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "druid.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.router.port }}
{{- else if contains "ClusterIP" .Values.router.serviceType }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "druid.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:{{ .Values.router.port }}
{{- end }}

View File

@@ -0,0 +1,100 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "druid.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "druid.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "druid.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified historical name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "druid.historical.fullname" -}}
{{ template "druid.fullname" . }}-{{ .Values.historical.name }}
{{- end -}}
{{/*
Create a default fully qualified middleManager name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "druid.middleManager.fullname" -}}
{{ template "druid.fullname" . }}-{{ .Values.middleManager.name }}
{{- end -}}
{{/*
Create a default fully qualified broker name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "druid.broker.fullname" -}}
{{ template "druid.fullname" . }}-{{ .Values.broker.name }}
{{- end -}}
{{/*
Create a default fully qualified overlord name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "druid.overlord.fullname" -}}
{{ template "druid.fullname" . }}-{{ .Values.overlord.name }}
{{- end -}}
{{/*
Create a default fully qualified coordinator name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "druid.coordinator.fullname" -}}
{{ template "druid.fullname" . }}-{{ .Values.coordinator.name }}
{{- end -}}
{{/*
Create a default fully qualified router name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "druid.router.fullname" -}}
{{ template "druid.fullname" . }}-{{ .Values.router.name }}
{{- end -}}
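{{/*
Illustrative example (not from the chart docs): for a release named "druid",
the chart name "druid" is contained in the release name, so "druid.fullname"
resolves to just "druid", and the per-component helpers above yield names like
"druid-historical" or "druid-broker" (given the default component names).
*/}}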

View File

@@ -0,0 +1,99 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.broker.enabled -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "druid.broker.fullname" . }}
labels:
app: {{ include "druid.name" . }}
chart: {{ include "druid.chart" . }}
component: {{ .Values.broker.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.broker.replicaCount }}
selector:
matchLabels:
app: {{ include "druid.name" . }}
release: {{ .Release.Name }}
component: {{ .Values.broker.name }}
template:
metadata:
labels:
app: {{ include "druid.name" . }}
release: {{ .Release.Name }}
component: {{ .Values.broker.name }}
{{- with .Values.broker.podAnnotations }}
annotations:
{{ toYaml . | indent 8 }}
{{- end }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: [ "broker" ]
env:
{{- range $key, $val := .Values.broker.config }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end}}
envFrom:
- configMapRef:
name: {{ template "druid.name" . }}
ports:
- name: http
containerPort: {{ .Values.broker.port }}
protocol: TCP
{{ if .Values.configVars.druid_emitter_prometheus_port }}
- name: metric
containerPort: {{ .Values.configVars.druid_emitter_prometheus_port }}
protocol: TCP
{{ end }}
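{{/* The "metric" container port above is rendered only when
.Values.configVars.druid_emitter_prometheus_port is set, which matches the
override files that switch druid_emitter to "prometheus". */}}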
livenessProbe:
initialDelaySeconds: 60
httpGet:
path: /status/health
port: {{ .Values.broker.port }}
readinessProbe:
initialDelaySeconds: 60
httpGet:
path: /status/health
port: {{ .Values.broker.port }}
resources:
{{ toYaml .Values.broker.resources | indent 12 }}
{{- with .Values.broker.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.broker.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.broker.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.image.pullSecrets }}
imagePullSecrets:
{{ toYaml . | indent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,58 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.broker.ingress.enabled -}}
{{- $fullName := include "druid.broker.fullname" . -}}
{{- $ingressPath := .Values.broker.ingress.path -}}
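{{/* Note: extensions/v1beta1 Ingress (used here and in the other ingress
templates below) was removed in Kubernetes 1.22; on newer clusters this would
need porting to networking.k8s.io/v1. */}}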
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app: {{ include "druid.name" . }}
chart: {{ include "druid.chart" . }}
component: {{ .Values.broker.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.broker.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.broker.ingress.tls }}
tls:
{{- range .Values.broker.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.broker.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}

View File

@@ -0,0 +1,48 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.broker.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "druid.broker.fullname" . }}
labels:
app: {{ include "druid.name" . }}
chart: {{ include "druid.chart" . }}
component: {{ .Values.broker.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.broker.serviceType }}
ports:
- port: {{ .Values.broker.port }}
targetPort: http
protocol: TCP
name: http
{{ if .Values.configVars.druid_emitter_prometheus_port }}
- port: {{ .Values.configVars.druid_emitter_prometheus_port }}
targetPort: metric
protocol: TCP
name: metric
{{ end }}
selector:
app: {{ include "druid.name" . }}
release: {{ .Release.Name }}
component: {{ .Values.broker.name }}
{{- end }}

View File

@@ -0,0 +1,52 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.configMap.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "druid.name" . }}
labels:
app: {{ template "druid.name" . }}
chart: {{ template "druid.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
{{ toYaml .Values.configVars | indent 2 }}
{{- if .Values.zookeeper.enabled }}
druid_zk_service_host: {{ .Release.Name }}-zookeeper-headless:2181
{{- else }}
druid_zk_service_host: {{ .Values.zkHosts }}
{{- end }}
{{- if .Values.mysql.enabled }}
druid_metadata_storage_type: mysql
druid_metadata_storage_connector_connectURI: jdbc:mysql://{{ .Release.Name }}-mysql:3306/{{ .Values.mysql.mysqlDatabase}}
druid_metadata_storage_connector_user: {{ .Values.mysql.mysqlUser }}
druid_metadata_storage_connector_password: {{ .Values.mysql.mysqlPassword }}
{{- end }}
{{- if .Values.postgresql.enabled }}
druid_metadata_storage_type: postgresql
druid_metadata_storage_connector_connectURI: jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port}}/{{ .Values.postgresql.postgresqlDatabase }}
druid_metadata_storage_connector_user: {{ .Values.postgresql.postgresqlUsername }}
druid_metadata_storage_connector_password: {{ .Values.postgresql.postgresqlPassword }}
{{- end }}
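{{/* Caution: several override files also set
druid_metadata_storage_connector_connectURI inside configVars, so the block
above can emit the same key twice in the rendered ConfigMap; YAML parsers
generally keep the last occurrence, but deduplicating would be safer. */}}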
{{- if .Values.gCloudStorage.enabled }}
GOOGLE_APPLICATION_CREDENTIALS: /var/secrets/google/key.json
{{- end }}
{{- end }}

View File

@@ -0,0 +1,110 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.coordinator.enabled -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "druid.coordinator.fullname" . }}
labels:
app: {{ include "druid.name" . }}
chart: {{ include "druid.chart" . }}
component: {{ .Values.coordinator.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.coordinator.replicaCount }}
selector:
matchLabels:
app: {{ include "druid.name" . }}
release: {{ .Release.Name }}
component: {{ .Values.coordinator.name }}
template:
metadata:
labels:
app: {{ include "druid.name" . }}
release: {{ .Release.Name }}
component: {{ .Values.coordinator.name }}
{{- with .Values.coordinator.podAnnotations }}
annotations:
{{ toYaml . | indent 8 }}
{{- end }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: [ "coordinator" ]
env:
{{- range $key, $val := .Values.coordinator.config }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end}}
envFrom:
- configMapRef:
name: {{ template "druid.name" . }}
ports:
- name: http
containerPort: {{ .Values.coordinator.port }}
protocol: TCP
{{ if .Values.configVars.druid_emitter_prometheus_port }}
- name: metric
containerPort: {{ .Values.configVars.druid_emitter_prometheus_port }}
protocol: TCP
{{ end }}
livenessProbe:
initialDelaySeconds: 60
httpGet:
path: /status/health
port: {{ .Values.coordinator.port }}
readinessProbe:
initialDelaySeconds: 60
httpGet:
path: /status/health
port: {{ .Values.coordinator.port }}
resources:
{{ toYaml .Values.coordinator.resources | indent 12 }}
volumeMounts:
{{- if .Values.gCloudStorage.enabled }}
- name: google-cloud-key
mountPath: /var/secrets/google
{{- end }}
volumes:
{{- if .Values.gCloudStorage.enabled }}
- name: google-cloud-key
secret:
secretName: {{ .Values.gCloudStorage.secretName }}
{{- end }}
{{- with .Values.coordinator.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.coordinator.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.coordinator.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.image.pullSecrets }}
imagePullSecrets:
{{ toYaml . | indent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,58 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.coordinator.ingress.enabled -}}
{{- $fullName := include "druid.coordinator.fullname" . -}}
{{- $ingressPath := .Values.coordinator.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app: {{ include "druid.name" . }}
chart: {{ include "druid.chart" . }}
component: {{ .Values.coordinator.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.coordinator.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.coordinator.ingress.tls }}
tls:
{{- range .Values.coordinator.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.coordinator.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}

View File

@@ -0,0 +1,48 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.coordinator.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "druid.coordinator.fullname" . }}
labels:
app: {{ include "druid.name" . }}
chart: {{ include "druid.chart" . }}
component: {{ .Values.coordinator.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.coordinator.serviceType }}
ports:
- port: {{ .Values.coordinator.port }}
targetPort: http
protocol: TCP
name: http
{{ if .Values.configVars.druid_emitter_prometheus_port }}
- port: {{ .Values.configVars.druid_emitter_prometheus_port }}
targetPort: metric
protocol: TCP
name: metric
{{ end }}
selector:
app: {{ include "druid.name" . }}
release: {{ .Release.Name }}
component: {{ .Values.coordinator.name }}
{{- end }}

Some files were not shown because too many files have changed in this diff.