Teleport Chart 업데이트

This commit is contained in:
ByeonJungHun
2024-01-22 12:12:36 +09:00
parent fde2f5f8a7
commit 7c1afcf6d7
163 changed files with 15784 additions and 71 deletions

View File

@@ -0,0 +1,3 @@
{{- /* Proxy config for "aws" chart mode: currently identical to the common proxy config. */}}
{{- define "teleport-cluster.proxy.config.aws" -}}
{{ include "teleport-cluster.proxy.config.common" . }}
{{- end -}}

View File

@@ -0,0 +1,3 @@
{{- /* Proxy config for "azure" chart mode: currently identical to the common proxy config. */}}
{{- define "teleport-cluster.proxy.config.azure" -}}
{{ include "teleport-cluster.proxy.config.common" . }}
{{- end -}}

View File

@@ -0,0 +1,76 @@
{{- /* Common proxy_service teleport.yaml shared by the aws, azure, gcp and standalone modes.
     Renders a v3 config that joins the auth pods via the Kubernetes join method. */}}
{{- define "teleport-cluster.proxy.config.common" -}}
{{- /* Deprecated top-level logLevel takes precedence over log.level; defaults to INFO. */}}
{{- $logLevel := (coalesce .Values.logLevel .Values.log.level "INFO") -}}
version: v3
teleport:
  join_params:
    method: kubernetes
    token_name: "{{.Release.Name}}-proxy"
  auth_server: "{{ include "teleport-cluster.auth.serviceFQDN" . }}:3025"
  log:
    severity: {{ $logLevel }}
    output: {{ .Values.log.output }}
    format:
      output: {{ .Values.log.format }}
      extra_fields: {{ .Values.log.extraFields | toJson }}
ssh_service:
  enabled: false
auth_service:
  enabled: false
proxy_service:
  enabled: true
{{- if .Values.publicAddr }}
  public_addr: {{- toYaml .Values.publicAddr | nindent 8 }}
{{- else }}
  public_addr: '{{ required "clusterName is required in chart values" .Values.clusterName }}:443'
{{- end }}
{{- /* In multiplex mode everything is served on the single TLS port, so the
     per-protocol listeners below are only emitted for the separate-port mode. */}}
{{- if ne .Values.proxyListenerMode "multiplex" }}
  listen_addr: 0.0.0.0:3023
{{- if .Values.sshPublicAddr }}
  ssh_public_addr: {{- toYaml .Values.sshPublicAddr | nindent 8 }}
{{- end }}
  tunnel_listen_addr: 0.0.0.0:3024
{{- if .Values.tunnelPublicAddr }}
  tunnel_public_addr: {{- toYaml .Values.tunnelPublicAddr | nindent 8 }}
{{- end }}
  kube_listen_addr: 0.0.0.0:3026
{{- if .Values.kubePublicAddr }}
  kube_public_addr: {{- toYaml .Values.kubePublicAddr | nindent 8 }}
{{- end }}
  mysql_listen_addr: 0.0.0.0:3036
{{- if .Values.mysqlPublicAddr }}
  mysql_public_addr: {{- toYaml .Values.mysqlPublicAddr | nindent 8 }}
{{- end }}
{{- if .Values.separatePostgresListener }}
  postgres_listen_addr: 0.0.0.0:5432
{{- if .Values.postgresPublicAddr }}
  postgres_public_addr: {{- toYaml .Values.postgresPublicAddr | nindent 8 }}
{{- else }}
  postgres_public_addr: {{ .Values.clusterName }}:5432
{{- end }}
{{- end }}
{{- if .Values.separateMongoListener }}
  mongo_listen_addr: 0.0.0.0:27017
{{- if .Values.mongoPublicAddr }}
  mongo_public_addr: {{- toYaml .Values.mongoPublicAddr | nindent 8 }}
{{- else }}
  mongo_public_addr: {{ .Values.clusterName }}:27017
{{- end }}
{{- end }}
{{- end }}
{{- /* TLS source: a cert-manager/user-provided secret mounted at /etc/teleport-tls
     wins; otherwise fall back to ACME when enabled. */}}
{{- if or .Values.highAvailability.certManager.enabled .Values.tls.existingSecretName }}
  https_keypairs:
  - key_file: /etc/teleport-tls/tls.key
    cert_file: /etc/teleport-tls/tls.crt
  https_keypairs_reload_interval: 12h
{{- else if .Values.acme }}
  acme:
    enabled: {{ .Values.acme }}
    email: {{ required "acmeEmail is required in chart values" .Values.acmeEmail }}
{{- if .Values.acmeURI }}
    uri: {{ .Values.acmeURI }}
{{- end }}
{{- end }}
{{- /* Only emitted for Teleport >= 13.2.0, which introduced this option, and only when behind an ingress. */}}
{{- if and .Values.ingress.enabled (semverCompare ">= 13.2.0-0" (include "teleport-cluster.version" .)) }}
  trust_x_forwarded_for: true
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,3 @@
{{- /* Proxy config for "gcp" chart mode: currently identical to the common proxy config. */}}
{{- define "teleport-cluster.proxy.config.gcp" -}}
{{ include "teleport-cluster.proxy.config.common" . }}
{{- end -}}

View File

@@ -0,0 +1,12 @@
{{- /* Minimal proxy config for "scratch" mode: only flips the service toggles;
     the rest of the config is expected to come from teleportConfig overrides. */}}
{{- define "teleport-cluster.proxy.config.scratch" -}}
ssh_service:
  enabled: false
auth_service:
  enabled: false
proxy_service:
  enabled: true
{{- end -}}
{{- /* "custom" mode was removed in chart v12 (proxy/auth split); render a hard failure
     pointing at the migration guide instead of silently producing a broken config. */}}
{{- define "teleport-cluster.proxy.config.custom" -}}
{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-v12/" }}
{{- end -}}

View File

@@ -0,0 +1,3 @@
{{- /* Proxy config for "standalone" chart mode: currently identical to the common proxy config. */}}
{{- define "teleport-cluster.proxy.config.standalone" -}}
{{ include "teleport-cluster.proxy.config.common" . }}
{{- end -}}

View File

@@ -0,0 +1,27 @@
{{- /* cert-manager Certificate for the proxy: requests a cert covering the cluster
     domain and its wildcard, stored in the `teleport-tls` secret the pods mount. */}}
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if $proxy.highAvailability.certManager.enabled }}
{{- /* Fix: error wording now matches the chart's other `required` messages ("in chart values"). */}}
{{- $domain := (required "clusterName is required in chart values when certManager is enabled" $proxy.clusterName) }}
{{- $domainWildcard := printf "*.%s" (required "clusterName is required in chart values when certManager is enabled" $proxy.clusterName) }}
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ .Release.Name }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
spec:
  secretName: teleport-tls
  {{- if $proxy.highAvailability.certManager.addCommonName }}
  commonName: {{ quote $domain }}
  {{- end }}
  dnsNames:
  - {{ quote $domain }}
  - {{ quote $domainWildcard }}
  issuerRef:
    name: {{ required "highAvailability.certManager.issuerName is required in chart values" $proxy.highAvailability.certManager.issuerName }}
    kind: {{ required "highAvailability.certManager.issuerKind is required in chart values" $proxy.highAvailability.certManager.issuerKind }}
    group: {{ required "highAvailability.certManager.issuerGroup is required in chart values" $proxy.highAvailability.certManager.issuerGroup }}
  {{- /* Optional annotations copied onto the secret cert-manager creates. */}}
  {{- with $proxy.annotations.certSecret }}
  secretTemplate:
    annotations: {{- toYaml . | nindent 6 }}
  {{- end }}
{{- end }}

View File

@@ -0,0 +1,16 @@
{{- /* ConfigMap holding the rendered teleport.yaml consumed by the proxy pods. */}}
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- /* chartMode picks one of the teleport-cluster.proxy.config.* templates defined in this chart. */}}
{{- $configTemplate := printf "teleport-cluster.proxy.config.%s" $proxy.chartMode -}}
{{- if (contains ":" $proxy.clusterName) -}}
{{- fail "clusterName must not contain a colon, you can override the cluster's public address with publicAddr" -}}
{{- end -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-proxy
  namespace: {{ .Release.Namespace }}
  {{- if $proxy.annotations.config }}
  annotations: {{- toYaml $proxy.annotations.config | nindent 4 }}
  {{- end }}
data:
  {{- /* `|2` pins the literal-block indentation; user teleportConfig is shallow-merged
       over the parsed generated config before re-serializing at nindent 4. */}}
  teleport.yaml: |2
{{- mustMergeOverwrite (include $configTemplate . | fromYaml) $proxy.teleportConfig | toYaml | nindent 4 -}}

View File

@@ -0,0 +1,307 @@
{{- /* Deployment running the Teleport proxy pods. Proxy-specific values override the
     top-level ones via mustMergeOverwrite. */}}
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- /* The proxy can only run multiple replicas when TLS certs come from cert-manager
     or an existing secret (mutually exclusive, checked below). */}}
{{- $replicable := or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName -}}
{{- $projectedServiceAccountToken := semverCompare ">=1.20.0-0" .Capabilities.KubeVersion.Version }}
# Deployment is {{ if not $replicable }}not {{end}}replicable
{{- if and $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }}
{{- fail "Cannot set both highAvailability.certManager.enabled and tls.existingSecretName, choose one or the other" }}
{{- end }}
{{- if and $proxy.acme $proxy.tls.existingSecretName }}
{{- fail "Cannot set both acme.enabled and tls.existingSecretName, choose one or the other" }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-proxy
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
{{- if $proxy.annotations.deployment }}
  annotations: {{- toYaml $proxy.annotations.deployment | nindent 4 }}
{{- end }}
spec:
{{- /*
If proxies cannot be replicated we use a single replica.
By default we want to upgrade all users to at least 2 replicas, if they had a higher replica count we take it.
If a user wants to force a single proxy, they can use the `proxy` specific override.
$proxySpecificHA is a hack to avoid .Values.proxy.highAvailability to be nil, which would cause a fail when
accessing .Values.proxy.highAvailability.replicaCount.
*/}}
{{- if $replicable }}
  {{- $proxySpecificHA := default (dict) .Values.proxy.highAvailability }}
  {{- if $proxySpecificHA.replicaCount }}
  replicas: {{ $proxySpecificHA.replicaCount }}
  {{- else }}
  replicas: {{ max .Values.highAvailability.replicaCount 2 }}
  {{- end }}
  {{- if $proxy.highAvailability.minReadySeconds }}
  minReadySeconds: {{ $proxy.highAvailability.minReadySeconds }}
  {{- end }}
{{- else }}
  replicas: 1
{{- end }}
  selector:
    matchLabels: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
        # ConfigMap checksum, to recreate the pod on config changes.
        checksum/config: {{ include (print $.Template.BasePath "/proxy/config.yaml") . | sha256sum }}
        {{- if $proxy.annotations.pod }}
        {{- toYaml $proxy.annotations.pod | nindent 8 }}
        {{- end }}
      labels: {{- include "teleport-cluster.proxy.labels" . | nindent 8 }}
    spec:
      {{- if $proxy.nodeSelector }}
      nodeSelector: {{- toYaml $proxy.nodeSelector | nindent 8 }}
      {{- end }}
      affinity:
        {{- if $proxy.affinity }}
        {{- if $proxy.highAvailability.requireAntiAffinity }}
        {{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }}
        {{- end }}
        {{- toYaml $proxy.affinity | nindent 8 }}
        {{- else }}
        podAntiAffinity:
        {{- if $proxy.highAvailability.requireAntiAffinity }}
          {{- /* Hard requirement: never co-schedule two proxy pods of this release on one node. */}}
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app.kubernetes.io/instance
                operator: In
                values:
                - {{ .Release.Name }}
              - key: app.kubernetes.io/component
                operator: In
                values:
                - proxy
            topologyKey: "kubernetes.io/hostname"
        {{- else if gt (int $proxy.highAvailability.replicaCount) 1 }}
          {{- /* Soft preference: spread proxy replicas across nodes when running more than one. */}}
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 50
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app.kubernetes.io/instance
                  operator: In
                  values:
                  - {{ .Release.Name }}
                - key: app.kubernetes.io/component
                  operator: In
                  values:
                  - proxy
              topologyKey: "kubernetes.io/hostname"
        {{- end }}
        {{- end }}
      {{- if $proxy.tolerations }}
      tolerations: {{- toYaml $proxy.tolerations | nindent 6 }}
      {{- end }}
      {{- if $proxy.imagePullSecrets }}
      imagePullSecrets:
      {{- toYaml $proxy.imagePullSecrets | nindent 6 }}
      {{- end }}
      initContainers:
        # wait-auth-update is responsible for holding off the proxy rollout until all auths are running the
        # next major version in case of major upgrade.
        - name: wait-auth-update
          image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . }}'
          command:
            - teleport
            - wait
            - no-resolve
            - '{{ include "teleport-cluster.auth.previousVersionServiceName" . }}.{{ .Release.Namespace }}.svc.cluster.local'
          {{- if $proxy.securityContext }}
          securityContext: {{- toYaml $proxy.securityContext | nindent 12 }}
          {{- end }}
        {{- if $proxy.initContainers }}
        {{- /* User-provided initContainers; they inherit the proxy resources when they set none. */}}
        {{- range $initContainer := $proxy.initContainers }}
        {{- if and (not $initContainer.resources) $proxy.resources }}
        {{- $_ := set $initContainer "resources" $proxy.resources }}
        {{- end }}
        {{- list $initContainer | toYaml | nindent 8 }}
        {{- /* Note: this will break if the user sets volumeMounts to its initContainer */}}
          volumeMounts:
          {{- if or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }}
          - mountPath: /etc/teleport-tls
            name: "teleport-tls"
            readOnly: true
          {{- end }}
          - mountPath: /etc/teleport
            name: "config"
            readOnly: true
          - mountPath: /var/lib/teleport
            name: "data"
          {{- if $proxy.extraVolumeMounts }}
          {{- toYaml $proxy.extraVolumeMounts | nindent 10 }}
          {{- end }}
        {{- end }}
        {{- end }}
      containers:
      - name: "teleport"
        image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . }}'
        imagePullPolicy: {{ $proxy.imagePullPolicy }}
        {{- if or $proxy.extraEnv $proxy.tls.existingCASecretName }}
        env:
        {{- if (gt (len $proxy.extraEnv) 0) }}
        {{- toYaml $proxy.extraEnv | nindent 8 }}
        {{- end }}
        {{- if $proxy.tls.existingCASecretName }}
        {{- /* Points TLS clients inside the pod at the user-provided CA mounted below. */}}
        - name: SSL_CERT_FILE
          value: /etc/teleport-tls-ca/ca.pem
        {{- end }}
        {{- end }}
        args:
        - "--diag-addr=0.0.0.0:3000"
        {{- if $proxy.insecureSkipProxyTLSVerify }}
        - "--insecure"
        {{- end }}
        {{- if $proxy.extraArgs }}
        {{- toYaml $proxy.extraArgs | nindent 8 }}
        {{- end }}
        ports:
        - name: tls
          containerPort: 3080
          protocol: TCP
        {{- if $proxy.enterprise }}
        - name: proxypeering
          containerPort: 3021
          protocol: TCP
        {{- end }}
        {{- /* Separate per-protocol ports only exist outside multiplex mode; matches config.common. */}}
        {{- if ne $proxy.proxyListenerMode "multiplex" }}
        - name: sshproxy
          containerPort: 3023
          protocol: TCP
        - name: sshtun
          containerPort: 3024
          protocol: TCP
        - name: kube
          containerPort: 3026
          protocol: TCP
        - name: mysql
          containerPort: 3036
          protocol: TCP
        {{- if $proxy.separatePostgresListener }}
        - name: postgres
          containerPort: 5432
          protocol: TCP
        {{- end }}
        {{- if $proxy.separateMongoListener }}
        - name: mongo
          containerPort: 27017
          protocol: TCP
        {{- end }}
        {{- end }}
        - name: diag
          containerPort: 3000
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: diag
          initialDelaySeconds: 5 # wait 5s for agent to start
          periodSeconds: 5 # poll health every 5s
          failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s)
          timeoutSeconds: {{ $proxy.probeTimeoutSeconds }}
        readinessProbe:
          httpGet:
            path: /readyz
            port: diag
          initialDelaySeconds: 5 # wait 5s for agent to register
          periodSeconds: 5 # poll health every 5s
          failureThreshold: 12 # consider agent unhealthy after 60s (12 * 5s)
          timeoutSeconds: {{ $proxy.probeTimeoutSeconds }}
        lifecycle:
          # waiting during preStop ensures no new request will hit the Terminating pod
          # on clusters using kube-proxy (kube-proxy syncs the node iptables rules every 30s)
          preStop:
            exec:
              command:
              - teleport
              - wait
              - duration
              - 30s
          {{- if $proxy.postStart.command }}
          postStart:
            exec:
              command: {{ toYaml $proxy.postStart.command | nindent 14 }}
          {{- end }}
        {{- if $proxy.resources }}
        resources:
          {{- toYaml $proxy.resources | nindent 10 }}
        {{- end }}
        {{- if $proxy.securityContext }}
        securityContext: {{- toYaml $proxy.securityContext | nindent 10 }}
        {{- end }}
        volumeMounts:
        {{- if or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }}
        - mountPath: /etc/teleport-tls
          name: "teleport-tls"
          readOnly: true
        {{- end }}
        {{- if $proxy.tls.existingCASecretName }}
        - mountPath: /etc/teleport-tls-ca
          name: "teleport-tls-ca"
          readOnly: true
        {{- end }}
        - mountPath: /etc/teleport
          name: "config"
          readOnly: true
        - mountPath: /var/lib/teleport
          name: "data"
        {{- if $projectedServiceAccountToken }}
        - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          name: proxy-serviceaccount-token
          readOnly: true
        {{- end }}
        {{- if $proxy.extraVolumeMounts }}
        {{- toYaml $proxy.extraVolumeMounts | nindent 8 }}
        {{- end }}
      {{- if $projectedServiceAccountToken }}
      {{- /* Auto-mount is disabled because the projected volume above replaces it. */}}
      automountServiceAccountToken: false
      {{- end }}
      volumes:
      {{- if $projectedServiceAccountToken }}
      # This projected token volume mimics the `automountServiceAccountToken`
      # behaviour but defaults to a 1h TTL instead of 1y.
      - name: proxy-serviceaccount-token
        projected:
          sources:
          - serviceAccountToken:
              path: token
          - configMap:
              items:
              - key: ca.crt
                path: ca.crt
              name: kube-root-ca.crt
          - downwardAPI:
              items:
              - path: "namespace"
                fieldRef:
                  fieldPath: metadata.namespace
      {{- end }}
      {{- if $proxy.highAvailability.certManager.enabled }}
      - name: teleport-tls
        secret:
          secretName: teleport-tls
      {{- else if $proxy.tls.existingSecretName }}
      - name: teleport-tls
        secret:
          secretName: {{ $proxy.tls.existingSecretName }}
      {{- end }}
      {{- if $proxy.tls.existingCASecretName }}
      - name: teleport-tls-ca
        secret:
          secretName: {{ $proxy.tls.existingCASecretName }}
      {{- end }}
      - name: "config"
        configMap:
          name: {{ .Release.Name }}-proxy
      - name: "data"
        emptyDir: {}
      {{- if $proxy.extraVolumes }}
      {{- toYaml $proxy.extraVolumes | nindent 6 }}
      {{- end }}
      {{- if $proxy.priorityClassName }}
      priorityClassName: {{ $proxy.priorityClassName }}
      {{- end }}
      serviceAccountName: {{ include "teleport-cluster.proxy.serviceAccountName" . }}
      terminationGracePeriodSeconds: {{ $proxy.terminationGracePeriodSeconds }}

View File

@@ -0,0 +1,57 @@
{{- /* Ingress in front of the proxy. Requires multiplex mode since all protocols must share port 443. */}}
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if .Values.ingress.enabled -}}
{{- if (not (eq .Values.proxyListenerMode "multiplex")) -}}
{{- fail "Use of an ingress requires TLS multiplexing to be enabled, so you must also set proxyListenerMode=multiplex - see https://goteleport.com/docs/architecture/tls-routing/" -}}
{{- end -}}
{{- /* Hosts default to the clusterName when no explicit publicAddr is given. */}}
{{- $publicAddr := coalesce .Values.publicAddr (list .Values.clusterName) -}}
{{- /* Trim ports from all public addresses if present */ -}}
{{- range $publicAddr -}}
{{- $address := . -}}
{{- if (contains ":" $address) -}}
{{- $split := split ":" $address -}}
{{- $address = $split._0 -}}
{{- /* Replace the host:port entry with the bare host in the list. */ -}}
{{- $publicAddr = append (mustWithout $publicAddr .) $address -}}
{{- end -}}
{{- $wildcard := printf "*.%s" $address -}}
{{- /* Add wildcard versions of all public addresses to ingress, unless 1) suppressed or 2) wildcard version already exists */ -}}
{{- if and (not $.Values.ingress.suppressAutomaticWildcards) (not (hasPrefix "*." $address)) (not (has $wildcard $publicAddr)) -}}
{{- $publicAddr = append $publicAddr (printf "*.%s" $address) -}}
{{- end -}}
{{- end -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ .Release.Name }}-proxy
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
  {{- if $proxy.annotations.ingress }}
  annotations: {{- toYaml $proxy.annotations.ingress | nindent 4 }}
  {{- end }}
spec:
  {{- /* User-supplied spec fields (ingressClassName, etc.) are merged in first. */}}
  {{- with $proxy.ingress.spec }}
  {{- toYaml . | nindent 2 }}
  {{- end }}
  tls:
  - hosts:
    {{- range $publicAddr }}
    - {{ quote . }}
    {{- end }}
    {{- if $proxy.highAvailability.certManager.enabled }}
    secretName: teleport-tls
    {{- else if $proxy.tls.existingSecretName }}
    secretName: {{ $proxy.tls.existingSecretName }}
    {{- end }}
  rules:
  {{- range $publicAddr }}
  - host: {{ quote . }}
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: {{ $.Release.Name }}
            port:
              number: 443
  {{- end }}
{{- end }}

View File

@@ -0,0 +1,17 @@
{{- /* PodDisruptionBudget keeping a minimum number of proxy pods through voluntary disruptions. */}}
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if $proxy.highAvailability.podDisruptionBudget.enabled }}
{{- /* Prefer policy/v1 when the cluster advertises it; otherwise fall back to the beta API. */}}
{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: {{ .Release.Name }}-proxy
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
spec:
  minAvailable: {{ $proxy.highAvailability.podDisruptionBudget.minAvailable }}
  selector:
    matchLabels: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@@ -0,0 +1,16 @@
{{- /* Hook ConfigMap with the rendered teleport.yaml, consumed by the predeploy validation Job. */}}
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if $proxy.validateConfigOnDeploy }}
{{- $configTemplate := printf "teleport-cluster.proxy.config.%s" $proxy.chartMode -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-proxy-test
  namespace: {{ .Release.Namespace }}
  annotations:
    {{- /* Created at weight 4, just before the weight-5 test Job; deleted once the hook succeeds. */}}
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-weight": "4"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
data:
  {{- /* Same rendering as the regular proxy ConfigMap: teleportConfig merged over the generated config. */}}
  teleport.yaml: |2
{{- mustMergeOverwrite (include $configTemplate . | fromYaml) $proxy.teleportConfig | toYaml | nindent 4 -}}
{{- end }}

View File

@@ -0,0 +1,99 @@
{{- /* Pre-install/pre-upgrade hook Job running `teleport configure --test` against the
     rendered proxy config so a broken config fails the deploy early. */}}
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if $proxy.validateConfigOnDeploy }}
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ .Release.Name }}-proxy-test
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.labels" . | nindent 4 }}
  annotations:
    {{- /* Runs at weight 5, after the weight-4 test ConfigMap exists. */}}
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-weight": "5"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
spec:
  backoffLimit: 1
  template:
    spec:
      {{- if $proxy.affinity }}
      affinity: {{- toYaml $proxy.affinity | nindent 8 }}
      {{- end }}
      {{- if $proxy.tolerations }}
      tolerations: {{- toYaml $proxy.tolerations | nindent 6 }}
      {{- end }}
      {{- if $proxy.imagePullSecrets }}
      imagePullSecrets:
      {{- toYaml $proxy.imagePullSecrets | nindent 6 }}
      {{- end }}
      restartPolicy: Never
      containers:
      - name: "teleport"
        image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . }}'
        imagePullPolicy: {{ $proxy.imagePullPolicy }}
        {{- if $proxy.resources }}
        resources:
          {{- toYaml $proxy.resources | nindent 10 }}
        {{- end }}
        {{- if or $proxy.extraEnv $proxy.tls.existingCASecretName }}
        env:
        {{- if (gt (len $proxy.extraEnv) 0) }}
        {{- toYaml $proxy.extraEnv | nindent 8 }}
        {{- end }}
        {{- if $proxy.tls.existingCASecretName }}
        - name: SSL_CERT_FILE
          value: /etc/teleport-tls-ca/ca.pem
        {{- end }}
        {{- end }}
        {{- /* Dry-run config validation; does not start any service. */}}
        command:
        - "teleport"
        - "configure"
        args:
        - "--test"
        - "/etc/teleport/teleport.yaml"
        {{- if $proxy.securityContext }}
        securityContext: {{- toYaml $proxy.securityContext | nindent 10 }}
        {{- end }}
        volumeMounts:
        {{- if or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }}
        - mountPath: /etc/teleport-tls
          name: "teleport-tls"
          readOnly: true
        {{- end }}
        {{- if $proxy.tls.existingCASecretName }}
        - mountPath: /etc/teleport-tls-ca
          name: "teleport-tls-ca"
          readOnly: true
        {{- end }}
        - mountPath: /etc/teleport
          name: "config"
          readOnly: true
        - mountPath: /var/lib/teleport
          name: "data"
        {{- if $proxy.extraVolumeMounts }}
        {{- toYaml $proxy.extraVolumeMounts | nindent 8 }}
        {{- end }}
      volumes:
      {{- if $proxy.highAvailability.certManager.enabled }}
      - name: teleport-tls
        secret:
          secretName: teleport-tls
          # this avoids deadlock during initial setup
          optional: true
      {{- else if $proxy.tls.existingSecretName }}
      - name: teleport-tls
        secret:
          secretName: {{ $proxy.tls.existingSecretName }}
      {{- end }}
      {{- if $proxy.tls.existingCASecretName }}
      - name: teleport-tls-ca
        secret:
          secretName: {{ $proxy.tls.existingCASecretName }}
      {{- end }}
      - name: "config"
        configMap:
          name: {{ .Release.Name }}-proxy-test
      - name: "data"
        emptyDir: {}
      {{- if $proxy.extraVolumes }}
      {{- toYaml $proxy.extraVolumes | nindent 6 }}
      {{- end }}
{{- end }}

View File

@@ -0,0 +1,70 @@
{{- /* Public Service exposing the Teleport proxy (LoadBalancer by default, ClusterIP behind an ingress). */}}
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- /* When an AWS ACM cert annotation is present the LB terminates TLS, so the backend protocol is "ssl". */}}
{{- $backendProtocol := ternary "ssl" "tcp" (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-ssl-cert") -}}
{{- /* Fail early if proxy service type is set to LoadBalancer when ingress.enabled=true */ -}}
{{- if and $proxy.ingress.enabled (eq $proxy.service.type "LoadBalancer") -}}
{{- fail "proxy.service.type must not be LoadBalancer when using an ingress - any load balancer should be provisioned by your ingress controller. Set proxy.service.type=ClusterIP instead" -}}
{{- end -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
{{- if (or ($proxy.annotations.service) (eq $proxy.chartMode "aws")) }}
  annotations:
    {{- /* Default AWS NLB annotations, each only emitted when the user has not set it themselves. */}}
    {{- if and (eq $proxy.chartMode "aws") (not $proxy.ingress.enabled) }}
    {{- if not (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-backend-protocol")}}
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: {{ $backendProtocol }}
    {{- end }}
    {{- if not (or (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled") (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-attributes"))}}
    service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
    {{- end }}
    {{- if not (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-type")}}
    service.beta.kubernetes.io/aws-load-balancer-type: nlb
    {{- end }}
    {{- end }}
    {{- if $proxy.annotations.service }}
    {{- toYaml $proxy.annotations.service | nindent 4 }}
    {{- end }}
{{- end }}
spec:
  type: {{ default "LoadBalancer" $proxy.service.type }}
  {{- with $proxy.service.spec }}
  {{- toYaml . | nindent 2 }}
  {{- end }}
  ports:
  - name: tls
    port: 443
    targetPort: 3080
    protocol: TCP
  {{- /* Per-protocol ports mirror the container ports of the proxy Deployment outside multiplex mode. */}}
  {{- if ne $proxy.proxyListenerMode "multiplex" }}
  - name: sshproxy
    port: 3023
    targetPort: 3023
    protocol: TCP
  - name: k8s
    port: 3026
    targetPort: 3026
    protocol: TCP
  - name: sshtun
    port: 3024
    targetPort: 3024
    protocol: TCP
  - name: mysql
    port: 3036
    targetPort: 3036
    protocol: TCP
  {{- if $proxy.separatePostgresListener }}
  - name: postgres
    port: 5432
    targetPort: 5432
    protocol: TCP
  {{- end }}
  {{- if $proxy.separateMongoListener }}
  - name: mongo
    port: 27017
    targetPort: 27017
    protocol: TCP
  {{- end }}
  {{- end }}
  selector: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,11 @@
{{- /* ServiceAccount for the proxy pods; skipped when the user brings their own
     (serviceAccount.create=false). */}}
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if $proxy.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "teleport-cluster.proxy.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  {{- if $proxy.annotations.serviceAccount }}
  annotations: {{- toYaml $proxy.annotations.serviceAccount | nindent 4 }}
  {{- end -}}
{{- end }}