Add directory structure and each service
35
helm/teleport-cluster/templates/NOTES.txt
Normal file
@@ -0,0 +1,35 @@
{{- if .Values.highAvailability.certManager.enabled }}
You have enabled cert-manager support in high availability mode.

There may be a short delay before Teleport pods start while an ACME certificate is issued.
You can check the status of the certificate with `kubectl -n {{ .Release.Namespace }} describe certificate/{{ .Release.Name }}`

NOTE: For certificates to be provisioned, you must also install cert-manager (https://cert-manager.io/docs/) and configure an appropriate
Issuer with access to your DNS provider to handle DNS01 challenges (https://cert-manager.io/docs/configuration/acme/dns01/#supported-dns01-providers)

For more information, please see the Helm guides in the Teleport docs (https://goteleport.com/docs/kubernetes-access/helm/guides/)
{{- end }}

{{- if and .Values.podSecurityPolicy.enabled (semverCompare "<1.23.0-0" .Capabilities.KubeVersion.Version) }}

SECURITY WARNING: Kubernetes 1.25 removes PodSecurityPolicy support and Helm
doesn't support upgrading from 1.24 to 1.25 with PSPs enabled. Since version 12
the `teleport-cluster` chart doesn't deploy PSPs on Kubernetes 1.23 or older.
Instead, we recommend you configure Pod Security Admission for
the namespace "{{.Release.Namespace}}" by adding the label
`pod-security.kubernetes.io/enforce: baseline` on the namespace resource.

See https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-kubernetes-1-25-psp/

To remove this warning, explicitly set "podSecurityPolicy.enabled=false".
{{- end }}

{{- if .Values.teleportVersionOverride }}

DANGER: `teleportVersionOverride` MUST NOT be used to control the Teleport version.
This chart is designed to run Teleport version {{ .Chart.AppVersion }}.
You will face compatibility issues trying to run a different Teleport version with it.

If you want to run Teleport version {{.Values.teleportVersionOverride}},
you should use `helm install --version {{.Values.teleportVersionOverride}}` instead.
{{- end }}
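
For context, the cert-manager branch of these notes only renders when high-availability values along these lines are set (the cluster and issuer names below are hypothetical; the keys come from this chart):

  # hypothetical values.yaml excerpt
  clusterName: teleport.example.com
  highAvailability:
    certManager:
      enabled: true
      issuerName: letsencrypt-production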
91
helm/teleport-cluster/templates/_helpers.tpl
Normal file
@@ -0,0 +1,91 @@
{{/*
Create the name of the service account to use
if serviceAccount is not defined or serviceAccount.name is empty, use .Release.Name
*/}}
{{- define "teleport-cluster.auth.serviceAccountName" -}}
{{- coalesce .Values.serviceAccount.name .Release.Name -}}
{{- end -}}

{{- define "teleport-cluster.proxy.serviceAccountName" -}}
{{- coalesce .Values.serviceAccount.name .Release.Name -}}-proxy
{{- end -}}

{{- define "teleport-cluster.version" -}}
{{- coalesce .Values.teleportVersionOverride .Chart.Version }}
{{- end -}}

{{- define "teleport-cluster.majorVersion" -}}
{{- (semver (include "teleport-cluster.version" .)).Major -}}
{{- end -}}

{{- define "teleport-cluster.previousMajorVersion" -}}
{{- sub (include "teleport-cluster.majorVersion" . | atoi ) 1 -}}
{{- end -}}

{{/* Proxy selector labels */}}
{{- define "teleport-cluster.proxy.selectorLabels" -}}
app.kubernetes.io/name: '{{ default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}'
app.kubernetes.io/instance: '{{ .Release.Name }}'
app.kubernetes.io/component: 'proxy'
{{- end -}}

{{/* Proxy all labels */}}
{{- define "teleport-cluster.proxy.labels" -}}
{{ include "teleport-cluster.proxy.selectorLabels" . }}
helm.sh/chart: '{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}'
app.kubernetes.io/managed-by: '{{ .Release.Service }}'
app.kubernetes.io/version: '{{ include "teleport-cluster.version" . }}'
teleport.dev/majorVersion: '{{ include "teleport-cluster.majorVersion" . }}'
{{- end -}}

{{/* Auth pods selector labels */}}
{{- define "teleport-cluster.auth.selectorLabels" -}}
app.kubernetes.io/name: '{{ default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}'
app.kubernetes.io/instance: '{{ .Release.Name }}'
app.kubernetes.io/component: 'auth'
{{- end -}}

{{/* All pods all labels */}}
{{- define "teleport-cluster.labels" -}}
{{ include "teleport-cluster.selectorLabels" . }}
helm.sh/chart: '{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}'
app.kubernetes.io/managed-by: '{{ .Release.Service }}'
app.kubernetes.io/version: '{{ include "teleport-cluster.version" . }}'
teleport.dev/majorVersion: '{{ include "teleport-cluster.majorVersion" . }}'
{{- end -}}

{{/* All pods selector labels */}}
{{- define "teleport-cluster.selectorLabels" -}}
app.kubernetes.io/name: '{{ default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}'
app.kubernetes.io/instance: '{{ .Release.Name }}'
{{- end -}}

{{/* Auth pods all labels */}}
{{- define "teleport-cluster.auth.labels" -}}
{{ include "teleport-cluster.auth.selectorLabels" . }}
helm.sh/chart: '{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}'
app.kubernetes.io/managed-by: '{{ .Release.Service }}'
app.kubernetes.io/version: '{{ include "teleport-cluster.version" . }}'
teleport.dev/majorVersion: '{{ include "teleport-cluster.majorVersion" . }}'
{{- end -}}

{{/* ServiceNames are limited to 63 characters, we might have to truncate the ReleaseName
to make sure the auth serviceName won't exceed this limit */}}
{{- define "teleport-cluster.auth.serviceName" -}}
{{- .Release.Name | trunc 58 | trimSuffix "-" -}}-auth
{{- end -}}

{{- define "teleport-cluster.auth.currentVersionServiceName" -}}
{{- .Release.Name | trunc 54 | trimSuffix "-" -}}-auth-v{{ include "teleport-cluster.majorVersion" . }}
{{- end -}}

{{- define "teleport-cluster.auth.previousVersionServiceName" -}}
{{- .Release.Name | trunc 54 | trimSuffix "-" -}}-auth-v{{ include "teleport-cluster.previousMajorVersion" . }}
{{- end -}}


{{/* In most places we want to use the FQDN instead of relying on Kubernetes ndots behaviour
for performance reasons */}}
{{- define "teleport-cluster.auth.serviceFQDN" -}}
{{ include "teleport-cluster.auth.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local
{{- end -}}
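
To make the helpers concrete: for a hypothetical release named `teleport` using chart version 12.1.0, `teleport-cluster.auth.serviceName` resolves to `teleport-auth`, `teleport-cluster.auth.previousVersionServiceName` to `teleport-auth-v11`, and `teleport-cluster.auth.labels` renders approximately:

  app.kubernetes.io/name: 'teleport-cluster'
  app.kubernetes.io/instance: 'teleport'
  app.kubernetes.io/component: 'auth'
  helm.sh/chart: 'teleport-cluster-12.1.0'
  app.kubernetes.io/managed-by: 'Helm'
  app.kubernetes.io/version: '12.1.0'
  teleport.dev/majorVersion: '12'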
26
helm/teleport-cluster/templates/auth/_config.aws.tpl
Normal file
@@ -0,0 +1,26 @@
{{- define "teleport-cluster.auth.config.aws" -}}
{{ include "teleport-cluster.auth.config.common" . }}
storage:
  type: dynamodb
  region: {{ required "aws.region is required in chart values" .Values.aws.region }}
  table_name: {{ required "aws.backendTable is required in chart values" .Values.aws.backendTable }}
  {{- if .Values.aws.auditLogMirrorOnStdout }}
  audit_events_uri: ['dynamodb://{{ required "aws.auditLogTable is required in chart values" .Values.aws.auditLogTable }}', 'stdout://']
  {{- else }}
  audit_events_uri: ['dynamodb://{{ required "aws.auditLogTable is required in chart values" .Values.aws.auditLogTable }}']
  {{- end }}
  audit_sessions_uri: s3://{{ required "aws.sessionRecordingBucket is required in chart values" .Values.aws.sessionRecordingBucket }}
  continuous_backups: {{ required "aws.backups is required in chart values" .Values.aws.backups }}
  {{- if .Values.aws.dynamoAutoScaling }}
  auto_scaling: true
  billing_mode: provisioned
  read_min_capacity: {{ required "aws.readMinCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.readMinCapacity }}
  read_max_capacity: {{ required "aws.readMaxCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.readMaxCapacity }}
  read_target_value: {{ required "aws.readTargetValue is required when aws.dynamoAutoScaling is true" .Values.aws.readTargetValue }}
  write_min_capacity: {{ required "aws.writeMinCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.writeMinCapacity }}
  write_max_capacity: {{ required "aws.writeMaxCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.writeMaxCapacity }}
  write_target_value: {{ required "aws.writeTargetValue is required when aws.dynamoAutoScaling is true" .Values.aws.writeTargetValue }}
  {{- else }}
  auto_scaling: false
  {{- end }}
{{- end -}}
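
As a sketch, hypothetical values such as aws.region=us-west-2, aws.backendTable=teleport-backend, aws.auditLogTable=teleport-events, aws.sessionRecordingBucket=teleport-sessions and aws.backups=true (auto-scaling left disabled) would make this template render roughly:

  storage:
    type: dynamodb
    region: us-west-2
    table_name: teleport-backend
    audit_events_uri: ['dynamodb://teleport-events']
    audit_sessions_uri: s3://teleport-sessions
    continuous_backups: true
    auto_scaling: false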
38
helm/teleport-cluster/templates/auth/_config.azure.tpl
Normal file
@@ -0,0 +1,38 @@
{{/* Helper to build the database connection string, adds parameters if needed */}}
{{- define "teleport-cluster.auth.config.azure.conn_string.query" }}
{{- if .Values.azure.databasePoolMaxConnections -}}
{{- printf "sslmode=verify-full&pool_max_conns=%v" .Values.azure.databasePoolMaxConnections -}}
{{- else -}}
sslmode=verify-full
{{- end -}}
{{- end -}}

{{- define "teleport-cluster.auth.config.azure" -}}
{{ include "teleport-cluster.auth.config.common" . }}
storage:
  type: postgresql
  auth_mode: azure
  conn_string: {{ urlJoin (dict
    "scheme" "postgresql"
    "userinfo" .Values.azure.databaseUser
    "host" .Values.azure.databaseHost
    "path" .Values.azure.backendDatabase
    "query" (include "teleport-cluster.auth.config.azure.conn_string.query" .)
  ) | toYaml }}
  audit_sessions_uri: {{ urlJoin (dict
    "scheme" "azblob"
    "host" .Values.azure.sessionRecordingStorageAccount
  ) | toYaml }}
  audit_events_uri:
    - {{ urlJoin (dict
      "scheme" "postgresql"
      "userinfo" .Values.azure.databaseUser
      "host" .Values.azure.databaseHost
      "path" .Values.azure.auditLogDatabase
      "query" "sslmode=verify-full"
      "fragment" "auth_mode=azure"
    ) | toYaml }}
    {{- if .Values.azure.auditLogMirrorOnStdout }}
    - "stdout://"
    {{- end }}
{{- end -}}
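
For illustration (hostname and names below are hypothetical), with azure.databaseUser=teleport, azure.databaseHost=example.postgres.database.azure.com, azure.backendDatabase=teleport_backend and azure.databasePoolMaxConnections=50, the urlJoin call should produce a connection string along the lines of:

  conn_string: postgresql://teleport@example.postgres.database.azure.com/teleport_backend?sslmode=verify-full&pool_max_conns=50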
65
helm/teleport-cluster/templates/auth/_config.common.tpl
Normal file
@@ -0,0 +1,65 @@
{{- define "teleport-cluster.auth.config.common" -}}
{{- $authentication := mustMergeOverwrite .Values.authentication (default dict .Values.authenticationSecondFactor) -}}
{{- $logLevel := (coalesce .Values.logLevel .Values.log.level "INFO") -}}
version: v3
kubernetes_service:
  enabled: true
  listen_addr: 0.0.0.0:3026
  public_addr: "{{ include "teleport-cluster.auth.serviceFQDN" . }}:3026"
  {{- if .Values.kubeClusterName }}
  kube_cluster_name: {{ .Values.kubeClusterName }}
  {{- else }}
  kube_cluster_name: {{ .Values.clusterName }}
  {{- end }}
  {{- if .Values.labels }}
  labels: {{- toYaml .Values.labels | nindent 8 }}
  {{- end }}
proxy_service:
  enabled: false
ssh_service:
  enabled: false
auth_service:
  enabled: true
  cluster_name: {{ required "clusterName is required in chart values" .Values.clusterName }}
  {{- if .Values.enterprise }}
  license_file: '/var/lib/license/license.pem'
  {{- end }}
  authentication:
    type: "{{ required "authentication.type is required in chart values" (coalesce .Values.authenticationType $authentication.type) }}"
    local_auth: {{ $authentication.localAuth }}
    {{- if $authentication.connectorName }}
    connector_name: "{{ $authentication.connectorName }}"
    {{- end }}
    {{- if $authentication.lockingMode }}
    locking_mode: "{{ $authentication.lockingMode }}"
    {{- end }}
    {{- if $authentication.secondFactor }}
    second_factor: "{{ $authentication.secondFactor }}"
    {{- if not (or (eq $authentication.secondFactor "off") (eq $authentication.secondFactor "otp")) }}
    webauthn:
      rp_id: {{ required "clusterName is required in chart values" .Values.clusterName }}
      {{- if $authentication.webauthn }}
      {{- if $authentication.webauthn.attestationAllowedCas }}
      attestation_allowed_cas: {{- toYaml $authentication.webauthn.attestationAllowedCas | nindent 12 }}
      {{- end }}
      {{- if $authentication.webauthn.attestationDeniedCas }}
      attestation_denied_cas: {{- toYaml $authentication.webauthn.attestationDeniedCas | nindent 12 }}
      {{- end }}
      {{- end }}
    {{- end }}
    {{- end }}
  {{- if .Values.sessionRecording }}
  session_recording: {{ .Values.sessionRecording }}
  {{- end }}
  {{- if .Values.proxyListenerMode }}
  proxy_listener_mode: {{ .Values.proxyListenerMode }}
  {{- end }}
teleport:
  auth_server: 127.0.0.1:3025
  log:
    severity: {{ $logLevel }}
    output: {{ .Values.log.output }}
    format:
      output: {{ .Values.log.format }}
      extra_fields: {{ .Values.log.extraFields | toJson }}
{{- end -}}
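
As a sketch of the authentication section above, a hypothetical values file with clusterName: teleport.example.com and authentication: {type: local, localAuth: true, secondFactor: webauthn} would render approximately:

  auth_service:
    enabled: true
    cluster_name: teleport.example.com
    authentication:
      type: "local"
      local_auth: true
      second_factor: "webauthn"
      webauthn:
        rp_id: teleport.example.com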
16
helm/teleport-cluster/templates/auth/_config.gcp.tpl
Normal file
@@ -0,0 +1,16 @@
{{- define "teleport-cluster.auth.config.gcp" -}}
{{ include "teleport-cluster.auth.config.common" . }}
storage:
  type: firestore
  project_id: {{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}
  collection_name: {{ required "gcp.backendTable is required in chart values" .Values.gcp.backendTable }}
  {{- if .Values.gcp.credentialSecretName }}
  credentials_path: /etc/teleport-secrets/gcp-credentials.json
  {{- end }}
  {{- if .Values.gcp.auditLogMirrorOnStdout }}
  audit_events_uri: ['firestore://{{ required "gcp.auditLogTable is required in chart values" .Values.gcp.auditLogTable }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}', 'stdout://']
  {{- else }}
  audit_events_uri: ['firestore://{{ required "gcp.auditLogTable is required in chart values" .Values.gcp.auditLogTable }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}']
  {{- end }}
  audit_sessions_uri: "gs://{{ required "gcp.sessionRecordingBucket is required in chart values" .Values.gcp.sessionRecordingBucket }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}"
{{- end -}}
12
helm/teleport-cluster/templates/auth/_config.scratch.tpl
Normal file
@@ -0,0 +1,12 @@
{{- define "teleport-cluster.auth.config.scratch" -}}
proxy_service:
  enabled: false
ssh_service:
  enabled: false
auth_service:
  enabled: true
{{- end -}}

{{- define "teleport-cluster.auth.config.custom" -}}
{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-v12/" }}
{{- end -}}
@@ -0,0 +1,3 @@
{{- define "teleport-cluster.auth.config.standalone" -}}
{{ include "teleport-cluster.auth.config.common" . }}
{{- end -}}
71
helm/teleport-cluster/templates/auth/clusterrole.yaml
Normal file
@@ -0,0 +1,71 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ .Release.Name }}
rules:
- apiGroups:
  - ""
  resources:
  - users
  - groups
  - serviceaccounts
  verbs:
  - impersonate
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - "authorization.k8s.io"
  resources:
  - selfsubjectaccessreviews
  verbs:
  - create

{{ if .Values.operator.enabled }}
- apiGroups:
  - "resources.teleport.dev"
  resources:
  - teleportroles
  - teleportroles/status
  - teleportusers
  - teleportusers/status
  - teleportgithubconnectors
  - teleportgithubconnectors/status
  - teleportoidcconnectors
  - teleportoidcconnectors/status
  - teleportsamlconnectors
  - teleportsamlconnectors/status
  - teleportloginrules
  - teleportloginrules/status
  - teleportprovisiontokens
  - teleportprovisiontokens/status
  - teleportoktaimportrules
  - teleportoktaimportrules/status
  verbs:
  - get
  - list
  - patch
  - update
  - watch

- apiGroups:
  - "coordination.k8s.io"
  resources:
  - leases
  verbs:
  - create
  - get
  - update

- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
{{- end -}}
{{- end -}}
31
helm/teleport-cluster/templates/auth/clusterrolebinding.yaml
Normal file
@@ -0,0 +1,31 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ .Release.Name }}
  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
  name: {{ include "teleport-cluster.auth.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
---
# This ClusterRoleBinding allows the auth service account to validate Kubernetes tokens
# This is required for proxies to join using their Kubernetes tokens
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ .Release.Name }}-auth
  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: {{ include "teleport-cluster.auth.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{- end }}
28
helm/teleport-cluster/templates/auth/config.yaml
Normal file
@@ -0,0 +1,28 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- $configTemplate := printf "teleport-cluster.auth.config.%s" $auth.chartMode -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-auth
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
  {{- if $auth.annotations.config }}
  annotations: {{- toYaml $auth.annotations.config | nindent 4 }}
  {{- end }}
data:
  {{- if $auth.createProxyToken }}
  apply-on-startup.yaml: |2
    kind: token
    version: v2
    metadata:
      name: {{ .Release.Name }}-proxy
      expires: "2050-01-01T00:00:00Z"
    spec:
      roles: [Proxy]
      join_method: kubernetes
      kubernetes:
        allow:
          - service_account: "{{ .Release.Namespace }}:{{ include "teleport-cluster.proxy.serviceAccountName" . }}"
  {{- end }}
  teleport.yaml: |2
    {{- mustMergeOverwrite (include $configTemplate . | fromYaml) $auth.teleportConfig | toYaml | nindent 4 -}}
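
Because teleport.yaml is produced by deep-merging $auth.teleportConfig over the generated config, individual keys can be overridden without rewriting the whole file. For example (hypothetical values excerpt):

  auth:
    teleportConfig:
      teleport:
        log:
          severity: DEBUG

The rendered teleport.yaml keeps everything the chartMode template generated, with only teleport.log.severity replaced.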
321
helm/teleport-cluster/templates/auth/deployment.yaml
Normal file
@@ -0,0 +1,321 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- $replicated := gt (int $auth.highAvailability.replicaCount) 1 -}}
{{- $projectedServiceAccountToken := semverCompare ">=1.20.0-0" .Capabilities.KubeVersion.Version }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-auth
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
    app: {{ .Release.Name }}
  {{- if $auth.annotations.deployment }}
  annotations: {{- toYaml $auth.annotations.deployment | nindent 4 }}
  {{- end }}
spec:
  replicas: {{ $auth.highAvailability.replicaCount }}
  {{- if and $replicated $auth.highAvailability.minReadySeconds }}
  minReadySeconds: {{ $auth.highAvailability.minReadySeconds }}
  {{- end }}
  strategy:
    {{- if $replicated }}
    # some backends support a maximum number of auth pods (e.g. DynamoDB),
    # we don't want to exceed this during a rollout.
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    {{- else }}
    # a single replica may be used because of non-replicable storage or while applying upgrade migrations.
    # In those cases, we don't want a rolling update.
    type: Recreate
    {{- end }}
  selector:
    matchLabels: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
        # ConfigMap checksum, to recreate the pod on config changes.
        checksum/config: {{ include (print $.Template.BasePath "/auth/config.yaml") . | sha256sum }}
        {{- if $auth.annotations.pod }}
        {{- toYaml $auth.annotations.pod | nindent 8 }}
        {{- end }}
      labels:
        {{- include "teleport-cluster.auth.labels" . | nindent 8 }}
        app: {{ .Release.Name }}
        {{- if eq $auth.chartMode "azure" }}
        azure.workload.identity/use: "true"
        {{- end }}
    spec:
      {{- if $auth.nodeSelector }}
      nodeSelector: {{- toYaml $auth.nodeSelector | nindent 8 }}
      {{- end }}
      affinity:
        {{- if $auth.affinity }}
        {{- if $auth.highAvailability.requireAntiAffinity }}
        {{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }}
        {{- end }}
        {{- toYaml $auth.affinity | nindent 8 }}
        {{- else }}
        podAntiAffinity:
          {{- if $auth.highAvailability.requireAntiAffinity }}
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app.kubernetes.io/instance
                operator: In
                values:
                - {{ .Release.Name }}
              - key: app.kubernetes.io/component
                operator: In
                values:
                - auth
            topologyKey: "kubernetes.io/hostname"
          {{- else if $replicated }}
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 50
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app.kubernetes.io/instance
                  operator: In
                  values:
                  - {{ .Release.Name }}
                - key: app.kubernetes.io/component
                  operator: In
                  values:
                  - auth
              topologyKey: "kubernetes.io/hostname"
          {{- end }}
        {{- end }}
      {{- if $auth.tolerations }}
      tolerations: {{- toYaml $auth.tolerations | nindent 6 }}
      {{- end }}
      {{- if $auth.imagePullSecrets }}
      imagePullSecrets:
      {{- toYaml $auth.imagePullSecrets | nindent 6 }}
      {{- end }}
      {{- if $auth.initContainers }}
      initContainers:
      {{- range $initContainer := $auth.initContainers }}
      {{- if and (not $initContainer.resources) $auth.resources }}
      {{- $_ := set $initContainer "resources" $auth.resources }}
      {{- end }}
      {{- list $initContainer | toYaml | nindent 8 }}
      {{- /* Note: this will break if the user sets volumeMounts on its initContainer */}}
        volumeMounts:
        {{- if $auth.enterprise }}
        - mountPath: /var/lib/license
          name: "license"
          readOnly: true
        {{- end }}
        {{- if and ($auth.gcp.credentialSecretName) (eq $auth.chartMode "gcp") }}
        - mountPath: /etc/teleport-secrets
          name: "gcp-credentials"
          readOnly: true
        {{- end }}
        - mountPath: /etc/teleport
          name: "config"
          readOnly: true
        - mountPath: /var/lib/teleport
          name: "data"
        {{- if $projectedServiceAccountToken }}
        - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          name: auth-serviceaccount-token
          readOnly: true
        {{- end }}
        {{- if $auth.extraVolumeMounts }}
        {{- toYaml $auth.extraVolumeMounts | nindent 10 }}
        {{- end }}
      {{- end }}
      {{- end }}
      containers:
      - name: "teleport"
        image: '{{ if $auth.enterprise }}{{ $auth.enterpriseImage }}{{ else }}{{ $auth.image }}{{ end }}:{{ include "teleport-cluster.version" . }}'
        imagePullPolicy: {{ $auth.imagePullPolicy }}
        {{- if or $auth.extraEnv $auth.tls.existingCASecretName }}
        env:
        {{- if (gt (len $auth.extraEnv) 0) }}
        {{- toYaml $auth.extraEnv | nindent 8 }}
        {{- end }}
        {{- if $auth.tls.existingCASecretName }}
        - name: SSL_CERT_FILE
          value: /etc/teleport-tls-ca/ca.pem
        {{- end }}
        {{- end }}
        args:
        - "--diag-addr=0.0.0.0:3000"
        {{- if $auth.insecureSkipProxyTLSVerify }}
        - "--insecure"
        {{- end }}
        {{- if $auth.createProxyToken }}
        - "--apply-on-startup=/etc/teleport/apply-on-startup.yaml"
        {{- end }}
        {{- if $auth.extraArgs }}
        {{- toYaml $auth.extraArgs | nindent 8 }}
        {{- end }}
        ports:
        - name: diag
          containerPort: 3000
          protocol: TCP
        - name: auth
          containerPort: 3025
          protocol: TCP
        - name: kube
          containerPort: 3026
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: diag
          initialDelaySeconds: 5 # wait 5s for agent to start
          periodSeconds: 5 # poll health every 5s
          failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s)
          timeoutSeconds: {{ .Values.probeTimeoutSeconds }}
        readinessProbe:
          httpGet:
            path: /readyz
            port: diag
          initialDelaySeconds: 5 # wait 5s for agent to register
          periodSeconds: 5 # poll health every 5s
          failureThreshold: 12 # consider agent unhealthy after 60s (12 * 5s)
          timeoutSeconds: {{ .Values.probeTimeoutSeconds }}
        lifecycle:
          # waiting during preStop ensures no new request will hit the Terminating pod
          # on clusters using kube-proxy (kube-proxy syncs the node iptables rules every 30s)
          preStop:
            exec:
              command:
              - teleport
              - wait
              - duration
              - 30s
          {{- if $auth.postStart.command }}
          postStart:
            exec:
              command: {{ toYaml $auth.postStart.command | nindent 14 }}
          {{- end }}
        {{- if $auth.resources }}
        resources:
          {{- toYaml $auth.resources | nindent 10 }}
        {{- end }}
        {{- if $auth.securityContext }}
        securityContext: {{- toYaml $auth.securityContext | nindent 10 }}
        {{- end }}
        volumeMounts:
        {{- if $auth.enterprise }}
        - mountPath: /var/lib/license
          name: "license"
          readOnly: true
        {{- end }}
        {{- if and ($auth.gcp.credentialSecretName) (eq $auth.chartMode "gcp") }}
        - mountPath: /etc/teleport-secrets
          name: "gcp-credentials"
          readOnly: true
        {{- end }}
        {{- if $auth.tls.existingCASecretName }}
        - mountPath: /etc/teleport-tls-ca
          name: "teleport-tls-ca"
          readOnly: true
        {{- end }}
        - mountPath: /etc/teleport
          name: "config"
          readOnly: true
        - mountPath: /var/lib/teleport
          name: "data"
        {{- if $projectedServiceAccountToken }}
        - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          name: auth-serviceaccount-token
          readOnly: true
        {{- end }}
        {{- if $auth.extraVolumeMounts }}
        {{- toYaml $auth.extraVolumeMounts | nindent 8 }}
        {{- end }}
      {{- /* Operator uses '.Values' instead of '$auth' as it will likely be moved out of the auth pods */}}
      {{- if .Values.operator.enabled }}
      - name: "operator"
        image: '{{ .Values.operator.image }}:{{ include "teleport-cluster.version" . }}'
        imagePullPolicy: {{ .Values.imagePullPolicy }}
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
          initialDelaySeconds: 15
          periodSeconds: 20
        readinessProbe:
          httpGet:
            path: /readyz
            port: 8081
          initialDelaySeconds: 5
          periodSeconds: 10
        {{- if .Values.operator.resources }}
        resources: {{- toYaml .Values.operator.resources | nindent 10 }}
        {{- end }}
        volumeMounts:
        - mountPath: /etc/teleport
          name: "config"
          readOnly: true
        - mountPath: /var/lib/teleport
          name: "data"
        {{- if $projectedServiceAccountToken }}
        - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          name: auth-serviceaccount-token
          readOnly: true
        {{- end }}
      {{ end }}
      {{- if $projectedServiceAccountToken }}
      automountServiceAccountToken: false
      {{- end }}
      volumes:
      {{- if $projectedServiceAccountToken }}
      # This projected token volume mimics the `automountServiceAccountToken`
      # behaviour but defaults to a 1h TTL instead of 1y.
      - name: auth-serviceaccount-token
        projected:
          sources:
          - serviceAccountToken:
              path: token
          - configMap:
              items:
              - key: ca.crt
                path: ca.crt
              name: kube-root-ca.crt
          - downwardAPI:
              items:
              - path: "namespace"
                fieldRef:
                  fieldPath: metadata.namespace
      {{- end }}
      {{- if $auth.enterprise }}
      - name: license
        secret:
          secretName: "license"
      {{- end }}
      {{- if and ($auth.gcp.credentialSecretName) (eq $auth.chartMode "gcp") }}
      - name: gcp-credentials
        secret:
          secretName: {{ $auth.gcp.credentialSecretName | quote }}
      {{- end }}
      {{- if $auth.tls.existingCASecretName }}
      - name: teleport-tls-ca
        secret:
          secretName: {{ $auth.tls.existingCASecretName }}
      {{- end }}
      - name: "config"
        configMap:
          name: {{ .Release.Name }}-auth
      - name: "data"
        {{- if and ($auth.persistence.enabled) ( and (not (eq $auth.chartMode "gcp")) (not (eq $auth.chartMode "aws")) (not (eq $auth.chartMode "azure"))) }}
        persistentVolumeClaim:
          claimName: {{ if $auth.persistence.existingClaimName }}{{ $auth.persistence.existingClaimName }}{{ else }}{{ .Release.Name }}{{ end }}
        {{- else }}
        emptyDir: {}
        {{- end }}
      {{- if $auth.extraVolumes }}
      {{- toYaml $auth.extraVolumes | nindent 6 }}
      {{- end }}
      {{- if $auth.priorityClassName }}
      priorityClassName: {{ $auth.priorityClassName }}
      {{- end }}
      serviceAccountName: {{ include "teleport-cluster.auth.serviceAccountName" . }}
      terminationGracePeriodSeconds: {{ $auth.terminationGracePeriodSeconds }}
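
For illustration, a hypothetical high-availability values excerpt such as the following switches the Deployment above to the surge-less RollingUpdate strategy, enables the required anti-affinity branch, and feeds the PodDisruptionBudget template that follows:

  highAvailability:
    replicaCount: 3
    minReadySeconds: 15
    requireAntiAffinity: true
    podDisruptionBudget:
      enabled: true
      minAvailable: 2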
17
helm/teleport-cluster/templates/auth/pdb.yaml
Normal file
@@ -0,0 +1,17 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- if $auth.highAvailability.podDisruptionBudget.enabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: {{ .Release.Name }}-auth
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
spec:
  minAvailable: {{ $auth.highAvailability.podDisruptionBudget.minAvailable }}
  selector:
    matchLabels: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 6 }}
{{- end }}
31
helm/teleport-cluster/templates/auth/predeploy_config.yaml
Normal file
@@ -0,0 +1,31 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- if $auth.validateConfigOnDeploy }}
{{- $configTemplate := printf "teleport-cluster.auth.config.%s" $auth.chartMode -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-auth-test
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-weight": "4"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
data:
  {{- if $auth.createProxyToken }}
  apply-on-startup.yaml: |2
    kind: token
    version: v2
    metadata:
      name: {{ .Release.Name }}-proxy
      expires: "3000-01-01T00:00:00Z"
    spec:
      roles: [Proxy]
      join_method: kubernetes
      kubernetes:
        allow:
          - service_account: "{{ .Release.Namespace }}:{{ include "teleport-cluster.proxy.serviceAccountName" . }}"
  {{- end }}
  teleport.yaml: |2
    {{- mustMergeOverwrite (include $configTemplate . | fromYaml) $auth.teleportConfig | toYaml | nindent 4 -}}
{{- end }}
103
helm/teleport-cluster/templates/auth/predeploy_job.yaml
Normal file
@@ -0,0 +1,103 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- if $auth.validateConfigOnDeploy }}
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ .Release.Name }}-auth-test
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-weight": "5"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
spec:
  backoffLimit: 1
  template:
    spec:
      {{- if $auth.affinity }}
      affinity: {{- toYaml $auth.affinity | nindent 8 }}
      {{- end }}
      {{- if $auth.tolerations }}
      tolerations: {{- toYaml $auth.tolerations | nindent 6 }}
      {{- end }}
      {{- if $auth.imagePullSecrets }}
      imagePullSecrets:
      {{- toYaml $auth.imagePullSecrets | nindent 6 }}
      {{- end }}
      restartPolicy: Never
      containers:
      - name: "teleport-config-check"
        image: '{{ if $auth.enterprise }}{{ $auth.enterpriseImage }}{{ else }}{{ $auth.image }}{{ end }}:{{ include "teleport-cluster.version" . }}'
        imagePullPolicy: {{ $auth.imagePullPolicy }}
        {{- if $auth.resources }}
        resources:
          {{- toYaml $auth.resources | nindent 10 }}
        {{- end }}
        {{- if or $auth.extraEnv $auth.tls.existingCASecretName }}
        env:
        {{- if (gt (len $auth.extraEnv) 0) }}
        {{- toYaml $auth.extraEnv | nindent 8 }}
        {{- end }}
        {{- if $auth.tls.existingCASecretName }}
        - name: SSL_CERT_FILE
          value: /etc/teleport-tls-ca/ca.pem
        {{- end }}
        {{- end }}
        command:
        - "teleport"
        - "configure"
        args:
        - "--test"
        - "/etc/teleport/teleport.yaml"
        {{- if .Values.securityContext }}
        securityContext: {{- toYaml .Values.securityContext | nindent 10 }}
        {{- end }}
        volumeMounts:
        {{- if .Values.enterprise }}
        - mountPath: /var/lib/license
          name: "license"
          readOnly: true
        {{- end }}
        {{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }}
        - mountPath: /etc/teleport-secrets
          name: "gcp-credentials"
          readOnly: true
        {{- end }}
        {{- if .Values.tls.existingCASecretName }}
        - mountPath: /etc/teleport-tls-ca
          name: "teleport-tls-ca"
          readOnly: true
        {{- end }}
        - mountPath: /etc/teleport
          name: "config"
          readOnly: true
        - mountPath: /var/lib/teleport
          name: "data"
        {{- if .Values.extraVolumeMounts }}
        {{- toYaml .Values.extraVolumeMounts | nindent 8 }}
        {{- end }}
      volumes:
      {{- if .Values.enterprise }}
      - name: license
        secret:
          secretName: "license"
      {{- end }}
      {{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }}
      - name: gcp-credentials
        secret:
          secretName: {{ .Values.gcp.credentialSecretName | quote }}
      {{- end }}
      {{- if .Values.tls.existingCASecretName }}
      - name: teleport-tls-ca
        secret:
          secretName: {{ .Values.tls.existingCASecretName }}
      {{- end }}
      - name: "config"
        configMap:
          name: {{ .Release.Name }}-auth-test
      - name: "data"
        emptyDir: {}
      {{- if .Values.extraVolumes }}
      {{- toYaml .Values.extraVolumes | nindent 6 }}
      {{- end }}
{{- end }}
24
helm/teleport-cluster/templates/auth/pvc.yaml
Normal file
@@ -0,0 +1,24 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- if $auth.persistence.enabled }}
{{/* Disable persistence for cloud modes */}}
{{- if and (not (eq $auth.chartMode "aws")) (not (eq $auth.chartMode "gcp")) (not (eq $auth.chartMode "azure")) }}
{{/* No need to create a PVC if we reuse an existing claim */}}
{{- if not $auth.persistence.existingClaimName }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ .Release.Name }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
spec:
  accessModes:
    - ReadWriteOnce
  {{- if $auth.persistence.storageClassName }}
  storageClassName: {{ $auth.persistence.storageClassName }}
  {{- end }}
  resources:
    requests:
      storage: {{ required "persistence.volumeSize is required in chart values" $auth.persistence.volumeSize }}
{{- end }}
{{- end }}
{{- end }}
@@ -0,0 +1,31 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "teleport-cluster.auth.previousVersionServiceName" . }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
spec:
  # This is a headless service. Resolving it will return the list of all auth pods running the previous major version.
  # Proxies should not connect to auth pods from the previous major version.
  # The proxy rollout should be held until this headless Service no longer matches any pods.
  clusterIP: "None"
  # Publishing not-ready addresses ensures that unhealthy or terminating pods are still accounted for
  publishNotReadyAddresses: true
  selector:
    {{- include "teleport-cluster.auth.selectorLabels" . | nindent 4 }}
    teleport.dev/majorVersion: {{ include "teleport-cluster.previousMajorVersion" . | quote }}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ include "teleport-cluster.auth.currentVersionServiceName" . }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
spec:
  # This is a headless service. Resolving it will return the list of all auth pods running the current major version.
  clusterIP: "None"
  # Publishing not-ready addresses ensures that unhealthy or terminating pods are still accounted for
  publishNotReadyAddresses: true
  selector:
    {{- include "teleport-cluster.auth.selectorLabels" . | nindent 4 }}
    teleport.dev/majorVersion: {{ include "teleport-cluster.majorVersion" . | quote }}
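
To make the rollout gating concrete: for a hypothetical release named `teleport` on chart major version 12, the previous-version Service selects pods approximately via the labels below, and the proxy Deployment's wait-auth-update init container (later in this commit) runs `teleport wait no-resolve` against this Service's DNS name until no v11 auth pods remain:

  # rendered selector of the previous-version headless Service (teleport-auth-v11)
  app.kubernetes.io/name: 'teleport-cluster'
  app.kubernetes.io/instance: 'teleport'
  app.kubernetes.io/component: 'auth'
  teleport.dev/majorVersion: "11"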
21
helm/teleport-cluster/templates/auth/service.yaml
Normal file
@@ -0,0 +1,21 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "teleport-cluster.auth.serviceName" . }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
  {{- if $auth.annotations.service }}
  annotations: {{- toYaml $auth.annotations.service | nindent 4 }}
  {{- end }}
spec:
  ports:
  - name: auth
    port: 3025
    targetPort: 3025
    protocol: TCP
  - name: kube
    port: 3026
    targetPort: 3026
    protocol: TCP
  selector: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 4 }}
17
helm/teleport-cluster/templates/auth/serviceaccount.yaml
Normal file
@@ -0,0 +1,17 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- if $auth.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "teleport-cluster.auth.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  {{- if or $auth.annotations.serviceAccount $auth.azure.clientID }}
  annotations:
    {{- if $auth.annotations.serviceAccount }}
    {{- toYaml $auth.annotations.serviceAccount | nindent 4 }}
    {{- end }}
    {{- if $auth.azure.clientID }}
    azure.workload.identity/client-id: "{{ $auth.azure.clientID }}"
    {{- end }}
  {{- end -}}
{{- end }}
31
helm/teleport-cluster/templates/podmonitor.yaml
Normal file
@@ -0,0 +1,31 @@
{{- if .Values.podMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: {{ .Release.Name }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "teleport-cluster.labels" . | nindent 4 }}
    {{- with .Values.podMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  jobLabel: {{ .Release.Name }}
  namespaceSelector:
    matchNames:
    - {{ .Release.Namespace }}
  selector:
    matchLabels: {{- include "teleport-cluster.selectorLabels" . | nindent 6 }}
  podMetricsEndpoints:
  - port: diag
    path: /metrics
    {{- with .Values.podMonitor.interval }}
    interval: {{ . | quote }}
    {{- end }}
  podTargetLabels:
  - "app.kubernetes.io/name"
  - "app.kubernetes.io/instance"
  - "app.kubernetes.io/component"
  - "app.kubernetes.io/version"
  - "teleport.dev/majorVersion"
{{- end }}
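
A hypothetical values excerpt enabling this resource (the PodMonitor CRD must already be installed, e.g. by the Prometheus Operator; the extra label value is an assumption):

  podMonitor:
    enabled: true
    interval: 30s
    additionalLabels:
      prometheus: main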
3
helm/teleport-cluster/templates/proxy/_config.aws.tpl
Normal file
@@ -0,0 +1,3 @@
{{- define "teleport-cluster.proxy.config.aws" -}}
{{ include "teleport-cluster.proxy.config.common" . }}
{{- end -}}
3
helm/teleport-cluster/templates/proxy/_config.azure.tpl
Normal file
@@ -0,0 +1,3 @@
{{- define "teleport-cluster.proxy.config.azure" -}}
{{ include "teleport-cluster.proxy.config.common" . }}
{{- end -}}
76
helm/teleport-cluster/templates/proxy/_config.common.tpl
Normal file
@@ -0,0 +1,76 @@
{{- define "teleport-cluster.proxy.config.common" -}}
{{- $logLevel := (coalesce .Values.logLevel .Values.log.level "INFO") -}}
version: v3
teleport:
  join_params:
    method: kubernetes
    token_name: "{{.Release.Name}}-proxy"
  auth_server: "{{ include "teleport-cluster.auth.serviceFQDN" . }}:3025"
  log:
    severity: {{ $logLevel }}
    output: {{ .Values.log.output }}
    format:
      output: {{ .Values.log.format }}
      extra_fields: {{ .Values.log.extraFields | toJson }}
ssh_service:
  enabled: false
auth_service:
  enabled: false
proxy_service:
  enabled: true
  {{- if .Values.publicAddr }}
  public_addr: {{- toYaml .Values.publicAddr | nindent 8 }}
  {{- else }}
  public_addr: '{{ required "clusterName is required in chart values" .Values.clusterName }}:443'
  {{- end }}
  {{- if ne .Values.proxyListenerMode "multiplex" }}
  listen_addr: 0.0.0.0:3023
  {{- if .Values.sshPublicAddr }}
  ssh_public_addr: {{- toYaml .Values.sshPublicAddr | nindent 8 }}
  {{- end }}
  tunnel_listen_addr: 0.0.0.0:3024
  {{- if .Values.tunnelPublicAddr }}
  tunnel_public_addr: {{- toYaml .Values.tunnelPublicAddr | nindent 8 }}
  {{- end }}
  kube_listen_addr: 0.0.0.0:3026
  {{- if .Values.kubePublicAddr }}
  kube_public_addr: {{- toYaml .Values.kubePublicAddr | nindent 8 }}
  {{- end }}
  mysql_listen_addr: 0.0.0.0:3036
  {{- if .Values.mysqlPublicAddr }}
  mysql_public_addr: {{- toYaml .Values.mysqlPublicAddr | nindent 8 }}
  {{- end }}
  {{- if .Values.separatePostgresListener }}
  postgres_listen_addr: 0.0.0.0:5432
  {{- if .Values.postgresPublicAddr }}
  postgres_public_addr: {{- toYaml .Values.postgresPublicAddr | nindent 8 }}
  {{- else }}
  postgres_public_addr: {{ .Values.clusterName }}:5432
  {{- end }}
  {{- end }}
  {{- if .Values.separateMongoListener }}
  mongo_listen_addr: 0.0.0.0:27017
  {{- if .Values.mongoPublicAddr }}
  mongo_public_addr: {{- toYaml .Values.mongoPublicAddr | nindent 8 }}
  {{- else }}
  mongo_public_addr: {{ .Values.clusterName }}:27017
  {{- end }}
  {{- end }}
  {{- end }}
  {{- if or .Values.highAvailability.certManager.enabled .Values.tls.existingSecretName }}
  https_keypairs:
  - key_file: /etc/teleport-tls/tls.key
    cert_file: /etc/teleport-tls/tls.crt
  https_keypairs_reload_interval: 12h
  {{- else if .Values.acme }}
  acme:
    enabled: {{ .Values.acme }}
    email: {{ required "acmeEmail is required in chart values" .Values.acmeEmail }}
    {{- if .Values.acmeURI }}
    uri: {{ .Values.acmeURI }}
    {{- end }}
  {{- end }}
  {{- if and .Values.ingress.enabled (semverCompare ">= 13.2.0-0" (include "teleport-cluster.version" .)) }}
  trust_x_forwarded_for: true
  {{- end }}
{{- end -}}
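
For illustration, with the default (non-multiplex) listener mode and a hypothetical clusterName of teleport.example.com, the proxy_service section renders roughly as below; with proxyListenerMode: multiplex, the per-protocol listen addresses are omitted and all protocols are served through the single TLS port instead:

  proxy_service:
    enabled: true
    public_addr: 'teleport.example.com:443'
    listen_addr: 0.0.0.0:3023
    tunnel_listen_addr: 0.0.0.0:3024
    kube_listen_addr: 0.0.0.0:3026
    mysql_listen_addr: 0.0.0.0:3036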
3
helm/teleport-cluster/templates/proxy/_config.gcp.tpl
Normal file
@@ -0,0 +1,3 @@
{{- define "teleport-cluster.proxy.config.gcp" -}}
{{ include "teleport-cluster.proxy.config.common" . }}
{{- end -}}
12
helm/teleport-cluster/templates/proxy/_config.scratch.tpl
Normal file
@@ -0,0 +1,12 @@
{{- define "teleport-cluster.proxy.config.scratch" -}}
ssh_service:
  enabled: false
auth_service:
  enabled: false
proxy_service:
  enabled: true
{{- end -}}

{{- define "teleport-cluster.proxy.config.custom" -}}
{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-v12/" }}
{{- end -}}
@@ -0,0 +1,3 @@
{{- define "teleport-cluster.proxy.config.standalone" -}}
{{ include "teleport-cluster.proxy.config.common" . }}
{{- end -}}
27
helm/teleport-cluster/templates/proxy/certificate.yaml
Normal file
@@ -0,0 +1,27 @@
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if $proxy.highAvailability.certManager.enabled }}
{{- $domain := (required "clusterName is required in chartValues when certManager is enabled" $proxy.clusterName) }}
{{- $domainWildcard := printf "*.%s" (required "clusterName is required in chartValues when certManager is enabled" $proxy.clusterName) }}
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ .Release.Name }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
spec:
  secretName: teleport-tls
  {{- if $proxy.highAvailability.certManager.addCommonName }}
  commonName: {{ quote $domain }}
  {{- end }}
  dnsNames:
  - {{ quote $domain }}
  - {{ quote $domainWildcard }}
  issuerRef:
    name: {{ required "highAvailability.certManager.issuerName is required in chart values" $proxy.highAvailability.certManager.issuerName }}
    kind: {{ required "highAvailability.certManager.issuerKind is required in chart values" $proxy.highAvailability.certManager.issuerKind }}
    group: {{ required "highAvailability.certManager.issuerGroup is required in chart values" $proxy.highAvailability.certManager.issuerGroup }}
  {{- with $proxy.annotations.certSecret }}
  secretTemplate:
    annotations: {{- toYaml . | nindent 6 }}
  {{- end }}
{{- end }}
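
As a sketch, a hypothetical clusterName of teleport.example.com with an issuer named letsencrypt-production of kind ClusterIssuer (both names hypothetical) yields approximately:

  apiVersion: cert-manager.io/v1
  kind: Certificate
  spec:
    secretName: teleport-tls
    dnsNames:
    - "teleport.example.com"
    - "*.teleport.example.com"
    issuerRef:
      name: letsencrypt-production
      kind: ClusterIssuer
      group: cert-manager.io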
16
helm/teleport-cluster/templates/proxy/config.yaml
Normal file
@@ -0,0 +1,16 @@
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- $configTemplate := printf "teleport-cluster.proxy.config.%s" $proxy.chartMode -}}
{{- if (contains ":" $proxy.clusterName) -}}
{{- fail "clusterName must not contain a colon, you can override the cluster's public address with publicAddr" -}}
{{- end -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-proxy
  namespace: {{ .Release.Namespace }}
  {{- if $proxy.annotations.config }}
  annotations: {{- toYaml $proxy.annotations.config | nindent 4 }}
  {{- end }}
data:
  teleport.yaml: |2
    {{- mustMergeOverwrite (include $configTemplate . | fromYaml) $proxy.teleportConfig | toYaml | nindent 4 -}}
307
helm/teleport-cluster/templates/proxy/deployment.yaml
Normal file
307
helm/teleport-cluster/templates/proxy/deployment.yaml
Normal file
@@ -0,0 +1,307 @@
|
||||
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
|
||||
{{- $replicable := or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName -}}
|
||||
{{- $projectedServiceAccountToken := semverCompare ">=1.20.0-0" .Capabilities.KubeVersion.Version }}
|
||||
# Deployment is {{ if not $replicable }}not {{end}}replicable
|
||||
{{- if and $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }}
|
||||
{{- fail "Cannot set both highAvailability.certManager.enabled and tls.existingSecretName, choose one or the other" }}
|
||||
{{- end }}
|
||||
{{- if and $proxy.acme $proxy.tls.existingSecretName }}
|
||||
{{- fail "Cannot set both acme.enabled and tls.existingSecretName, choose one or the other" }}
|
||||
{{- end }}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-proxy
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
|
||||
{{- if $proxy.annotations.deployment }}
|
||||
annotations: {{- toYaml $proxy.annotations.deployment | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- /*
|
||||
If proxies cannot be replicated we use a single replica.
|
||||
By default we want to upgrade all users to at least 2 replicas, if they had a higher replica count we take it.
|
||||
If a user wants to force a single proxy, they can use the `proxy` specific override.
|
||||
|
||||
$proxySpecificHA is a hack to avoid .Values.proxy.highAvailability to be nil, which would cause a fail when
|
||||
accessing .Values.proxy.highAvailability.replicaCount.
|
||||
*/}}
|
||||
{{- if $replicable }}
|
||||
{{- $proxySpecificHA := default (dict) .Values.proxy.highAvailability }}
|
||||
{{- if $proxySpecificHA.replicaCount }}
|
||||
replicas: {{ $proxySpecificHA.replicaCount }}
|
||||
{{- else }}
|
||||
replicas: {{ max .Values.highAvailability.replicaCount 2 }}
|
||||
{{- end }}
|
||||
{{- if $proxy.highAvailability.minReadySeconds }}
|
||||
minReadySeconds: {{ $proxy.highAvailability.minReadySeconds }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
replicas: 1
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
# ConfigMap checksum, to recreate the pod on config changes.
|
||||
checksum/config: {{ include (print $.Template.BasePath "/proxy/config.yaml") . | sha256sum }}
|
||||
{{- if $proxy.annotations.pod }}
|
||||
{{- toYaml $proxy.annotations.pod | nindent 8 }}
|
||||
{{- end }}
|
||||
labels: {{- include "teleport-cluster.proxy.labels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- if $proxy.nodeSelector }}
|
||||
nodeSelector: {{- toYaml $proxy.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
affinity:
|
||||
{{- if $proxy.affinity }}
|
||||
{{- if $proxy.highAvailability.requireAntiAffinity }}
|
||||
{{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }}
|
||||
{{- end }}
|
||||
{{- toYaml $proxy.affinity | nindent 8 }}
|
||||
{{- else }}
|
||||
podAntiAffinity:
|
||||
{{- if $proxy.highAvailability.requireAntiAffinity }}
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app.kubernetes.io/instance
|
||||
operator: In
|
||||
values:
|
||||
- {{ .Release.Name }}
|
||||
- key: app.kubernetes.io/component
|
||||
operator: In
|
||||
values:
|
||||
- proxy
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
{{- else if gt (int $proxy.highAvailability.replicaCount) 1 }}
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 50
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- key: app.kubernetes.io/instance
|
||||
operator: In
|
||||
values:
|
||||
- {{ .Release.Name }}
|
||||
- key: app.kubernetes.io/component
|
||||
operator: In
|
||||
values:
|
||||
- proxy
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $proxy.tolerations }}
|
||||
tolerations: {{- toYaml $proxy.tolerations | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if $proxy.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml $proxy.imagePullSecrets | nindent 6 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
# wait-auth-update is responsible for holding off the proxy rollout until all auths are running the
|
||||
# next major version in case of major upgrade.
        - name: wait-auth-update
          image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . }}'
          command:
            - teleport
            - wait
            - no-resolve
            - '{{ include "teleport-cluster.auth.previousVersionServiceName" . }}.{{ .Release.Namespace }}.svc.cluster.local'
          {{- if $proxy.securityContext }}
          securityContext: {{- toYaml $proxy.securityContext | nindent 12 }}
          {{- end }}
        {{- if $proxy.initContainers }}
          {{- range $initContainer := $proxy.initContainers }}
            {{- if and (not $initContainer.resources) $proxy.resources }}
              {{- $_ := set $initContainer "resources" $proxy.resources }}
            {{- end }}
            {{- list $initContainer | toYaml | nindent 8 }}
          {{- /* Note: this will break if the user sets volumeMounts on their initContainers */}}
          volumeMounts:
          {{- if or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }}
          - mountPath: /etc/teleport-tls
            name: "teleport-tls"
            readOnly: true
          {{- end }}
          - mountPath: /etc/teleport
            name: "config"
            readOnly: true
          - mountPath: /var/lib/teleport
            name: "data"
          {{- if $proxy.extraVolumeMounts }}
            {{- toYaml $proxy.extraVolumeMounts | nindent 10 }}
          {{- end }}
          {{- end }}
        {{- end }}
      containers:
      - name: "teleport"
        image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . }}'
        imagePullPolicy: {{ $proxy.imagePullPolicy }}
        {{- if or $proxy.extraEnv $proxy.tls.existingCASecretName }}
        env:
        {{- if (gt (len $proxy.extraEnv) 0) }}
          {{- toYaml $proxy.extraEnv | nindent 8 }}
        {{- end }}
        {{- if $proxy.tls.existingCASecretName }}
        - name: SSL_CERT_FILE
          value: /etc/teleport-tls-ca/ca.pem
        {{- end }}
        {{- end }}
        args:
        - "--diag-addr=0.0.0.0:3000"
        {{- if $proxy.insecureSkipProxyTLSVerify }}
        - "--insecure"
        {{- end }}
        {{- if $proxy.extraArgs }}
          {{- toYaml $proxy.extraArgs | nindent 8 }}
        {{- end }}
        ports:
        - name: tls
          containerPort: 3080
          protocol: TCP
        {{- if $proxy.enterprise }}
        - name: proxypeering
          containerPort: 3021
          protocol: TCP
        {{- end }}
        {{- if ne $proxy.proxyListenerMode "multiplex" }}
        - name: sshproxy
          containerPort: 3023
          protocol: TCP
        - name: sshtun
          containerPort: 3024
          protocol: TCP
        - name: kube
          containerPort: 3026
          protocol: TCP
        - name: mysql
          containerPort: 3036
          protocol: TCP
        {{- if $proxy.separatePostgresListener }}
        - name: postgres
          containerPort: 5432
          protocol: TCP
        {{- end }}
        {{- if $proxy.separateMongoListener }}
        - name: mongo
          containerPort: 27017
          protocol: TCP
        {{- end }}
        {{- end }}
        - name: diag
          containerPort: 3000
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: diag
          initialDelaySeconds: 5 # wait 5s for agent to start
          periodSeconds: 5 # poll health every 5s
          failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s)
          timeoutSeconds: {{ $proxy.probeTimeoutSeconds }}
        readinessProbe:
          httpGet:
            path: /readyz
            port: diag
          initialDelaySeconds: 5 # wait 5s for agent to register
          periodSeconds: 5 # poll health every 5s
          failureThreshold: 12 # consider agent unhealthy after 60s (12 * 5s)
          timeoutSeconds: {{ $proxy.probeTimeoutSeconds }}
        lifecycle:
          # waiting during preStop ensures no new request will hit the Terminating pod
          # on clusters using kube-proxy (kube-proxy syncs the node iptables rules every 30s)
          preStop:
            exec:
              command:
                - teleport
                - wait
                - duration
                - 30s
          {{- if $proxy.postStart.command }}
          postStart:
            exec:
              command: {{ toYaml $proxy.postStart.command | nindent 14 }}
          {{- end }}
        {{- if $proxy.resources }}
        resources:
          {{- toYaml $proxy.resources | nindent 10 }}
        {{- end }}
        {{- if $proxy.securityContext }}
        securityContext: {{- toYaml $proxy.securityContext | nindent 10 }}
        {{- end }}
        volumeMounts:
        {{- if or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }}
        - mountPath: /etc/teleport-tls
          name: "teleport-tls"
          readOnly: true
        {{- end }}
        {{- if $proxy.tls.existingCASecretName }}
        - mountPath: /etc/teleport-tls-ca
          name: "teleport-tls-ca"
          readOnly: true
        {{- end }}
        - mountPath: /etc/teleport
          name: "config"
          readOnly: true
        - mountPath: /var/lib/teleport
          name: "data"
        {{- if $projectedServiceAccountToken }}
        - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          name: proxy-serviceaccount-token
          readOnly: true
        {{- end }}
        {{- if $proxy.extraVolumeMounts }}
          {{- toYaml $proxy.extraVolumeMounts | nindent 8 }}
        {{- end }}
      {{- if $projectedServiceAccountToken }}
      automountServiceAccountToken: false
      {{- end }}
      volumes:
      {{- if $projectedServiceAccountToken }}
      # This projected token volume mimics the `automountServiceAccountToken`
      # behaviour but defaults to a 1h TTL instead of 1y.
      - name: proxy-serviceaccount-token
        projected:
          sources:
          - serviceAccountToken:
              path: token
          - configMap:
              items:
              - key: ca.crt
                path: ca.crt
              name: kube-root-ca.crt
          - downwardAPI:
              items:
              - path: "namespace"
                fieldRef:
                  fieldPath: metadata.namespace
      {{- end }}
      {{- if $proxy.highAvailability.certManager.enabled }}
      - name: teleport-tls
        secret:
          secretName: teleport-tls
      {{- else if $proxy.tls.existingSecretName }}
      - name: teleport-tls
        secret:
          secretName: {{ $proxy.tls.existingSecretName }}
      {{- end }}
      {{- if $proxy.tls.existingCASecretName }}
      - name: teleport-tls-ca
        secret:
          secretName: {{ $proxy.tls.existingCASecretName }}
      {{- end }}
      - name: "config"
        configMap:
          name: {{ .Release.Name }}-proxy
      - name: "data"
        emptyDir: {}
      {{- if $proxy.extraVolumes }}
        {{- toYaml $proxy.extraVolumes | nindent 6 }}
      {{- end }}
      {{- if $proxy.priorityClassName }}
      priorityClassName: {{ $proxy.priorityClassName }}
      {{- end }}
      serviceAccountName: {{ include "teleport-cluster.proxy.serviceAccountName" . }}
      terminationGracePeriodSeconds: {{ $proxy.terminationGracePeriodSeconds }}
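For illustration, a minimal values.yaml sketch exercising the high-availability paths in the template above (the keys follow directly from the template; the numbers are placeholders):

    highAvailability:
      replicaCount: 3          # global default; the template enforces a minimum of 2
      requireAntiAffinity: true
      minReadySeconds: 15
    proxy:
      highAvailability:
        replicaCount: 2        # proxy-specific override, wins over the global value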
57
helm/teleport-cluster/templates/proxy/ingress.yaml
Normal file
@@ -0,0 +1,57 @@
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if .Values.ingress.enabled -}}
  {{- if (not (eq .Values.proxyListenerMode "multiplex")) -}}
    {{- fail "Use of an ingress requires TLS multiplexing to be enabled, so you must also set proxyListenerMode=multiplex - see https://goteleport.com/docs/architecture/tls-routing/" -}}
  {{- end -}}
  {{- $publicAddr := coalesce .Values.publicAddr (list .Values.clusterName) -}}
  {{- /* Trim ports from all public addresses if present */ -}}
  {{- range $publicAddr -}}
    {{- $address := . -}}
    {{- if (contains ":" $address) -}}
      {{- $split := split ":" $address -}}
      {{- $address = $split._0 -}}
      {{- $publicAddr = append (mustWithout $publicAddr .) $address -}}
    {{- end -}}
    {{- $wildcard := printf "*.%s" $address -}}
    {{- /* Add wildcard versions of all public addresses to ingress, unless 1) suppressed or 2) wildcard version already exists */ -}}
    {{- if and (not $.Values.ingress.suppressAutomaticWildcards) (not (hasPrefix "*." $address)) (not (has $wildcard $publicAddr)) -}}
      {{- $publicAddr = append $publicAddr (printf "*.%s" $address) -}}
    {{- end -}}
  {{- end -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ .Release.Name }}-proxy
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
  {{- if $proxy.annotations.ingress }}
  annotations: {{- toYaml $proxy.annotations.ingress | nindent 4 }}
  {{- end }}
spec:
  {{- with $proxy.ingress.spec }}
    {{- toYaml . | nindent 2 }}
  {{- end }}
  tls:
  - hosts:
    {{- range $publicAddr }}
    - {{ quote . }}
    {{- end }}
    {{- if $proxy.highAvailability.certManager.enabled }}
    secretName: teleport-tls
    {{- else if $proxy.tls.existingSecretName }}
    secretName: {{ $proxy.tls.existingSecretName }}
    {{- end }}
  rules:
  {{- range $publicAddr }}
  - host: {{ quote . }}
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: {{ $.Release.Name }}
            port:
              number: 443
  {{- end }}
{{- end }}
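Since the template fails fast unless TLS routing is enabled, a hedged values.yaml sketch for enabling the ingress might look like this (hostname and ingress class are placeholders):

    proxyListenerMode: multiplex
    publicAddr: ["teleport.example.com:443"]
    ingress:
      enabled: true
      spec:
        ingressClassName: nginx   # placeholder; use your controller's class

The port is trimmed from each public address, and a wildcard host such as *.teleport.example.com is added automatically unless ingress.suppressAutomaticWildcards is set or the wildcard is already listed.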
17
helm/teleport-cluster/templates/proxy/pdb.yaml
Normal file
@@ -0,0 +1,17 @@
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if $proxy.highAvailability.podDisruptionBudget.enabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: {{ .Release.Name }}-proxy
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
spec:
  minAvailable: {{ $proxy.highAvailability.podDisruptionBudget.minAvailable }}
  selector:
    matchLabels: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 6 }}
{{- end }}
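A minimal sketch of the values this template reads (keys taken from the template above; the number is a placeholder):

    highAvailability:
      podDisruptionBudget:
        enabled: true
        minAvailable: 1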
16
helm/teleport-cluster/templates/proxy/predeploy_config.yaml
Normal file
@@ -0,0 +1,16 @@
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if $proxy.validateConfigOnDeploy }}
{{- $configTemplate := printf "teleport-cluster.proxy.config.%s" $proxy.chartMode -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-proxy-test
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-weight": "4"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
data:
  teleport.yaml: |2
    {{- mustMergeOverwrite (include $configTemplate . | fromYaml) $proxy.teleportConfig | toYaml | nindent 4 -}}
{{- end }}
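Because the hook deep-merges proxy.teleportConfig over the generated config before testing it, operator overrides are validated as well. A hedged sketch of such an override (the log section is just an example):

    proxy:
      teleportConfig:
        teleport:
          log:
            severity: DEBUG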
99
helm/teleport-cluster/templates/proxy/predeploy_job.yaml
Normal file
@@ -0,0 +1,99 @@
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if $proxy.validateConfigOnDeploy }}
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ .Release.Name }}-proxy-test
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-weight": "5"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
spec:
  backoffLimit: 1
  template:
    spec:
      {{- if $proxy.affinity }}
      affinity: {{- toYaml $proxy.affinity | nindent 8 }}
      {{- end }}
      {{- if $proxy.tolerations }}
      tolerations: {{- toYaml $proxy.tolerations | nindent 6 }}
      {{- end }}
      {{- if $proxy.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml $proxy.imagePullSecrets | nindent 6 }}
      {{- end }}
      restartPolicy: Never
      containers:
      - name: "teleport"
        image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . }}'
        imagePullPolicy: {{ $proxy.imagePullPolicy }}
        {{- if $proxy.resources }}
        resources:
          {{- toYaml $proxy.resources | nindent 10 }}
        {{- end }}
        {{- if or $proxy.extraEnv $proxy.tls.existingCASecretName }}
        env:
        {{- if (gt (len $proxy.extraEnv) 0) }}
          {{- toYaml $proxy.extraEnv | nindent 8 }}
        {{- end }}
        {{- if $proxy.tls.existingCASecretName }}
        - name: SSL_CERT_FILE
          value: /etc/teleport-tls-ca/ca.pem
        {{- end }}
        {{- end }}
        command:
        - "teleport"
        - "configure"
        args:
        - "--test"
        - "/etc/teleport/teleport.yaml"
        {{- if $proxy.securityContext }}
        securityContext: {{- toYaml $proxy.securityContext | nindent 10 }}
        {{- end }}
        volumeMounts:
        {{- if or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }}
        - mountPath: /etc/teleport-tls
          name: "teleport-tls"
          readOnly: true
        {{- end }}
        {{- if $proxy.tls.existingCASecretName }}
        - mountPath: /etc/teleport-tls-ca
          name: "teleport-tls-ca"
          readOnly: true
        {{- end }}
        - mountPath: /etc/teleport
          name: "config"
          readOnly: true
        - mountPath: /var/lib/teleport
          name: "data"
        {{- if $proxy.extraVolumeMounts }}
          {{- toYaml $proxy.extraVolumeMounts | nindent 8 }}
        {{- end }}
      volumes:
      {{- if $proxy.highAvailability.certManager.enabled }}
      - name: teleport-tls
        secret:
          secretName: teleport-tls
          # this avoids deadlock during initial setup
          optional: true
      {{- else if $proxy.tls.existingSecretName }}
      - name: teleport-tls
        secret:
          secretName: {{ $proxy.tls.existingSecretName }}
      {{- end }}
      {{- if $proxy.tls.existingCASecretName }}
      - name: teleport-tls-ca
        secret:
          secretName: {{ $proxy.tls.existingCASecretName }}
      {{- end }}
      - name: "config"
        configMap:
          name: {{ .Release.Name }}-proxy-test
      - name: "data"
        emptyDir: {}
      {{- if $proxy.extraVolumes }}
        {{- toYaml $proxy.extraVolumes | nindent 6 }}
      {{- end }}
{{- end }}
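The job runs `teleport configure --test` against the rendered config at hook-weight 5 (after the test ConfigMap at weight 4), so a broken config aborts the install or upgrade before any workload is touched. To skip this check, the flag read by both hook templates can be disabled:

    validateConfigOnDeploy: false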
70
helm/teleport-cluster/templates/proxy/service.yaml
Normal file
@@ -0,0 +1,70 @@
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- $backendProtocol := ternary "ssl" "tcp" (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-ssl-cert") -}}
{{- /* Fail early if proxy service type is set to LoadBalancer when ingress.enabled=true */ -}}
{{- if and $proxy.ingress.enabled (eq $proxy.service.type "LoadBalancer") -}}
  {{- fail "proxy.service.type must not be LoadBalancer when using an ingress - any load balancer should be provisioned by your ingress controller. Set proxy.service.type=ClusterIP instead" -}}
{{- end -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
  {{- if (or ($proxy.annotations.service) (eq $proxy.chartMode "aws")) }}
  annotations:
    {{- if and (eq $proxy.chartMode "aws") (not $proxy.ingress.enabled) }}
    {{- if not (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-backend-protocol")}}
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: {{ $backendProtocol }}
    {{- end }}
    {{- if not (or (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled") (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-attributes"))}}
    service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
    {{- end }}
    {{- if not (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-type")}}
    service.beta.kubernetes.io/aws-load-balancer-type: nlb
    {{- end }}
    {{- end }}
    {{- if $proxy.annotations.service }}
      {{- toYaml $proxy.annotations.service | nindent 4 }}
    {{- end }}
  {{- end }}
spec:
  type: {{ default "LoadBalancer" $proxy.service.type }}
  {{- with $proxy.service.spec }}
    {{- toYaml . | nindent 2 }}
  {{- end }}
  ports:
  - name: tls
    port: 443
    targetPort: 3080
    protocol: TCP
  {{- if ne $proxy.proxyListenerMode "multiplex" }}
  - name: sshproxy
    port: 3023
    targetPort: 3023
    protocol: TCP
  - name: k8s
    port: 3026
    targetPort: 3026
    protocol: TCP
  - name: sshtun
    port: 3024
    targetPort: 3024
    protocol: TCP
  - name: mysql
    port: 3036
    targetPort: 3036
    protocol: TCP
  {{- if $proxy.separatePostgresListener }}
  - name: postgres
    port: 5432
    targetPort: 5432
    protocol: TCP
  {{- end }}
  {{- if $proxy.separateMongoListener }}
  - name: mongo
    port: 27017
    targetPort: 27017
    protocol: TCP
  {{- end }}
  {{- end }}
  selector: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 4 }}
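In "aws" chart mode without an ingress, the template fills in NLB defaults unless the same keys already appear in proxy.annotations.service. A sketch of the annotations it renders by default (the backend protocol switches to "ssl" when an aws-load-balancer-ssl-cert annotation is present):

    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
    service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
    service.beta.kubernetes.io/aws-load-balancer-type: nlb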
11
helm/teleport-cluster/templates/proxy/serviceaccount.yaml
Normal file
@@ -0,0 +1,11 @@
{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}}
{{- if $proxy.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "teleport-cluster.proxy.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  {{- if $proxy.annotations.serviceAccount }}
  annotations: {{- toYaml $proxy.annotations.serviceAccount | nindent 4 }}
  {{- end -}}
{{- end }}
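To reuse an existing service account instead of having the chart create one, a values sketch based on the serviceAccount keys used above and in the helpers (the name is a placeholder; the proxy helper appends "-proxy" to it):

    serviceAccount:
      create: false
      name: my-teleport-sa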
68
helm/teleport-cluster/templates/psp.yaml
Normal file
@@ -0,0 +1,68 @@
{{/* PSPs are deprecated in 1.22 and removed in 1.25. However, Helm doesn't handle their removal properly in 1.25,
so we must remove them before 1.25 to keep the Helm release state from becoming corrupted. As this is a breaking change, it
only applies to v12+ charts. v11 and below will only show a warning from the NOTES.txt.
Users must use PSAs instead (beta in 1.23, GA in 1.25). The "teleport-cluster" chart runs in "baseline" mode */}}
{{- if and .Values.podSecurityPolicy.enabled (semverCompare "<1.23.0-0" .Capabilities.KubeVersion.Version) -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ .Release.Name }}
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
  privileged: false
  allowPrivilegeEscalation: false
  requiredDropCapabilities:
    - ALL
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: MustRunAs
    ranges:
      # Forbid adding the root group.
      - min: 1
        max: 65535
  runAsUser:
    rule: MustRunAsNonRoot
  fsGroup:
    rule: MustRunAs
    ranges:
      # Forbid adding the root group.
      - min: 1
        max: 65535
  readOnlyRootFilesystem: true
  volumes:
    - '*'
  hostNetwork: false
  hostIPC: false
  hostPID: false
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ .Release.Name }}-psp
  namespace: {{ .Release.Namespace }}
rules:
  - apiGroups:
      - policy
    resources:
      - podsecuritypolicies
    verbs:
      - use
    resourceNames:
      - {{ .Release.Name }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ .Release.Name }}-psp
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ .Release.Name }}-psp
subjects:
  - kind: ServiceAccount
    name: {{ .Release.Name }}
{{- end -}}
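On Kubernetes 1.23+, where this PSP is no longer deployed, the equivalent baseline enforcement comes from Pod Security Admission, per the NOTES.txt warning. A sketch of labelling the release namespace accordingly (the namespace name is a placeholder):

    apiVersion: v1
    kind: Namespace
    metadata:
      name: teleport
      labels:
        pod-security.kubernetes.io/enforce: baseline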