Add directory structure and individual services

dsk-minchulahn
2024-01-03 17:29:11 +09:00
parent 98de2a7627
commit d601d0f259
1632 changed files with 207616 additions and 1 deletion


@@ -0,0 +1,26 @@
{{- define "teleport-cluster.auth.config.aws" -}}
{{ include "teleport-cluster.auth.config.common" . }}
  storage:
    type: dynamodb
    region: {{ required "aws.region is required in chart values" .Values.aws.region }}
    table_name: {{ required "aws.backendTable is required in chart values" .Values.aws.backendTable }}
    {{- if .Values.aws.auditLogMirrorOnStdout }}
    audit_events_uri: ['dynamodb://{{ required "aws.auditLogTable is required in chart values" .Values.aws.auditLogTable }}', 'stdout://']
    {{- else }}
    audit_events_uri: ['dynamodb://{{ required "aws.auditLogTable is required in chart values" .Values.aws.auditLogTable }}']
    {{- end }}
    audit_sessions_uri: s3://{{ required "aws.sessionRecordingBucket is required in chart values" .Values.aws.sessionRecordingBucket }}
    continuous_backups: {{ required "aws.backups is required in chart values" .Values.aws.backups }}
    {{- if .Values.aws.dynamoAutoScaling }}
    auto_scaling: true
    billing_mode: provisioned
    read_min_capacity: {{ required "aws.readMinCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.readMinCapacity }}
    read_max_capacity: {{ required "aws.readMaxCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.readMaxCapacity }}
    read_target_value: {{ required "aws.readTargetValue is required when aws.dynamoAutoScaling is true" .Values.aws.readTargetValue }}
    write_min_capacity: {{ required "aws.writeMinCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.writeMinCapacity }}
    write_max_capacity: {{ required "aws.writeMaxCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.writeMaxCapacity }}
    write_target_value: {{ required "aws.writeTargetValue is required when aws.dynamoAutoScaling is true" .Values.aws.writeTargetValue }}
    {{- else }}
    auto_scaling: false
    {{- end }}
{{- end -}}
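For reference, a minimal values fragment that would satisfy the `required` checks above (region, table, and bucket names are illustrative, not taken from this commit):

chartMode: aws
clusterName: teleport.example.com
aws:
  region: ap-northeast-2                        # DynamoDB / S3 region
  backendTable: teleport-backend                # DynamoDB table for the cluster state backend
  auditLogTable: teleport-audit                 # DynamoDB table for audit events
  auditLogMirrorOnStdout: false                 # set true to also mirror audit events to stdout
  sessionRecordingBucket: teleport-recordings   # S3 bucket for session recordings
  backups: true                                 # DynamoDB continuous backups
  dynamoAutoScaling: false                      # when true, the read/write capacity values above become required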


@@ -0,0 +1,38 @@
{{/* Helper to build the database connection string, adding query parameters when needed */}}
{{- define "teleport-cluster.auth.config.azure.conn_string.query" }}
{{- if .Values.azure.databasePoolMaxConnections -}}
{{- printf "sslmode=verify-full&pool_max_conns=%v" .Values.azure.databasePoolMaxConnections -}}
{{- else -}}
sslmode=verify-full
{{- end -}}
{{- end -}}
{{- define "teleport-cluster.auth.config.azure" -}}
{{ include "teleport-cluster.auth.config.common" . }}
  storage:
    type: postgresql
    auth_mode: azure
    conn_string: {{ urlJoin (dict
      "scheme" "postgresql"
      "userinfo" .Values.azure.databaseUser
      "host" .Values.azure.databaseHost
      "path" .Values.azure.backendDatabase
      "query" (include "teleport-cluster.auth.config.azure.conn_string.query" .)
    ) | toYaml }}
    audit_sessions_uri: {{ urlJoin (dict
      "scheme" "azblob"
      "host" .Values.azure.sessionRecordingStorageAccount
    ) | toYaml }}
    audit_events_uri:
      - {{ urlJoin (dict
          "scheme" "postgresql"
          "userinfo" .Values.azure.databaseUser
          "host" .Values.azure.databaseHost
          "path" .Values.azure.auditLogDatabase
          "query" "sslmode=verify-full"
          "fragment" "auth_mode=azure"
        ) | toYaml }}
    {{- if .Values.azure.auditLogMirrorOnStdout }}
      - "stdout://"
    {{- end }}
{{- end -}}
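An illustrative Azure-mode values fragment (host, user, database, and storage-account names are made up). With databasePoolMaxConnections set, the helper above should render the connection string roughly as postgresql://teleport-auth@teleport-pg.postgres.database.azure.com/teleport_backend?sslmode=verify-full&pool_max_conns=20:

chartMode: azure
clusterName: teleport.example.com
azure:
  databaseHost: teleport-pg.postgres.database.azure.com
  databaseUser: teleport-auth
  backendDatabase: teleport_backend
  auditLogDatabase: teleport_audit
  sessionRecordingStorageAccount: teleportrecordings.blob.core.windows.net
  databasePoolMaxConnections: 20      # omit to render only sslmode=verify-full
  auditLogMirrorOnStdout: false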


@@ -0,0 +1,65 @@
{{- define "teleport-cluster.auth.config.common" -}}
{{- $authentication := mustMergeOverwrite .Values.authentication (default dict .Values.authenticationSecondFactor) -}}
{{- $logLevel := (coalesce .Values.logLevel .Values.log.level "INFO") -}}
version: v3
kubernetes_service:
  enabled: true
  listen_addr: 0.0.0.0:3026
  public_addr: "{{ include "teleport-cluster.auth.serviceFQDN" . }}:3026"
  {{- if .Values.kubeClusterName }}
  kube_cluster_name: {{ .Values.kubeClusterName }}
  {{- else }}
  kube_cluster_name: {{ .Values.clusterName }}
  {{- end }}
  {{- if .Values.labels }}
  labels: {{- toYaml .Values.labels | nindent 8 }}
  {{- end }}
proxy_service:
  enabled: false
ssh_service:
  enabled: false
auth_service:
  enabled: true
  cluster_name: {{ required "clusterName is required in chart values" .Values.clusterName }}
  {{- if .Values.enterprise }}
  license_file: '/var/lib/license/license.pem'
  {{- end }}
  authentication:
    type: "{{ required "authentication.type is required in chart values" (coalesce .Values.authenticationType $authentication.type) }}"
    local_auth: {{ $authentication.localAuth }}
    {{- if $authentication.connectorName }}
    connector_name: "{{ $authentication.connectorName }}"
    {{- end }}
    {{- if $authentication.lockingMode }}
    locking_mode: "{{ $authentication.lockingMode }}"
    {{- end }}
    {{- if $authentication.secondFactor }}
    second_factor: "{{ $authentication.secondFactor }}"
    {{- if not (or (eq $authentication.secondFactor "off") (eq $authentication.secondFactor "otp")) }}
    webauthn:
      rp_id: {{ required "clusterName is required in chart values" .Values.clusterName }}
      {{- if $authentication.webauthn }}
      {{- if $authentication.webauthn.attestationAllowedCas }}
      attestation_allowed_cas: {{- toYaml $authentication.webauthn.attestationAllowedCas | nindent 12 }}
      {{- end }}
      {{- if $authentication.webauthn.attestationDeniedCas }}
      attestation_denied_cas: {{- toYaml $authentication.webauthn.attestationDeniedCas | nindent 12 }}
      {{- end }}
      {{- end }}
    {{- end }}
    {{- end }}
  {{- if .Values.sessionRecording }}
  session_recording: {{ .Values.sessionRecording }}
  {{- end }}
  {{- if .Values.proxyListenerMode }}
  proxy_listener_mode: {{ .Values.proxyListenerMode }}
  {{- end }}
teleport:
  auth_server: 127.0.0.1:3025
  log:
    severity: {{ $logLevel }}
    output: {{ .Values.log.output }}
    format:
      output: {{ .Values.log.format }}
      extra_fields: {{ .Values.log.extraFields | toJson }}
{{- end -}}
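The common template reads the following values; treat the entries below as an illustrative sketch rather than a complete reference (the chart's values.yaml documents the full set). The webauthn block is only rendered when secondFactor is neither "off" nor "otp":

clusterName: teleport.example.com
kubeClusterName: main-cluster        # falls back to clusterName when unset
authentication:
  type: local
  localAuth: true
  lockingMode: best_effort
  secondFactor: "on"                 # also triggers the webauthn block (rp_id = clusterName)
  webauthn:
    attestationAllowedCas: []
    attestationDeniedCas: []
sessionRecording: node-sync
proxyListenerMode: multiplex
log:
  level: INFO
  output: stderr
  format: json
  extraFields: ["timestamp", "level", "component", "caller"]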


@@ -0,0 +1,16 @@
{{- define "teleport-cluster.auth.config.gcp" -}}
{{ include "teleport-cluster.auth.config.common" . }}
  storage:
    type: firestore
    project_id: {{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}
    collection_name: {{ required "gcp.backendTable is required in chart values" .Values.gcp.backendTable }}
    {{- if .Values.gcp.credentialSecretName }}
    credentials_path: /etc/teleport-secrets/gcp-credentials.json
    {{- end }}
    {{- if .Values.gcp.auditLogMirrorOnStdout }}
    audit_events_uri: ['firestore://{{ required "gcp.auditLogTable is required in chart values" .Values.gcp.auditLogTable }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}', 'stdout://']
    {{- else }}
    audit_events_uri: ['firestore://{{ required "gcp.auditLogTable is required in chart values" .Values.gcp.auditLogTable }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}']
    {{- end }}
    audit_sessions_uri: "gs://{{ required "gcp.sessionRecordingBucket is required in chart values" .Values.gcp.sessionRecordingBucket }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}"
{{- end -}}
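The GCP counterpart of the AWS values above; project, table, and bucket names are placeholders. When credentialSecretName is set, the secret is mounted at /etc/teleport-secrets and a credentialsPath parameter is appended to the Firestore and GCS URIs:

chartMode: gcp
clusterName: teleport.example.com
gcp:
  projectId: my-gcp-project
  backendTable: teleport-backend
  auditLogTable: teleport-audit
  auditLogMirrorOnStdout: false
  sessionRecordingBucket: teleport-session-recordings
  credentialSecretName: teleport-gcp-credentials   # optional; omit if the pod obtains credentials another way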


@@ -0,0 +1,12 @@
{{- define "teleport-cluster.auth.config.scratch" -}}
proxy_service:
  enabled: false
ssh_service:
  enabled: false
auth_service:
  enabled: true
{{- end -}}
{{- define "teleport-cluster.auth.config.custom" -}}
{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-v12/" }}
{{- end -}}
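Scratch mode renders only the bare skeleton above (and 'custom' mode now simply fails with a pointer to the v12 migration guide); the actual configuration is expected to come from the auth.teleportConfig override that the auth ConfigMap merges on top of the generated config. A hypothetical override, reusing field names that appear elsewhere in these templates:

chartMode: scratch
auth:
  teleportConfig:
    version: v3
    teleport:
      log:
        severity: INFO
    auth_service:
      enabled: true
      cluster_name: teleport.example.com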


@@ -0,0 +1,3 @@
{{- define "teleport-cluster.auth.config.standalone" -}}
{{ include "teleport-cluster.auth.config.common" . }}
{{- end -}}


@@ -0,0 +1,71 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Release.Name }}
rules:
- apiGroups:
- ""
resources:
- users
- groups
- serviceaccounts
verbs:
- impersonate
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- "authorization.k8s.io"
resources:
- selfsubjectaccessreviews
verbs:
- create
{{ if .Values.operator.enabled }}
- apiGroups:
- "resources.teleport.dev"
resources:
- teleportroles
- teleportroles/status
- teleportusers
- teleportusers/status
- teleportgithubconnectors
- teleportgithubconnectors/status
- teleportoidcconnectors
- teleportoidcconnectors/status
- teleportsamlconnectors
- teleportsamlconnectors/status
- teleportloginrules
- teleportloginrules/status
- teleportprovisiontokens
- teleportprovisiontokens/status
- teleportoktaimportrules
- teleportoktaimportrules/status
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- create
- get
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
{{- end -}}
{{- end -}}
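Both rule blocks are gated by values: the base rules require rbac.create, and the resources.teleport.dev / coordination.k8s.io / events rules are only added when the operator side-car is enabled. Illustrative values:

rbac:
  create: true
operator:
  enabled: true   # adds the operator-specific rules above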


@@ -0,0 +1,31 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Release.Name }}
labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ include "teleport-cluster.auth.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
---
# This ClusterRoleBinding allows the auth service account to validate Kubernetes tokens.
# This is required for proxies to join the cluster using their Kubernetes tokens.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Release.Name }}-auth
labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: {{ include "teleport-cluster.auth.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -0,0 +1,28 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- $configTemplate := printf "teleport-cluster.auth.config.%s" $auth.chartMode -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-auth
namespace: {{ .Release.Namespace }}
labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
{{- if $auth.annotations.config }}
annotations: {{- toYaml $auth.annotations.config | nindent 4 }}
{{- end }}
data:
{{- if $auth.createProxyToken }}
apply-on-startup.yaml: |2
kind: token
version: v2
metadata:
name: {{ .Release.Name }}-proxy
expires: "2050-01-01T00:00:00Z"
spec:
roles: [Proxy]
join_method: kubernetes
kubernetes:
allow:
- service_account: "{{ .Release.Namespace }}:{{ include "teleport-cluster.proxy.serviceAccountName" . }}"
{{- end }}
teleport.yaml: |2
{{- mustMergeOverwrite (include $configTemplate . | fromYaml) $auth.teleportConfig | toYaml | nindent 4 -}}
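Because the generated config is parsed with fromYaml and combined with mustMergeOverwrite, anything under auth.teleportConfig overrides the chart-generated keys while everything else stays chart-managed. A hypothetical override, reusing field names already present in the config templates:

auth:
  teleportConfig:
    teleport:
      log:
        severity: DEBUG
    auth_service:
      session_recording: "off"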


@@ -0,0 +1,321 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- $replicated := gt (int $auth.highAvailability.replicaCount) 1 -}}
{{- $projectedServiceAccountToken := semverCompare ">=1.20.0-0" .Capabilities.KubeVersion.Version }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-auth
namespace: {{ .Release.Namespace }}
labels:
{{- include "teleport-cluster.auth.labels" . | nindent 4 }}
app: {{ .Release.Name }}
{{- if $auth.annotations.deployment }}
annotations: {{- toYaml $auth.annotations.deployment | nindent 4 }}
{{- end }}
spec:
replicas: {{ $auth.highAvailability.replicaCount }}
{{- if and $replicated $auth.highAvailability.minReadySeconds }}
minReadySeconds: {{ $auth.highAvailability.minReadySeconds }}
{{- end }}
strategy:
{{- if $replicated }}
# some backends (e.g. DynamoDB) support a maximum number of auth pods,
# and we don't want to exceed it during a rollout.
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
{{- else }}
# A single replica usually means non-replicable storage or an upgrade migration in progress.
# In those cases, we don't want a rolling update.
type: Recreate
{{- end }}
selector:
matchLabels: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
# ConfigMap checksum, to recreate the pod on config changes.
checksum/config: {{ include (print $.Template.BasePath "/auth/config.yaml") . | sha256sum }}
{{- if $auth.annotations.pod }}
{{- toYaml $auth.annotations.pod | nindent 8 }}
{{- end }}
labels:
{{- include "teleport-cluster.auth.labels" . | nindent 8 }}
app: {{ .Release.Name }}
{{- if eq $auth.chartMode "azure"}}
azure.workload.identity/use: "true"
{{- end }}
spec:
{{- if $auth.nodeSelector }}
nodeSelector: {{- toYaml $auth.nodeSelector | nindent 8 }}
{{- end }}
affinity:
{{- if $auth.affinity }}
{{- if $auth.highAvailability.requireAntiAffinity }}
{{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }}
{{- end }}
{{- toYaml $auth.affinity | nindent 8 }}
{{- else }}
podAntiAffinity:
{{- if $auth.highAvailability.requireAntiAffinity }}
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- {{ .Release.Name }}
- key: app.kubernetes.io/component
operator: In
values:
- auth
topologyKey: "kubernetes.io/hostname"
{{- else if $replicated }}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- {{ .Release.Name }}
- key: app.kubernetes.io/component
operator: In
values:
- auth
topologyKey: "kubernetes.io/hostname"
{{- end }}
{{- end }}
{{- if $auth.tolerations }}
tolerations: {{- toYaml $auth.tolerations | nindent 6 }}
{{- end }}
{{- if $auth.imagePullSecrets }}
imagePullSecrets:
{{- toYaml $auth.imagePullSecrets | nindent 6 }}
{{- end }}
{{- if $auth.initContainers }}
initContainers:
{{- range $initContainer := $auth.initContainers }}
{{- if and (not $initContainer.resources) $auth.resources }}
{{- $_ := set $initContainer "resources" $auth.resources }}
{{- end }}
{{- list $initContainer | toYaml | nindent 8 }}
{{- /* Note: this will break if the user sets volumeMounts on their initContainers */}}
volumeMounts:
{{- if $auth.enterprise }}
- mountPath: /var/lib/license
name: "license"
readOnly: true
{{- end }}
{{- if and ($auth.gcp.credentialSecretName) (eq $auth.chartMode "gcp") }}
- mountPath: /etc/teleport-secrets
name: "gcp-credentials"
readOnly: true
{{- end }}
- mountPath: /etc/teleport
name: "config"
readOnly: true
- mountPath: /var/lib/teleport
name: "data"
{{- if $projectedServiceAccountToken }}
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
{{- end }}
{{- if $auth.extraVolumeMounts }}
{{- toYaml $auth.extraVolumeMounts | nindent 10 }}
{{- end }}
{{- end }}
{{- end }}
containers:
- name: "teleport"
image: '{{ if $auth.enterprise }}{{ $auth.enterpriseImage }}{{ else }}{{ $auth.image }}{{ end }}:{{ include "teleport-cluster.version" . }}'
imagePullPolicy: {{ $auth.imagePullPolicy }}
{{- if or $auth.extraEnv $auth.tls.existingCASecretName }}
env:
{{- if (gt (len $auth.extraEnv) 0) }}
{{- toYaml $auth.extraEnv | nindent 8 }}
{{- end }}
{{- if $auth.tls.existingCASecretName }}
- name: SSL_CERT_FILE
value: /etc/teleport-tls-ca/ca.pem
{{- end }}
{{- end }}
args:
- "--diag-addr=0.0.0.0:3000"
{{- if $auth.insecureSkipProxyTLSVerify }}
- "--insecure"
{{- end }}
{{- if $auth.createProxyToken }}
- "--apply-on-startup=/etc/teleport/apply-on-startup.yaml"
{{- end }}
{{- if $auth.extraArgs }}
{{- toYaml $auth.extraArgs | nindent 8 }}
{{- end }}
ports:
- name: diag
containerPort: 3000
protocol: TCP
- name: auth
containerPort: 3025
protocol: TCP
- name: kube
containerPort: 3026
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: diag
initialDelaySeconds: 5 # wait 5s for agent to start
periodSeconds: 5 # poll health every 5s
failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s)
timeoutSeconds: {{ .Values.probeTimeoutSeconds }}
readinessProbe:
httpGet:
path: /readyz
port: diag
initialDelaySeconds: 5 # wait 5s for agent to register
periodSeconds: 5 # poll health every 5s
failureThreshold: 12 # consider agent unhealthy after 60s (12 * 5s)
timeoutSeconds: {{ .Values.probeTimeoutSeconds }}
lifecycle:
# waiting during preStop ensures no new request will hit the Terminating pod
# on clusters using kube-proxy (kube-proxy syncs the node iptables rules every 30s)
preStop:
exec:
command:
- teleport
- wait
- duration
- 30s
{{- if $auth.postStart.command }}
postStart:
exec:
command: {{ toYaml $auth.postStart.command | nindent 14 }}
{{- end }}
{{- if $auth.resources }}
resources:
{{- toYaml $auth.resources | nindent 10 }}
{{- end }}
{{- if $auth.securityContext }}
securityContext: {{- toYaml $auth.securityContext | nindent 10 }}
{{- end }}
volumeMounts:
{{- if $auth.enterprise }}
- mountPath: /var/lib/license
name: "license"
readOnly: true
{{- end }}
{{- if and ($auth.gcp.credentialSecretName) (eq $auth.chartMode "gcp") }}
- mountPath: /etc/teleport-secrets
name: "gcp-credentials"
readOnly: true
{{- end }}
{{- if $auth.tls.existingCASecretName }}
- mountPath: /etc/teleport-tls-ca
name: "teleport-tls-ca"
readOnly: true
{{- end }}
- mountPath: /etc/teleport
name: "config"
readOnly: true
- mountPath: /var/lib/teleport
name: "data"
{{- if $projectedServiceAccountToken }}
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
{{- end }}
{{- if $auth.extraVolumeMounts }}
{{- toYaml $auth.extraVolumeMounts | nindent 8 }}
{{- end }}
{{- /* Operator uses '.Values' instead of '$auth' as it will likely be moved out of the auth pods */}}
{{- if .Values.operator.enabled }}
- name: "operator"
image: '{{ .Values.operator.image }}:{{ include "teleport-cluster.version" . }}'
imagePullPolicy: {{ .Values.imagePullPolicy }}
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
{{- if .Values.operator.resources }}
resources: {{- toYaml .Values.operator.resources | nindent 10 }}
{{- end }}
volumeMounts:
- mountPath: /etc/teleport
name: "config"
readOnly: true
- mountPath: /var/lib/teleport
name: "data"
{{- if $projectedServiceAccountToken }}
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
{{- end }}
{{ end }}
{{- if $projectedServiceAccountToken }}
automountServiceAccountToken: false
{{- end }}
volumes:
{{- if $projectedServiceAccountToken }}
# This projected token volume mimics the `automountServiceAccountToken`
# behaviour but defaults to a 1h TTL instead of 1y.
- name: auth-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
{{- end }}
{{- if $auth.enterprise }}
- name: license
secret:
secretName: "license"
{{- end }}
{{- if and ($auth.gcp.credentialSecretName) (eq $auth.chartMode "gcp") }}
- name: gcp-credentials
secret:
secretName: {{ $auth.gcp.credentialSecretName | quote }}
{{- end }}
{{- if $auth.tls.existingCASecretName }}
- name: teleport-tls-ca
secret:
secretName: {{ $auth.tls.existingCASecretName }}
{{- end }}
- name: "config"
configMap:
name: {{ .Release.Name }}-auth
- name: "data"
{{- if and ($auth.persistence.enabled) ( and (not (eq $auth.chartMode "gcp")) (not (eq $auth.chartMode "aws")) (not (eq $auth.chartMode "azure"))) }}
persistentVolumeClaim:
claimName: {{ if $auth.persistence.existingClaimName }}{{ $auth.persistence.existingClaimName }}{{ else }}{{ .Release.Name }}{{ end }}
{{- else }}
emptyDir: {}
{{- end }}
{{- if $auth.extraVolumes }}
{{- toYaml $auth.extraVolumes | nindent 6 }}
{{- end }}
{{- if $auth.priorityClassName }}
priorityClassName: {{ $auth.priorityClassName }}
{{- end }}
serviceAccountName: {{ include "teleport-cluster.auth.serviceAccountName" . }}
terminationGracePeriodSeconds: {{ $auth.terminationGracePeriodSeconds }}
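The replica count, update strategy, and anti-affinity behaviour above are all value-driven; an illustrative highly-available setup for a replicated backend (numbers are arbitrary):

highAvailability:
  replicaCount: 3             # >1 switches the strategy to RollingUpdate with maxSurge 0 / maxUnavailable 1
  minReadySeconds: 15
  requireAntiAffinity: true   # mutually exclusive with a custom affinity block
probeTimeoutSeconds: 5
terminationGracePeriodSeconds: 60
imagePullPolicy: IfNotPresent
extraEnv:
  - name: HTTPS_PROXY         # purely illustrative extra environment variable
    value: http://proxy.internal:3128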


@@ -0,0 +1,17 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- if $auth.highAvailability.podDisruptionBudget.enabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ .Release.Name }}-auth
namespace: {{ .Release.Namespace }}
labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
spec:
minAvailable: {{ $auth.highAvailability.podDisruptionBudget.minAvailable }}
selector:
matchLabels: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 6 }}
{{- end }}
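The budget is only emitted when explicitly enabled; a minimal example assuming three auth replicas:

highAvailability:
  replicaCount: 3
  podDisruptionBudget:
    enabled: true
    minAvailable: 2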


@@ -0,0 +1,31 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- if $auth.validateConfigOnDeploy }}
{{- $configTemplate := printf "teleport-cluster.auth.config.%s" $auth.chartMode -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-auth-test
namespace: {{ .Release.Namespace }}
labels: {{- include "teleport-cluster.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "4"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
data:
{{- if $auth.createProxyToken }}
apply-on-startup.yaml: |2
kind: token
version: v2
metadata:
name: {{ .Release.Name }}-proxy
expires: "3000-01-01T00:00:00Z"
spec:
roles: [Proxy]
join_method: kubernetes
kubernetes:
allow:
- service_account: "{{ .Release.Namespace }}:{{ include "teleport-cluster.proxy.serviceAccountName" . }}"
{{- end }}
teleport.yaml: |2
{{- mustMergeOverwrite (include $configTemplate . | fromYaml) $auth.teleportConfig | toYaml | nindent 4 -}}
{{- end }}


@@ -0,0 +1,103 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- if $auth.validateConfigOnDeploy }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Release.Name }}-auth-test
namespace: {{ .Release.Namespace }}
labels: {{- include "teleport-cluster.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "5"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
spec:
backoffLimit: 1
template:
spec:
{{- if $auth.affinity }}
affinity: {{- toYaml $auth.affinity | nindent 8 }}
{{- end }}
{{- if $auth.tolerations }}
tolerations: {{- toYaml $auth.tolerations | nindent 6 }}
{{- end }}
{{- if $auth.imagePullSecrets }}
imagePullSecrets:
{{- toYaml $auth.imagePullSecrets | nindent 6 }}
{{- end }}
restartPolicy: Never
containers:
- name: "teleport-config-check"
image: '{{ if $auth.enterprise }}{{ $auth.enterpriseImage }}{{ else }}{{ $auth.image }}{{ end }}:{{ include "teleport-cluster.version" . }}'
imagePullPolicy: {{ $auth.imagePullPolicy }}
{{- if $auth.resources }}
resources:
{{- toYaml $auth.resources | nindent 10 }}
{{- end }}
{{- if or $auth.extraEnv $auth.tls.existingCASecretName }}
env:
{{- if (gt (len $auth.extraEnv) 0) }}
{{- toYaml $auth.extraEnv | nindent 8 }}
{{- end }}
{{- if $auth.tls.existingCASecretName }}
- name: SSL_CERT_FILE
value: /etc/teleport-tls-ca/ca.pem
{{- end }}
{{- end }}
command:
- "teleport"
- "configure"
args:
- "--test"
- "/etc/teleport/teleport.yaml"
{{- if .Values.securityContext }}
securityContext: {{- toYaml .Values.securityContext | nindent 10 }}
{{- end }}
volumeMounts:
{{- if .Values.enterprise }}
- mountPath: /var/lib/license
name: "license"
readOnly: true
{{- end }}
{{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }}
- mountPath: /etc/teleport-secrets
name: "gcp-credentials"
readOnly: true
{{- end }}
{{- if .Values.tls.existingCASecretName }}
- mountPath: /etc/teleport-tls-ca
name: "teleport-tls-ca"
readOnly: true
{{- end }}
- mountPath: /etc/teleport
name: "config"
readOnly: true
- mountPath: /var/lib/teleport
name: "data"
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 8 }}
{{- end }}
volumes:
{{- if .Values.enterprise }}
- name: license
secret:
secretName: "license"
{{- end }}
{{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }}
- name: gcp-credentials
secret:
secretName: {{ .Values.gcp.credentialSecretName | quote }}
{{- end }}
{{- if .Values.tls.existingCASecretName }}
- name: teleport-tls-ca
secret:
secretName: {{ .Values.tls.existingCASecretName }}
{{- end }}
- name: "config"
configMap:
name: {{ .Release.Name }}-auth-test
- name: "data"
emptyDir: {}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 6 }}
{{- end }}
{{- end }}
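Both hook resources (the -auth-test ConfigMap and this Job) are gated by a single flag. When it is set, Helm renders the config into the hook ConfigMap and runs `teleport configure --test` against it as a pre-install/pre-upgrade hook, so a broken config aborts the release before any workload changes:

validateConfigOnDeploy: true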


@@ -0,0 +1,24 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- if $auth.persistence.enabled }}
{{/* Disable persistence for cloud modes */}}
{{- if and (not (eq $auth.chartMode "aws")) (not (eq $auth.chartMode "gcp")) (not (eq $auth.chartMode "azure")) }}
{{/* No need to create a PVC if we reuse an existing claim */}}
{{- if not $auth.persistence.existingClaimName }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
spec:
accessModes:
- ReadWriteOnce
{{- if $auth.persistence.storageClassName }}
storageClassName: {{ $auth.persistence.storageClassName }}
{{- end }}
resources:
requests:
storage: {{ required "persistence.volumeSize is required in chart values" $auth.persistence.volumeSize }}
{{- end }}
{{- end }}
{{- end }}
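Persistence values consumed above; the cloud chart modes (aws, gcp, azure) skip the PVC entirely because state lives in the managed backend. Illustrative settings:

persistence:
  enabled: true
  volumeSize: 10Gi
  storageClassName: gp3                    # hypothetical storage class name
  # existingClaimName: teleport-auth-data  # uncomment to reuse a pre-created claim instead of creating one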


@@ -0,0 +1,31 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "teleport-cluster.auth.previousVersionServiceName" . }}
namespace: {{ .Release.Namespace }}
labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
spec:
# This is a headless service. Resolving it returns the list of all auth pods running the previous major version.
# Proxies should not connect to auth pods from the previous major version;
# the proxy rollout should be held until this headless service no longer matches any pods.
clusterIP: "None"
# Publishing not ready addresses ensures that unhealthy or terminating pods are still accounted for
publishNotReadyAddresses: true
selector:
{{- include "teleport-cluster.auth.selectorLabels" . | nindent 4 }}
teleport.dev/majorVersion: {{ include "teleport-cluster.previousMajorVersion" . | quote }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "teleport-cluster.auth.currentVersionServiceName" . }}
namespace: {{ .Release.Namespace }}
labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
spec:
# This is a headless service. Resolving it will return the list of all auth pods running the current major version
clusterIP: "None"
# Publishing not ready addresses ensures that unhealthy or terminating pods are still accounted for
publishNotReadyAddresses: true
selector:
{{- include "teleport-cluster.auth.selectorLabels" . | nindent 4 }}
teleport.dev/majorVersion: {{ include "teleport-cluster.majorVersion" . | quote }}
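Assuming the selector-label helper emits the same app.kubernetes.io/instance and app.kubernetes.io/component keys used in the Deployment's anti-affinity rules, the current-version Service should end up selecting pods roughly like this (the major version shown is hypothetical):

selector:
  app.kubernetes.io/instance: my-release
  app.kubernetes.io/component: auth
  teleport.dev/majorVersion: "14"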


@@ -0,0 +1,21 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "teleport-cluster.auth.serviceName" . }}
namespace: {{ .Release.Namespace }}
labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
{{- if $auth.annotations.service }}
annotations: {{- toYaml $auth.annotations.service | nindent 4 }}
{{- end }}
spec:
ports:
- name: auth
port: 3025
targetPort: 3025
protocol: TCP
- name: kube
port: 3026
targetPort: 3026
protocol: TCP
selector: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 4 }}


@@ -0,0 +1,17 @@
{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}}
{{- if $auth.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "teleport-cluster.auth.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- if or $auth.annotations.serviceAccount $auth.azure.clientID }}
annotations:
{{- if $auth.annotations.serviceAccount }}
{{- toYaml $auth.annotations.serviceAccount | nindent 4 }}
{{- end }}
{{- if $auth.azure.clientID }}
azure.workload.identity/client-id: "{{ $auth.azure.clientID }}"
{{- end }}
{{- end -}}
{{- end }}
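Values read by this template: serviceAccount.create toggles creation, annotations.serviceAccount adds arbitrary annotations (the IRSA role below is a hypothetical example), and azure.clientID is rendered as the azure.workload.identity/client-id annotation for the Azure flow:

serviceAccount:
  create: true
annotations:
  serviceAccount:
    eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/teleport-auth   # hypothetical IAM role
azure:
  clientID: 00000000-0000-0000-0000-000000000000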