Update Teleport Chart

ByeonJungHun
2024-01-22 12:12:36 +09:00
parent fde2f5f8a7
commit 7c1afcf6d7
163 changed files with 15784 additions and 71 deletions


@@ -0,0 +1,23 @@
## Unit tests for Helm charts

Helm chart unit tests run here using the [helm-unittest](https://github.com/quintush/helm-unittest/) Helm plugin.

*Note: there are multiple forks of the helm-unittest plugin. They are not compatible and don't provide the same feature set (e.g. including templates from sub-directories). Our tests rely on features and bugfixes that are only available in the quintush fork, which seems to be the most maintained at the time of writing. It can be installed with `helm plugin install https://github.com/quintush/helm-unittest`.*

If you get a snapshot error during testing, first verify that your changes were intended to alter the rendered output, then run this command from the root of your Teleport checkout to update the snapshots:
```bash
make -C build.assets test-helm-update-snapshots
```
After this, re-run the tests to make sure everything passes:
```bash
make -C build.assets test-helm
```
Commit the updated snapshots along with your changes.
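For reference, each test suite consumed by helm-unittest is a plain YAML file. The sketch below is modeled on the suites added in this commit; the suite name and the `example-values.yaml` fixture are placeholders, not real files:

```yaml
suite: Example ConfigMap
templates:
  # path of the template under test, relative to the chart's templates/ directory
  - auth/config.yaml
tests:
  - it: matches snapshot for example-values.yaml
    values:
      # hypothetical values fixture, following the ../.lint/ convention used in this commit
      - ../.lint/example-values.yaml
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: ConfigMap
      - matchSnapshot:
          # the dot in the key name is escaped so it is not treated as a path separator
          path: data.teleport\.yaml
```

Snapshot assertions like `matchSnapshot` are what the `test-helm-update-snapshots` target regenerates.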


@@ -0,0 +1,66 @@
adds operator permissions to ClusterRole:
1: |
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: RELEASE-NAME
rules:
- apiGroups:
- ""
resources:
- users
- groups
- serviceaccounts
verbs:
- impersonate
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- authorization.k8s.io
resources:
- selfsubjectaccessreviews
verbs:
- create
- apiGroups:
- resources.teleport.dev
resources:
- teleportroles
- teleportroles/status
- teleportusers
- teleportusers/status
- teleportgithubconnectors
- teleportgithubconnectors/status
- teleportoidcconnectors
- teleportoidcconnectors/status
- teleportsamlconnectors
- teleportsamlconnectors/status
- teleportloginrules
- teleportloginrules/status
- teleportprovisiontokens
- teleportprovisiontokens/status
- teleportoktaimportrules
- teleportoktaimportrules/status
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
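Snapshot files like the one above map each test name to the output captured when the snapshot was last updated; the number indexes each snapshotted value within a test, so a test with two `matchSnapshot` asserts stores entries `1` and `2`, as in the cert-manager snapshots further down.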

File diff suppressed because it is too large.


@@ -0,0 +1,518 @@
should add an operator side-car when operator is enabled:
1: |
image: public.ecr.aws/gravitational/teleport-operator:13.3.9
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: operator
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
? should not add named PersistentVolumeClaim as volume when in scratch mode, persistence.existingClaimName
is set and persistence.enabled is false
: 1: |
affinity:
podAntiAffinity: null
automountServiceAccountToken: false
containers:
- args:
- --diag-addr=0.0.0.0:3000
- --apply-on-startup=/etc/teleport/apply-on-startup.yaml
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- teleport
- wait
- duration
- 30s
livenessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
name: teleport
ports:
- containerPort: 3000
name: diag
protocol: TCP
- containerPort: 3025
name: auth
protocol: TCP
- containerPort: 3026
name: kube
protocol: TCP
readinessProbe:
failureThreshold: 12
httpGet:
path: /readyz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
serviceAccountName: RELEASE-NAME
terminationGracePeriodSeconds: 60
volumes:
- name: auth-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
- configMap:
name: RELEASE-NAME-auth
name: config
- emptyDir: {}
name: data
should provision initContainer correctly when set in values:
1: |
- args:
- echo test
image: alpine
name: teleport-init
resources:
limits:
cpu: 2
memory: 4Gi
requests:
cpu: 1
memory: 2Gi
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
- args:
- echo test2
image: alpine
name: teleport-init2
resources:
limits:
cpu: 2
memory: 4Gi
requests:
cpu: 1
memory: 2Gi
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
should set affinity when set in values:
1: |
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: gravitational.io/dedicated
operator: In
values:
- teleport
should set imagePullSecrets when set in values:
1: |
- name: myRegistryKeySecretName
should set nodeSelector when set in values:
1: |
affinity:
podAntiAffinity: null
automountServiceAccountToken: false
containers:
- args:
- --diag-addr=0.0.0.0:3000
- --apply-on-startup=/etc/teleport/apply-on-startup.yaml
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- teleport
- wait
- duration
- 30s
livenessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
name: teleport
ports:
- containerPort: 3000
name: diag
protocol: TCP
- containerPort: 3025
name: auth
protocol: TCP
- containerPort: 3026
name: kube
protocol: TCP
readinessProbe:
failureThreshold: 12
httpGet:
path: /readyz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
nodeSelector:
environment: security
role: bastion
serviceAccountName: RELEASE-NAME
terminationGracePeriodSeconds: 60
volumes:
- name: auth-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
- configMap:
name: RELEASE-NAME-auth
name: config
- name: data
persistentVolumeClaim:
claimName: RELEASE-NAME
should set required affinity when highAvailability.requireAntiAffinity is set:
1: |
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- RELEASE-NAME
- key: app.kubernetes.io/component
operator: In
values:
- auth
topologyKey: kubernetes.io/hostname
should set resources when set in values:
1: |
affinity:
podAntiAffinity: null
automountServiceAccountToken: false
containers:
- args:
- --diag-addr=0.0.0.0:3000
- --apply-on-startup=/etc/teleport/apply-on-startup.yaml
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- teleport
- wait
- duration
- 30s
livenessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
name: teleport
ports:
- containerPort: 3000
name: diag
protocol: TCP
- containerPort: 3025
name: auth
protocol: TCP
- containerPort: 3026
name: kube
protocol: TCP
readinessProbe:
failureThreshold: 12
httpGet:
path: /readyz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
resources:
limits:
cpu: 2
memory: 4Gi
requests:
cpu: 1
memory: 2Gi
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
serviceAccountName: RELEASE-NAME
terminationGracePeriodSeconds: 60
volumes:
- name: auth-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
- configMap:
name: RELEASE-NAME-auth
name: config
- name: data
persistentVolumeClaim:
claimName: RELEASE-NAME
should set securityContext when set in values:
1: |
affinity:
podAntiAffinity: null
automountServiceAccountToken: false
containers:
- args:
- --diag-addr=0.0.0.0:3000
- --apply-on-startup=/etc/teleport/apply-on-startup.yaml
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- teleport
- wait
- duration
- 30s
livenessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
name: teleport
ports:
- containerPort: 3000
name: diag
protocol: TCP
- containerPort: 3025
name: auth
protocol: TCP
- containerPort: 3026
name: kube
protocol: TCP
readinessProbe:
failureThreshold: 12
httpGet:
path: /readyz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 99
runAsNonRoot: true
runAsUser: 99
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
serviceAccountName: RELEASE-NAME
terminationGracePeriodSeconds: 60
volumes:
- name: auth-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
- configMap:
name: RELEASE-NAME-auth
name: config
- name: data
persistentVolumeClaim:
claimName: RELEASE-NAME
should set tolerations when set in values:
1: |
- effect: NoExecute
key: dedicated
operator: Equal
value: teleport
- effect: NoSchedule
key: dedicated
operator: Equal
value: teleport
should use OSS image and not mount license when enterprise is not set in values:
1: |
affinity:
podAntiAffinity: null
automountServiceAccountToken: false
containers:
- args:
- --diag-addr=0.0.0.0:3000
- --apply-on-startup=/etc/teleport/apply-on-startup.yaml
image: public.ecr.aws/gravitational/teleport-distroless:12.2.1
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- teleport
- wait
- duration
- 30s
livenessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
name: teleport
ports:
- containerPort: 3000
name: diag
protocol: TCP
- containerPort: 3025
name: auth
protocol: TCP
- containerPort: 3026
name: kube
protocol: TCP
readinessProbe:
failureThreshold: 12
httpGet:
path: /readyz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
serviceAccountName: RELEASE-NAME
terminationGracePeriodSeconds: 60
volumes:
- name: auth-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
- configMap:
name: RELEASE-NAME-auth
name: config
- name: data
persistentVolumeClaim:
claimName: RELEASE-NAME


@@ -0,0 +1,55 @@
does not add additional wildcard publicAddrs when Ingress is enabled and a publicAddr already contains a wildcard:
1: |
- hosts:
- helm-lint.example.com
- '*.helm-lint.example.com'
- helm-lint-second-domain.example.com
- '*.helm-lint-second-domain.example.com'
does not set a wildcard of clusterName as a hostname when Ingress is enabled and ingress.suppressAutomaticWildcards is true:
1: |
- hosts:
- teleport.example.com
? does not set a wildcard of publicAddr as a hostname when Ingress is enabled, publicAddr
is set and ingress.suppressAutomaticWildcards is true
: 1: |
- hosts:
- helm-lint.example.com
does not set tls.secretName by default:
1: |
- hosts:
- teleport.example.com
- '*.teleport.example.com'
exposes all publicAddrs and wildcard publicAddrs as hostnames when Ingress is enabled and multiple publicAddrs are set:
1: |
- hosts:
- helm-lint.example.com
- helm-lint-second-domain.example.com
- '*.helm-lint.example.com'
- '*.helm-lint-second-domain.example.com'
sets the clusterName and wildcard of clusterName as hostnames when Ingress is enabled:
1: |
- hosts:
- teleport.example.com
- '*.teleport.example.com'
sets the publicAddr and wildcard of publicAddr as hostnames when Ingress is enabled and publicAddr is set:
1: |
- hosts:
- helm-lint.example.com
- '*.helm-lint.example.com'
sets tls.secretName to the value of tls.existingSecretName when set:
1: |
- hosts:
- teleport.example.com
- '*.teleport.example.com'
secretName: helm-lint-tls-secret
sets tls.secretName when cert-manager is enabled:
1: |
- hosts:
- teleport.example.com
- '*.teleport.example.com'
secretName: teleport-tls
trims ports from publicAddr and uses it as the hostname when Ingress is enabled and publicAddr is set:
1: |
- hosts:
- helm-lint.example.com
- '*.helm-lint.example.com'


@@ -0,0 +1,6 @@
should set imagePullSecrets on auth predeploy job when set in values:
1: |
- name: myRegistryKeySecretName
should set imagePullSecrets on proxy predeploy job when set in values:
1: |
- name: myRegistryKeySecretName


@@ -0,0 +1,16 @@
should request a certificate for cluster name when cert-manager is enabled (cert-manager.yaml):
1: |
- test-cluster
- '*.test-cluster'
2: |
group: custom.cert-manager.io
kind: CustomClusterIssuer
name: custom
should request a certificate for cluster name when cert-manager is enabled (cert-secret.yaml):
1: |
- test-cluster
- '*.test-cluster'
2: |
group: cert-manager.io
kind: Issuer
name: letsencrypt


@@ -0,0 +1,530 @@
generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled is not set:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
public_addr: helm-test.example.com:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled=true:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
public_addr: helm-test.example.com:443
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled is not set:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
public_addr: helm-test.example.com:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
generates a config with a clusterName containing a regular string:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
public_addr: helm-test.example.com:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
generates a config with proxy_service.trust_x_forwarded_for=true when version = 14.0.0-rc.1 and ingress.enabled=true:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
public_addr: helm-test.example.com:443
trust_x_forwarded_for: true
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
generates a config with proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled=true:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
public_addr: helm-test.example.com:443
trust_x_forwarded_for: true
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
matches snapshot for acme-on.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
acme:
email: test@email.com
enabled: true
enabled: true
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
public_addr: test-acme-cluster:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
matches snapshot for acme-uri-staging.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
acme:
email: test@email.com
enabled: true
uri: https://acme-staging-v02.api.letsencrypt.org/directory
enabled: true
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
public_addr: test-acme-cluster:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
matches snapshot for aws-ha-acme.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
https_keypairs:
- cert_file: /etc/teleport-tls/tls.crt
key_file: /etc/teleport-tls/tls.key
https_keypairs_reload_interval: 12h
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
public_addr: test-aws-cluster:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
matches snapshot for existing-tls-secret.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
https_keypairs:
- cert_file: /etc/teleport-tls/tls.crt
key_file: /etc/teleport-tls/tls.key
https_keypairs_reload_interval: 12h
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
public_addr: test-cluster-name:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
matches snapshot for log-basic.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
public_addr: test-log-cluster:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: json
output: stderr
severity: INFO
version: v3
matches snapshot for log-extra.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
public_addr: test-log-cluster:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- level
- timestamp
- component
- caller
output: json
output: /var/lib/teleport/test.log
severity: DEBUG
version: v3
matches snapshot for proxy-listener-mode-multiplex.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
public_addr: test-proxy-listener-mode:443
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
matches snapshot for proxy-listener-mode-separate.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
public_addr: test-proxy-listener-mode:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
matches snapshot for public-addresses.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
kube_listen_addr: 0.0.0.0:3026
kube_public_addr:
- loadbalancer.example.com:3026
listen_addr: 0.0.0.0:3023
mongo_listen_addr: 0.0.0.0:27017
mongo_public_addr:
- loadbalancer.example.com:27017
mysql_listen_addr: 0.0.0.0:3036
mysql_public_addr:
- loadbalancer.example.com:3036
postgres_listen_addr: 0.0.0.0:5432
postgres_public_addr:
- loadbalancer.example.com:5432
public_addr:
- loadbalancer.example.com:443
ssh_public_addr:
- loadbalancer.example.com:3023
tunnel_listen_addr: 0.0.0.0:3024
tunnel_public_addr:
- loadbalancer.example.com:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
matches snapshot for separate-mongo-listener.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mongo_listen_addr: 0.0.0.0:27017
mongo_public_addr: helm-lint:27017
mysql_listen_addr: 0.0.0.0:3036
public_addr: helm-lint:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3
matches snapshot for separate-postgres-listener.yaml:
1: |
|-
auth_service:
enabled: false
proxy_service:
enabled: true
kube_listen_addr: 0.0.0.0:3026
listen_addr: 0.0.0.0:3023
mysql_listen_addr: 0.0.0.0:3036
postgres_listen_addr: 0.0.0.0:5432
postgres_public_addr: helm-lint:5432
public_addr: helm-lint:443
tunnel_listen_addr: 0.0.0.0:3024
ssh_service:
enabled: false
teleport:
auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025
join_params:
method: kubernetes
token_name: RELEASE-NAME-proxy
log:
format:
extra_fields:
- timestamp
- level
- component
- caller
output: text
output: stderr
severity: INFO
version: v3


@@ -0,0 +1,495 @@
should provision initContainer correctly when set in values:
1: |
- command:
- teleport
- wait
- no-resolve
- RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
name: wait-auth-update
- args:
- echo test
image: alpine
name: teleport-init
resources:
limits:
cpu: 2
memory: 4Gi
requests:
cpu: 1
memory: 2Gi
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- args:
- echo test2
image: alpine
name: teleport-init2
resources:
limits:
cpu: 2
memory: 4Gi
requests:
cpu: 1
memory: 2Gi
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
should set affinity when set in values:
1: |
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: gravitational.io/dedicated
operator: In
values:
- teleport
should set imagePullSecrets when set in values:
1: |
- name: myRegistryKeySecretName
should set nodeSelector when set in values:
1: |
affinity:
podAntiAffinity: null
automountServiceAccountToken: false
containers:
- args:
- --diag-addr=0.0.0.0:3000
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- teleport
- wait
- duration
- 30s
livenessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
name: teleport
ports:
- containerPort: 3080
name: tls
protocol: TCP
- containerPort: 3023
name: sshproxy
protocol: TCP
- containerPort: 3024
name: sshtun
protocol: TCP
- containerPort: 3026
name: kube
protocol: TCP
- containerPort: 3036
name: mysql
protocol: TCP
- containerPort: 3000
name: diag
protocol: TCP
readinessProbe:
failureThreshold: 12
httpGet:
path: /readyz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: proxy-serviceaccount-token
readOnly: true
initContainers:
- command:
- teleport
- wait
- no-resolve
- RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
name: wait-auth-update
nodeSelector:
environment: security
role: bastion
serviceAccountName: RELEASE-NAME-proxy
terminationGracePeriodSeconds: 60
volumes:
- name: proxy-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
- configMap:
name: RELEASE-NAME-proxy
name: config
- emptyDir: {}
name: data
should set required affinity when highAvailability.requireAntiAffinity is set:
1: |
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- RELEASE-NAME
- key: app.kubernetes.io/component
operator: In
values:
- proxy
topologyKey: kubernetes.io/hostname
should set resources when set in values:
1: |
affinity:
podAntiAffinity: null
automountServiceAccountToken: false
containers:
- args:
- --diag-addr=0.0.0.0:3000
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- teleport
- wait
- duration
- 30s
livenessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
name: teleport
ports:
- containerPort: 3080
name: tls
protocol: TCP
- containerPort: 3023
name: sshproxy
protocol: TCP
- containerPort: 3024
name: sshtun
protocol: TCP
- containerPort: 3026
name: kube
protocol: TCP
- containerPort: 3036
name: mysql
protocol: TCP
- containerPort: 3000
name: diag
protocol: TCP
readinessProbe:
failureThreshold: 12
httpGet:
path: /readyz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
resources:
limits:
cpu: 2
memory: 4Gi
requests:
cpu: 1
memory: 2Gi
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: proxy-serviceaccount-token
readOnly: true
initContainers:
- command:
- teleport
- wait
- no-resolve
- RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
name: wait-auth-update
serviceAccountName: RELEASE-NAME-proxy
terminationGracePeriodSeconds: 60
volumes:
- name: proxy-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
- configMap:
name: RELEASE-NAME-proxy
name: config
- emptyDir: {}
name: data
should set securityContext for initContainers when set in values:
1: |
affinity:
podAntiAffinity: null
automountServiceAccountToken: false
containers:
- args:
- --diag-addr=0.0.0.0:3000
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- teleport
- wait
- duration
- 30s
livenessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
name: teleport
ports:
- containerPort: 3080
name: tls
protocol: TCP
- containerPort: 3023
name: sshproxy
protocol: TCP
- containerPort: 3024
name: sshtun
protocol: TCP
- containerPort: 3026
name: kube
protocol: TCP
- containerPort: 3036
name: mysql
protocol: TCP
- containerPort: 3000
name: diag
protocol: TCP
readinessProbe:
failureThreshold: 12
httpGet:
path: /readyz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 99
runAsNonRoot: true
runAsUser: 99
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: proxy-serviceaccount-token
readOnly: true
initContainers:
- command:
- teleport
- wait
- no-resolve
- RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
name: wait-auth-update
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 99
runAsNonRoot: true
runAsUser: 99
serviceAccountName: RELEASE-NAME-proxy
terminationGracePeriodSeconds: 60
volumes:
- name: proxy-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
- configMap:
name: RELEASE-NAME-proxy
name: config
- emptyDir: {}
name: data
should set securityContext when set in values:
1: |
affinity:
podAntiAffinity: null
automountServiceAccountToken: false
containers:
- args:
- --diag-addr=0.0.0.0:3000
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- teleport
- wait
- duration
- 30s
livenessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
name: teleport
ports:
- containerPort: 3080
name: tls
protocol: TCP
- containerPort: 3023
name: sshproxy
protocol: TCP
- containerPort: 3024
name: sshtun
protocol: TCP
- containerPort: 3026
name: kube
protocol: TCP
- containerPort: 3036
name: mysql
protocol: TCP
- containerPort: 3000
name: diag
protocol: TCP
readinessProbe:
failureThreshold: 12
httpGet:
path: /readyz
port: diag
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 99
runAsNonRoot: true
runAsUser: 99
volumeMounts:
- mountPath: /etc/teleport
name: config
readOnly: true
- mountPath: /var/lib/teleport
name: data
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: proxy-serviceaccount-token
readOnly: true
initContainers:
- command:
- teleport
- wait
- no-resolve
- RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local
image: public.ecr.aws/gravitational/teleport-distroless:13.3.9
name: wait-auth-update
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 99
runAsNonRoot: true
runAsUser: 99
serviceAccountName: RELEASE-NAME-proxy
terminationGracePeriodSeconds: 60
volumes:
- name: proxy-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
- configMap:
name: RELEASE-NAME-proxy
name: config
- emptyDir: {}
name: data
should set tolerations when set in values:
1: |
- effect: NoExecute
key: dedicated
operator: Equal
value: teleport
- effect: NoSchedule
key: dedicated
operator: Equal
value: teleport


@@ -0,0 +1,68 @@
does not expose separate listener ports by default when ingress.enabled=true:
1: |
- name: tls
port: 443
protocol: TCP
targetPort: 3080
does not expose separate listener ports when running in separate mode and ingress.enabled=true:
1: |
- name: tls
port: 443
protocol: TCP
targetPort: 3080
exposes a single port when running in multiplex mode:
1: |
- name: tls
port: 443
protocol: TCP
targetPort: 3080
exposes a single port when running in multiplex mode and ingress.enabled=true:
1: |
- name: tls
port: 443
protocol: TCP
targetPort: 3080
exposes separate listener ports by default:
1: |
- name: tls
port: 443
protocol: TCP
targetPort: 3080
- name: sshproxy
port: 3023
protocol: TCP
targetPort: 3023
- name: k8s
port: 3026
protocol: TCP
targetPort: 3026
- name: sshtun
port: 3024
protocol: TCP
targetPort: 3024
- name: mysql
port: 3036
protocol: TCP
targetPort: 3036
exposes separate listener ports when running in separate mode:
1: |
- name: tls
port: 443
protocol: TCP
targetPort: 3080
- name: sshproxy
port: 3023
protocol: TCP
targetPort: 3023
- name: k8s
port: 3026
protocol: TCP
targetPort: 3026
- name: sshtun
port: 3024
protocol: TCP
targetPort: 3024
- name: mysql
port: 3036
protocol: TCP
targetPort: 3036


@@ -0,0 +1,62 @@
creates a PodSecurityPolicy when enabled in values and supported:
1: |
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
name: RELEASE-NAME
spec:
allowPrivilegeEscalation: false
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
hostIPC: false
hostNetwork: false
hostPID: false
privileged: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
rule: MustRunAsNonRoot
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- '*'
2: |
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: RELEASE-NAME-psp
namespace: NAMESPACE
rules:
- apiGroups:
- policy
resourceNames:
- RELEASE-NAME
resources:
- podsecuritypolicies
verbs:
- use
3: |
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: RELEASE-NAME-psp
namespace: NAMESPACE
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: RELEASE-NAME-psp
subjects:
- kind: ServiceAccount
name: RELEASE-NAME


@@ -0,0 +1,19 @@
suite: Auth ClusterRole
templates:
- auth/clusterrole.yaml
tests:
- it: creates a ClusterRole
asserts:
- hasDocuments:
count: 1
- isKind:
of: ClusterRole
- it: adds operator permissions to ClusterRole
values:
- ../.lint/operator.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ClusterRole
- matchSnapshot: {}
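A bare `matchSnapshot: {}` compares the entire rendered document against the stored snapshot, which for this suite is the `adds operator permissions to ClusterRole` entry shown near the top of this diff.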


@@ -0,0 +1,20 @@
suite: Auth ClusterRoleBinding
templates:
- auth/clusterrolebinding.yaml
tests:
- it: creates a ClusterRoleBinding
asserts:
- hasDocuments:
count: 2
- isKind:
of: ClusterRoleBinding
- it: uses the provided serviceAccount name
values:
- ../.lint/service-account.yaml
asserts:
- contains:
path: subjects
any: true
content:
kind: ServiceAccount
name: "helm-lint"


@@ -0,0 +1,512 @@
suite: ConfigMap
templates:
- auth/config.yaml
tests:
- it: matches snapshot for acme-off.yaml
values:
- ../.lint/acme-off.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for acme-on.yaml
values:
- ../.lint/acme-on.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for acme-uri-staging.yaml
values:
- ../.lint/acme-uri-staging.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: wears annotations (annotations.yaml)
values:
- ../.lint/annotations.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- equal:
path: metadata.annotations.kubernetes\.io/config
value: test-annotation
- equal:
path: metadata.annotations.kubernetes\.io/config-different
value: 2
- it: matches snapshot for auth-connector-name.yaml
values:
- ../.lint/auth-connector-name.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for auth-disable-local.yaml
values:
- ../.lint/auth-disable-local.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for auth-locking-mode.yaml
values:
- ../.lint/auth-locking-mode.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for auth-passwordless.yaml
values:
- ../.lint/auth-passwordless.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for auth-type.yaml
values:
- ../.lint/auth-type.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for auth-type-legacy.yaml
values:
- ../.lint/auth-type-legacy.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for auth-webauthn.yaml
values:
- ../.lint/auth-webauthn.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for auth-webauthn-legacy.yaml
values:
- ../.lint/auth-webauthn-legacy.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for aws.yaml
values:
- ../.lint/aws.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for aws-dynamodb-autoscaling.yaml
values:
- ../.lint/aws-dynamodb-autoscaling.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for aws-ha.yaml
values:
- ../.lint/aws-ha.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for aws-ha-acme.yaml
values:
- ../.lint/aws-ha-acme.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for aws-ha-antiaffinity.yaml
values:
- ../.lint/aws-ha-antiaffinity.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for aws-ha-log.yaml
values:
- ../.lint/aws-ha-log.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for existing-tls-secret.yaml
values:
- ../.lint/existing-tls-secret.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for existing-tls-secret-with-ca.yaml
values:
- ../.lint/existing-tls-secret-with-ca.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for gcp-ha-acme.yaml
values:
- ../.lint/gcp-ha-acme.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for gcp-ha-antiaffinity.yaml
values:
- ../.lint/gcp-ha-antiaffinity.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for gcp-ha-log.yaml
values:
- ../.lint/gcp-ha-log.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for gcp.yaml
values:
- ../.lint/gcp.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for initcontainers.yaml
values:
- ../.lint/initcontainers.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for kube-cluster-name.yaml
values:
- ../.lint/kube-cluster-name.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for log-basic.yaml
values:
- ../.lint/log-basic.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for log-extra.yaml
values:
- ../.lint/log-extra.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for log-legacy.yaml
values:
- ../.lint/log-legacy.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for priority-class-name.yaml
values:
- ../.lint/priority-class-name.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for proxy-listener-mode-multiplex.yaml
values:
- ../.lint/proxy-listener-mode-multiplex.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for proxy-listener-mode-separate.yaml
values:
- ../.lint/proxy-listener-mode-separate.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for service.yaml
values:
- ../.lint/service.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for separate-mongo-listener.yaml
values:
- ../.lint/separate-mongo-listener.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for separate-postgres-listener.yaml
values:
- ../.lint/separate-postgres-listener.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for public-addresses.yaml
values:
- ../.lint/public-addresses.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for session-recording.yaml
values:
- ../.lint/session-recording.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for standalone-customsize.yaml
values:
- ../.lint/standalone-customsize.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for standalone-existingpvc.yaml
values:
- ../.lint/standalone-existingpvc.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for tolerations.yaml
values:
- ../.lint/tolerations.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for version-override.yaml
values:
- ../.lint/version-override.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for volumes.yaml
values:
- ../.lint/volumes.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: adds a proxy token by default
set:
clusterName: teleport.example.com
asserts:
- notEqual:
path: data.apply-on-startup\.yaml
value: null
- matchSnapshot:
path: data.apply-on-startup\.yaml
- it: matches snapshot for azure.yaml
values:
- ../.lint/azure.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for azure.yaml without pool_max_conn
values:
- ../.lint/azure.yaml
set:
azure:
databasePoolMaxConnections: 0
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: sets "provisioned" billing mode when autoscaling is enabled
values:
- ../.lint/aws-dynamodb-autoscaling.yaml
asserts:
- matchRegex:
path: data.teleport\.yaml
pattern: 'billing_mode: provisioned'
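Note that a test can layer inline `set:` overrides on top of `values:` fixture files, as the azure test above does to zero out `databasePoolMaxConnections` after loading `azure.yaml`; this lets a test tweak a shared fixture without duplicating it.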


@@ -0,0 +1,826 @@
suite: Auth Deployment
templates:
- auth/deployment.yaml
- auth/config.yaml
tests:
- it: sets Deployment annotations when specified
template: auth/deployment.yaml
values:
- ../.lint/annotations.yaml
asserts:
- equal:
path: metadata.annotations.kubernetes\.io/deployment
value: test-annotation
- equal:
path: metadata.annotations.kubernetes\.io/deployment-different
value: 3
- it: sets Pod annotations when specified
template: auth/deployment.yaml
values:
- ../.lint/annotations.yaml
asserts:
- equal:
path: spec.template.metadata.annotations.kubernetes\.io/pod
value: test-annotation
- equal:
path: spec.template.metadata.annotations.kubernetes\.io/pod-different
value: 4
- it: should not have more than one replica in standalone mode
template: auth/deployment.yaml
set:
chartMode: standalone
clusterName: helm-lint.example.com
asserts:
- equal:
path: spec.replicas
value: 1
- it: should have multiple replicas when replicaCount is set
template: auth/deployment.yaml
set:
chartMode: scratch
clusterName: helm-lint.example.com
highAvailability:
replicaCount: 3
asserts:
- equal:
path: spec.replicas
value: 3
- it: should set affinity when set in values
template: auth/deployment.yaml
set:
clusterName: helm-lint.example.com
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: gravitational.io/dedicated
operator: In
values:
- teleport
asserts:
- isNotNull:
path: spec.template.spec.affinity
- matchSnapshot:
path: spec.template.spec.affinity
- it: should set nodeSelector when set in values
template: auth/deployment.yaml
set:
chartMode: scratch
clusterName: helm-lint.example.com
nodeSelector:
role: bastion
environment: security
asserts:
- isNotNull:
path: spec.template.spec.nodeSelector
- matchSnapshot:
path: spec.template.spec
- it: should set required affinity when highAvailability.requireAntiAffinity is set
template: auth/deployment.yaml
values:
- ../.lint/aws-ha-antiaffinity.yaml
asserts:
- isNotNull:
path: spec.template.spec.affinity
- isNotNull:
path: spec.template.spec.affinity.podAntiAffinity
- isNotNull:
path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
- matchSnapshot:
path: spec.template.spec.affinity
- it: should set tolerations when set in values
template: auth/deployment.yaml
values:
- ../.lint/tolerations.yaml
asserts:
- isNotNull:
path: spec.template.spec.tolerations
- matchSnapshot:
path: spec.template.spec.tolerations
- it: should set resources when set in values
template: auth/deployment.yaml
values:
- ../.lint/resources.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].resources.limits.cpu
value: 2
- equal:
path: spec.template.spec.containers[0].resources.limits.memory
value: 4Gi
- equal:
path: spec.template.spec.containers[0].resources.requests.cpu
value: 1
- equal:
path: spec.template.spec.containers[0].resources.requests.memory
value: 2Gi
- matchSnapshot:
path: spec.template.spec
- it: should set securityContext when set in values
template: auth/deployment.yaml
values:
- ../.lint/security-context.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation
value: false
- equal:
path: spec.template.spec.containers[0].securityContext.privileged
value: false
- equal:
path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
value: false
- equal:
path: spec.template.spec.containers[0].securityContext.runAsGroup
value: 99
- equal:
path: spec.template.spec.containers[0].securityContext.runAsNonRoot
value: true
- equal:
path: spec.template.spec.containers[0].securityContext.runAsUser
value: 99
- matchSnapshot:
path: spec.template.spec
- it: should not set securityContext when is empty object (default value)
template: auth/deployment.yaml
values:
- ../.lint/security-context-empty.yaml
asserts:
- isNull:
path: spec.template.spec.containers[0].securityContext
# we can't use the dynamic chart version or appVersion as a variable in the tests,
# so we override it manually and check that it gets set instead
# this saves us having to update the test every time we cut a new release
- it: should use enterprise image and mount license when enterprise is set in values
template: auth/deployment.yaml
set:
clusterName: helm-lint.example.com
enterprise: true
teleportVersionOverride: 12.2.1
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: public.ecr.aws/gravitational/teleport-ent-distroless:12.2.1
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /var/lib/license
name: "license"
readOnly: true
- contains:
path: spec.template.spec.volumes
content:
name: license
secret:
secretName: license
- it: should use OSS image and not mount license when enterprise is not set in values
template: auth/deployment.yaml
set:
clusterName: helm-lint
teleportVersionOverride: 12.2.1
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: public.ecr.aws/gravitational/teleport-distroless:12.2.1
- notContains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /var/lib/license
name: "license"
readOnly: true
- notContains:
path: spec.template.spec.volumes
content:
name: license
secret:
secretName: license
- matchSnapshot:
path: spec.template.spec
- it: should mount GCP credentials in GCP mode
template: auth/deployment.yaml
values:
- ../.lint/gcp-ha.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport-secrets
name: "gcp-credentials"
readOnly: true
- contains:
path: spec.template.spec.volumes
content:
name: gcp-credentials
secret:
secretName: teleport-gcp-credentials
- it: should not mount secret when credentialSecretName is blank in values
template: auth/deployment.yaml
values:
- ../.lint/gcp-ha-workload.yaml
asserts:
- notContains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport-secrets
name: "gcp-credentials"
readOnly: true
- notContains:
path: spec.template.spec.volumes
content:
name: gcp-credentials
secret:
secretName: teleport-gcp-credentials
- it: should mount GCP credentials for initContainer in GCP mode
template: auth/deployment.yaml
values:
- ../.lint/gcp-ha.yaml
- ../.lint/initcontainers.yaml
asserts:
- contains:
path: spec.template.spec.initContainers[0].volumeMounts
content:
mountPath: /etc/teleport-secrets
name: "gcp-credentials"
readOnly: true
- it: should mount ConfigMap containing Teleport config
template: auth/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport
name: "config"
readOnly: true
- contains:
path: spec.template.spec.volumes
content:
name: config
configMap:
name: RELEASE-NAME-auth
- it: should mount extraVolumes and extraVolumeMounts on container and initContainers
template: auth/deployment.yaml
values:
- ../.lint/volumes.yaml
- ../.lint/initcontainers.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /path/to/mount
name: my-mount
- contains:
path: spec.template.spec.initContainers[0].volumeMounts
content:
mountPath: /path/to/mount
name: my-mount
- contains:
path: spec.template.spec.initContainers[1].volumeMounts
content:
mountPath: /path/to/mount
name: my-mount
- contains:
path: spec.template.spec.volumes
content:
name: my-mount
secret:
secretName: mySecret
- it: should set imagePullPolicy when set in values
template: auth/deployment.yaml
set:
clusterName: helm-lint.example.com
imagePullPolicy: Always
asserts:
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: Always
- it: should set environment when extraEnv set in values
template: auth/deployment.yaml
values:
- ../.lint/extra-env.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: SOME_ENVIRONMENT_VARIABLE
value: "some-value"
- it: should set imagePullSecrets when set in values
template: auth/deployment.yaml
values:
- ../.lint/imagepullsecrets.yaml
asserts:
- equal:
path: spec.template.spec.imagePullSecrets[0].name
value: myRegistryKeySecretName
- matchSnapshot:
path: spec.template.spec.imagePullSecrets
- it: should provision initContainer correctly when set in values
template: auth/deployment.yaml
values:
- ../.lint/initcontainers.yaml
- ../.lint/resources.yaml
- ../.lint/extra-env.yaml
asserts:
- contains:
path: spec.template.spec.initContainers[0].args
content: "echo test"
- equal:
path: spec.template.spec.initContainers[0].name
value: "teleport-init"
- equal:
path: spec.template.spec.initContainers[0].image
value: "alpine"
- equal:
path: spec.template.spec.initContainers[0].resources.limits.cpu
value: 2
- equal:
path: spec.template.spec.initContainers[0].resources.limits.memory
value: 4Gi
- equal:
path: spec.template.spec.initContainers[0].resources.requests.cpu
value: 1
- equal:
path: spec.template.spec.initContainers[0].resources.requests.memory
value: 2Gi
- contains:
path: spec.template.spec.initContainers[1].args
content: "echo test2"
- equal:
path: spec.template.spec.initContainers[1].name
value: "teleport-init2"
- equal:
path: spec.template.spec.initContainers[1].image
value: "alpine"
- equal:
path: spec.template.spec.initContainers[1].resources.limits.cpu
value: 2
- equal:
path: spec.template.spec.initContainers[1].resources.limits.memory
value: 4Gi
- equal:
path: spec.template.spec.initContainers[1].resources.requests.cpu
value: 1
- equal:
path: spec.template.spec.initContainers[1].resources.requests.memory
value: 2Gi
- matchSnapshot:
path: spec.template.spec.initContainers
- it: should add insecureSkipProxyTLSVerify to args when set in values
template: auth/deployment.yaml
set:
clusterName: helm-lint.example.com
insecureSkipProxyTLSVerify: true
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--insecure"
- it: should expose diag port
template: auth/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: diag
containerPort: 3000
protocol: TCP
- it: should expose auth port
template: auth/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: auth
containerPort: 3025
protocol: TCP
- it: should expose kube port
template: auth/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: kube
containerPort: 3026
protocol: TCP
- it: should set postStart command if set in values
template: auth/deployment.yaml
set:
clusterName: helm-lint.example.com
postStart:
command: ["/bin/echo", "test"]
asserts:
- equal:
path: spec.template.spec.containers[0].lifecycle.postStart.exec.command
value: ["/bin/echo", "test"]
- it: should add PersistentVolumeClaim as volume when in standalone mode and persistence.enabled is true
template: auth/deployment.yaml
set:
chartMode: standalone
clusterName: helm-lint.example.com
persistence:
enabled: true
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: data
persistentVolumeClaim:
claimName: RELEASE-NAME
- it: should not add PersistentVolumeClaim as volume when in standalone mode and persistence.enabled is false
template: auth/deployment.yaml
set:
chartMode: standalone
clusterName: helm-lint.example.com
persistence:
enabled: false
asserts:
- notContains:
path: spec.template.spec.volumes
content:
name: data
persistentVolumeClaim:
claimName: RELEASE-NAME
- it: should add PersistentVolumeClaim as volume when in scratch mode and persistence.enabled is true
template: auth/deployment.yaml
set:
chartMode: scratch
clusterName: helm-lint.example.com
persistence:
enabled: true
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: data
persistentVolumeClaim:
claimName: RELEASE-NAME
- it: should not add PersistentVolumeClaim as volume when in scratch mode and persistence.enabled is false
template: auth/deployment.yaml
set:
chartMode: scratch
clusterName: helm-lint.example.com
persistence:
enabled: false
asserts:
- notContains:
path: spec.template.spec.volumes
content:
name: data
persistentVolumeClaim:
claimName: RELEASE-NAME
- it: should add an operator side-car when operator is enabled
template: auth/deployment.yaml
values:
- ../.lint/operator.yaml
asserts:
- equal:
path: spec.template.spec.containers[1].name
value: operator
- matchSnapshot:
path: spec.template.spec.containers[1]
- it: should add named PersistentVolumeClaim as volume when in standalone mode, persistence.existingClaimName is set and persistence.enabled is true
template: auth/deployment.yaml
values:
- ../.lint/standalone-existingpvc.yaml
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: data
persistentVolumeClaim:
claimName: teleport-storage
- it: should not add named PersistentVolumeClaim as volume when in standalone mode, persistence.existingClaimName is set but persistence.enabled is false
template: auth/deployment.yaml
values:
- ../.lint/standalone-existingpvc.yaml
set:
persistence:
enabled: false
asserts:
- notContains:
path: spec.template.spec.volumes
content:
name: data
persistentVolumeClaim:
claimName: teleport-storage
- it: should add named PersistentVolumeClaim as volume when in scratch mode and persistence.existingClaimName is set
template: auth/deployment.yaml
values:
- ../.lint/standalone-existingpvc.yaml
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: data
persistentVolumeClaim:
claimName: teleport-storage
- it: should not add named PersistentVolumeClaim as volume when in scratch mode, persistence.existingClaimName is set and persistence.enabled is false
template: auth/deployment.yaml
values:
- ../.lint/standalone-existingpvc.yaml
set:
persistence:
enabled: false
asserts:
- notContains:
path: spec.template.spec.volumes
content:
name: data
persistentVolumeClaim:
claimName: teleport-storage
- matchSnapshot:
path: spec.template.spec
- it: should add emptyDir for data in AWS mode
template: auth/deployment.yaml
values:
- ../.lint/aws-ha.yaml
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: data
emptyDir: {}
- it: should add emptyDir for data in GCP mode
template: auth/deployment.yaml
values:
- ../.lint/gcp-ha.yaml
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: data
emptyDir: {}
- it: should set priorityClassName when set in values
template: auth/deployment.yaml
values:
- ../.lint/priority-class-name.yaml
asserts:
- equal:
path: spec.template.spec.priorityClassName
value: system-cluster-critical
- it: should set probeTimeoutSeconds when set in values
template: auth/deployment.yaml
values:
- ../.lint/probe-timeout-seconds.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].livenessProbe.timeoutSeconds
value: 5
- equal:
path: spec.template.spec.containers[0].readinessProbe.timeoutSeconds
value: 5
- it: should mount tls.existingCASecretName and set environment when set in values
template: auth/deployment.yaml
values:
- ../.lint/existing-tls-secret-with-ca.yaml
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: teleport-tls-ca
secret:
secretName: helm-lint-existing-tls-secret-ca
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport-tls-ca
name: teleport-tls-ca
readOnly: true
- contains:
path: spec.template.spec.containers[0].env
content:
name: SSL_CERT_FILE
value: /etc/teleport-tls-ca/ca.pem
- it: should mount tls.existingCASecretName and set extra environment when set in values
template: auth/deployment.yaml
values:
- ../.lint/existing-tls-secret-with-ca.yaml
- ../.lint/extra-env.yaml
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: teleport-tls-ca
secret:
secretName: helm-lint-existing-tls-secret-ca
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport-tls-ca
name: teleport-tls-ca
readOnly: true
- contains:
path: spec.template.spec.containers[0].env
content:
name: SSL_CERT_FILE
value: /etc/teleport-tls-ca/ca.pem
- contains:
path: spec.template.spec.containers[0].env
content:
name: SOME_ENVIRONMENT_VARIABLE
value: some-value
- it: should set minReadySeconds when replicaCount > 1
template: auth/deployment.yaml
set:
chartMode: scratch
highAvailability:
minReadySeconds: 60
replicaCount: 3
asserts:
- equal:
path: spec.minReadySeconds
value: 60
- it: should not set minReadySeconds when replicaCount = 1
template: auth/deployment.yaml
set:
chartMode: scratch
highAvailability:
minReadySeconds: 60
replicaCount: 1
asserts:
- equal:
path: spec.minReadySeconds
value: null
- it: should use Recreate strategy when replicaCount = 1
template: auth/deployment.yaml
set:
chartMode: scratch
highAvailability:
replicaCount: 1
asserts:
- equal:
path: spec.strategy.type
value: Recreate
- it: should use a RollingUpdate strategy when replicaCount > 1
template: auth/deployment.yaml
set:
chartMode: scratch
highAvailability:
replicaCount: 2
asserts:
- equal:
path: spec.strategy.type
value: RollingUpdate
- it: should not perform surge rolling updates when replicaCount > 1
template: auth/deployment.yaml
set:
chartMode: scratch
highAvailability:
replicaCount: 2
asserts:
- equal:
path: spec.strategy.rollingUpdate.maxSurge
value: 0
- equal:
path: spec.strategy.rollingUpdate.maxUnavailable
value: 1
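# note: a single-replica auth pod is assumed to hold a ReadWriteOnce
# PersistentVolumeClaim, so the Recreate strategy above avoids two pods
# contending for the same volume; with multiple replicas the chart rolls
# with maxSurge=0 / maxUnavailable=1 instead.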
- it: mounts regular tokens on older Kubernetes versions
template: auth/deployment.yaml
set:
clusterName: helm-lint
operator:
enabled: true
capabilities:
majorVersion: 1
minorVersion: 18
asserts:
- notEqual:
path: spec.template.spec.automountServiceAccountToken
value: false
- notContains:
path: spec.template.spec.volumes
content:
name: auth-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
- notContains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
- notContains:
path: spec.template.spec.containers[1].volumeMounts
content:
mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
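# the projected volume in the next test hand-builds what kubelet would
# otherwise automount (service account token, cluster CA, namespace),
# presumably so the chart keeps control of token mounting on Kubernetes
# 1.21+, where bound, expiring service account tokens became the default.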
- it: mounts tokens through projected volumes on newer Kubernetes versions
template: auth/deployment.yaml
set:
clusterName: helm-lint
operator:
enabled: true
capabilities:
majorVersion: 1
minorVersion: 21
asserts:
- equal:
path: spec.template.spec.automountServiceAccountToken
value: false
- contains:
path: spec.template.spec.volumes
content:
name: auth-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
- contains:
path: spec.template.spec.containers[1].volumeMounts
content:
mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: auth-serviceaccount-token
readOnly: true
- it: should add the azure workload identity label to auth pods in azure mode
template: auth/deployment.yaml
set:
chartMode: azure
clusterName: teleport.example.com
asserts:
- equal:
path: spec.template.metadata.labels.azure\.workload\.identity/use
value: "true"


@@ -0,0 +1,23 @@
suite: Auth PodDisruptionBudget
templates:
- auth/pdb.yaml
tests:
- it: should not create a PDB when disabled in values
set:
highAvailability:
podDisruptionBudget:
enabled: false
asserts:
- hasDocuments:
count: 0
- it: should create a PDB when enabled in values (pdb.yaml)
values:
- ../.lint/pdb.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: PodDisruptionBudget
- equal:
path: spec.minAvailable
value: 2


@@ -0,0 +1,87 @@
suite: Auth PersistentVolumeClaim
templates:
- auth/pvc.yaml
tests:
- it: creates a PersistentVolumeClaim when chartMode=standalone with default size
set:
chartMode: standalone
asserts:
- hasDocuments:
count: 1
- isKind:
of: PersistentVolumeClaim
- equal:
path: spec.resources.requests.storage
value: "10Gi"
- it: creates a PersistentVolumeClaim when chartMode=scratch
set:
chartMode: scratch
asserts:
- hasDocuments:
count: 1
- isKind:
of: PersistentVolumeClaim
- it: uses a custom size when set
values:
- ../.lint/standalone-customsize.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: PersistentVolumeClaim
- equal:
path: spec.resources.requests.storage
value: 50Gi
- it: uses a custom storage class when set
values:
- ../.lint/standalone-custom-storage-class.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: PersistentVolumeClaim
- equal:
path: spec.storageClassName
value: ebs-ssd
- it: does not create a PersistentVolumeClaim when chartMode=standalone and existingClaimName is not blank
set:
chartMode: standalone
persistence:
existingClaimName: test-claim
asserts:
- hasDocuments:
count: 0
- it: does not create a PersistentVolumeClaim when chartMode=scratch and existingClaimName is not blank
set:
chartMode: scratch
persistence:
existingClaimName: test-claim
asserts:
- hasDocuments:
count: 0
- it: does not create a PersistentVolumeClaim when chartMode=aws
set:
chartMode: aws
asserts:
- hasDocuments:
count: 0
- it: does not create a PersistentVolumeClaim when chartMode=gcp
set:
chartMode: gcp
asserts:
- hasDocuments:
count: 0
- it: does not create a PersistentVolumeClaim when chartMode=azure
set:
chartMode: azure
asserts:
- hasDocuments:
count: 0
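# summary of the above: a PersistentVolumeClaim is only templated when
# chartMode is standalone or scratch and persistence.existingClaimName is
# empty; the cloud modes (aws, gcp, azure) presumably rely on their own
# storage backends instead.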


@@ -0,0 +1,32 @@
suite: Auth ServiceAccount
templates:
- auth/serviceaccount.yaml
tests:
- it: sets ServiceAccount annotations when specified
values:
- ../.lint/annotations.yaml
asserts:
- equal:
path: metadata.annotations.kubernetes\.io/serviceaccount
value: test-annotation
- equal:
path: metadata.annotations.kubernetes\.io/serviceaccount-different
value: 6
- it: changes ServiceAccount name when specified
values:
- ../.lint/service-account.yaml
asserts:
- equal:
path: metadata.name
value: "helm-lint"
- it: sets Azure client ID when set
set:
chartMode: azure
azure:
clientID: "1234"
asserts:
- equal:
path: metadata.annotations.azure\.workload\.identity/client-id
value: "1234"


@@ -0,0 +1,538 @@
suite: Proxy Ingress
templates:
- proxy/ingress.yaml
tests:
- it: does not create an Ingress by default
set:
clusterName: teleport.example.com
asserts:
- hasDocuments:
count: 0
- it: creates an Ingress when ingress.enabled=true and proxyListenerMode=multiplex
values:
- ../.lint/ingress.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- it: fails to deploy an Ingress when ingress.enabled=true and proxyListenerMode is not set
values:
- ../.lint/ingress.yaml
set:
proxyListenerMode: ""
asserts:
- failedTemplate:
errorMessage: "Use of an ingress requires TLS multiplexing to be enabled, so you must also set proxyListenerMode=multiplex - see https://goteleport.com/docs/architecture/tls-routing/"
- it: fails to deploy an Ingress when ingress.enabled=true and proxyListenerMode=separate
values:
- ../.lint/ingress.yaml
set:
proxyListenerMode: separate
asserts:
- failedTemplate:
errorMessage: "Use of an ingress requires TLS multiplexing to be enabled, so you must also set proxyListenerMode=multiplex - see https://goteleport.com/docs/architecture/tls-routing/"
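# per the error messages above, a minimal working configuration combines
# both values, e.g.:
#   ingress:
#     enabled: true
#   proxyListenerMode: multiplex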
- it: wears annotations when set
values:
- ../.lint/ingress.yaml
set:
annotations:
ingress:
test-annotation: test-annotation-value
another-annotation: some-other-value
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- equal:
path: metadata.annotations.test-annotation
value: test-annotation-value
- equal:
path: metadata.annotations.another-annotation
value: some-other-value
- it: sets the clusterName and wildcard of clusterName as hostnames when Ingress is enabled
values:
- ../.lint/ingress.yaml
asserts:
- equal:
path: spec.tls[0].hosts[0]
value: "teleport.example.com"
- contains:
path: spec.tls
content:
hosts:
- "teleport.example.com"
- "*.teleport.example.com"
- equal:
path: spec.rules[0].host
value: "teleport.example.com"
- contains:
path: spec.rules
content:
host: "teleport.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- equal:
path: spec.rules[1].host
value: "*.teleport.example.com"
- contains:
path: spec.rules
content:
host: "*.teleport.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- matchSnapshot:
path: spec.tls
- it: does not set a wildcard of clusterName as a hostname when Ingress is enabled and ingress.suppressAutomaticWildcards is true
values:
- ../.lint/ingress.yaml
set:
ingress:
suppressAutomaticWildcards: true
asserts:
- equal:
path: spec.tls[0].hosts[0]
value: "teleport.example.com"
- contains:
path: spec.tls
content:
hosts:
- "teleport.example.com"
- equal:
path: spec.rules[0].host
value: "teleport.example.com"
- contains:
path: spec.rules
content:
host: "teleport.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- notContains:
path: spec.rules
content:
host: "*.teleport.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- matchSnapshot:
path: spec.tls
- it: sets the publicAddr and wildcard of publicAddr as hostnames when Ingress is enabled and publicAddr is set
values:
- ../.lint/ingress.yaml
set:
publicAddr: ["helm-lint.example.com"]
asserts:
- equal:
path: spec.tls[0].hosts[0]
value: "helm-lint.example.com"
- contains:
path: spec.tls
content:
hosts:
- "helm-lint.example.com"
- "*.helm-lint.example.com"
- equal:
path: spec.rules[0].host
value: helm-lint.example.com
- contains:
path: spec.rules
content:
host: "helm-lint.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- equal:
path: spec.rules[1].host
value: "*.helm-lint.example.com"
- contains:
path: spec.rules
content:
host: "*.helm-lint.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- matchSnapshot:
path: spec.tls
- it: does not set a wildcard of publicAddr as a hostname when Ingress is enabled, publicAddr is set and ingress.suppressAutomaticWildcards is true
values:
- ../.lint/ingress.yaml
set:
publicAddr: ["helm-lint.example.com"]
ingress:
suppressAutomaticWildcards: true
asserts:
- equal:
path: spec.tls[0].hosts[0]
value: "helm-lint.example.com"
- contains:
path: spec.tls
content:
hosts:
- "helm-lint.example.com"
- equal:
path: spec.rules[0].host
value: helm-lint.example.com
- contains:
path: spec.rules
content:
host: "helm-lint.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- notContains:
path: spec.rules
content:
host: "*.helm-lint.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- matchSnapshot:
path: spec.tls
- it: trims the port from publicAddr and uses the result as the hostname when Ingress is enabled and publicAddr is set
values:
- ../.lint/ingress.yaml
set:
publicAddr: ["helm-lint.example.com:443"]
asserts:
- equal:
path: spec.tls[0].hosts[0]
value: "helm-lint.example.com"
- contains:
path: spec.tls
content:
hosts:
- "helm-lint.example.com"
- "*.helm-lint.example.com"
- equal:
path: spec.rules[0].host
value: "helm-lint.example.com"
- contains:
path: spec.rules
content:
host: helm-lint.example.com
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- equal:
path: spec.rules[1].host
value: "*.helm-lint.example.com"
- contains:
path: spec.rules
content:
host: "*.helm-lint.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- matchSnapshot:
path: spec.tls
- it: exposes all publicAddrs and wildcard publicAddrs as hostnames when Ingress is enabled and multiple publicAddrs are set
values:
- ../.lint/ingress.yaml
set:
publicAddr: ["helm-lint.example.com", "helm-lint-second-domain.example.com"]
asserts:
- equal:
path: spec.tls[0].hosts[0]
value: "helm-lint.example.com"
- equal:
path: spec.tls[0].hosts[1]
value: "helm-lint-second-domain.example.com"
- contains:
path: spec.tls
content:
hosts:
- "helm-lint.example.com"
- "helm-lint-second-domain.example.com"
- "*.helm-lint.example.com"
- "*.helm-lint-second-domain.example.com"
- equal:
path: spec.rules[0].host
value: "helm-lint.example.com"
- equal:
path: spec.rules[1].host
value: "helm-lint-second-domain.example.com"
- equal:
path: spec.rules[2].host
value: "*.helm-lint.example.com"
- equal:
path: spec.rules[3].host
value: "*.helm-lint-second-domain.example.com"
- contains:
path: spec.rules
content:
host: "helm-lint.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- contains:
path: spec.rules
content:
host: "helm-lint-second-domain.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- contains:
path: spec.rules
content:
host: "*.helm-lint.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- contains:
path: spec.rules
content:
host: "*.helm-lint-second-domain.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- matchSnapshot:
path: spec.tls
# this is a very contrived example which wouldn't even work in reality
# it's just to test the logic in the hostname generation code
- it: does not add additional wildcard publicAddrs when Ingress is enabled and a publicAddr already contains a wildcard
values:
- ../.lint/ingress.yaml
set:
publicAddr: ["helm-lint.example.com", "*.helm-lint.example.com", "helm-lint-second-domain.example.com:443"]
asserts:
- equal:
path: spec.tls[0].hosts[0]
value: "helm-lint.example.com"
- equal:
path: spec.tls[0].hosts[1]
value: "*.helm-lint.example.com"
- equal:
path: spec.tls[0].hosts[2]
value: "helm-lint-second-domain.example.com"
- equal:
path: spec.tls[0].hosts[3]
value: "*.helm-lint-second-domain.example.com"
- contains:
path: spec.tls
content:
hosts:
- "helm-lint.example.com"
- "*.helm-lint.example.com"
- "helm-lint-second-domain.example.com"
- "*.helm-lint-second-domain.example.com"
- equal:
path: spec.rules[0].host
value: "helm-lint.example.com"
- equal:
path: spec.rules[1].host
value: "*.helm-lint.example.com"
- equal:
path: spec.rules[2].host
value: "helm-lint-second-domain.example.com"
- equal:
path: spec.rules[3].host
value: "*.helm-lint-second-domain.example.com"
- contains:
path: spec.rules
content:
host: "helm-lint.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- contains:
path: spec.rules
content:
host: "*.helm-lint.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- contains:
path: spec.rules
content:
host: "helm-lint-second-domain.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- contains:
path: spec.rules
content:
host: "*.helm-lint-second-domain.example.com"
http:
paths:
- backend:
service:
name: RELEASE-NAME
port:
number: 443
path: /
pathType: Prefix
- matchSnapshot:
path: spec.tls
- it: sets spec when passed
values:
- ../.lint/ingress.yaml
set:
ingress:
spec:
ingressClassName: nginx
otherSpecStuff: lint
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- equal:
path: spec.ingressClassName
value: nginx
- equal:
path: spec.otherSpecStuff
value: lint
- it: does not set tls.secretName by default
values:
- ../.lint/ingress.yaml
asserts:
- isEmpty:
path: spec.tls[0].secretName
- matchSnapshot:
path: spec.tls
- it: sets tls.secretName when cert-manager is enabled
values:
- ../.lint/ingress.yaml
set:
highAvailability:
certManager:
enabled: true
asserts:
- equal:
path: spec.tls[0].secretName
value: teleport-tls
- matchSnapshot:
path: spec.tls
- it: sets tls.secretName to the value of tls.existingSecretName when set
values:
- ../.lint/ingress.yaml
set:
tls:
existingSecretName: helm-lint-tls-secret
asserts:
- equal:
path: spec.tls[0].secretName
value: helm-lint-tls-secret
- matchSnapshot:
path: spec.tls


@@ -0,0 +1,40 @@
suite: PodMonitor
templates:
- podmonitor.yaml
tests:
- it: does not create a PodMonitor by default
set:
clusterName: test-kube-cluster-name
asserts:
- hasDocuments:
count: 0
- it: creates a PodMonitor when enabled
set:
clusterName: test-kube-cluster-name
podMonitor:
enabled: true
asserts:
- hasDocuments:
count: 1
- isKind:
of: PodMonitor
- it: configures scrape interval if provided
set:
clusterName: test-kube-cluster-name
podMonitor:
enabled: true
interval: 2m
asserts:
- equal:
path: spec.podMetricsEndpoints[0].interval
value: 2m
- it: wears additional labels if provided
values:
- ../.lint/podmonitor.yaml
asserts:
- equal:
path: metadata.labels.prometheus
value: default
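# for reference, the values exercised by this suite amount to a snippet like:
#   podMonitor:
#     enabled: true
#     interval: 2m  # optional; scrape interval for the PodMonitor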


@@ -0,0 +1,111 @@
suite: Pre-Deploy Config Test Hooks
templates:
- auth/predeploy_job.yaml
- auth/predeploy_config.yaml
- proxy/predeploy_job.yaml
- proxy/predeploy_config.yaml
tests:
- it: Deploys the auth-test config
template: auth/predeploy_config.yaml
set:
clusterName: helm-lint
asserts:
- containsDocument:
kind: ConfigMap
apiVersion: v1
name: RELEASE-NAME-auth-test
namespace: NAMESPACE
- it: Deploys the proxy-test config
template: proxy/predeploy_config.yaml
set:
clusterName: helm-lint
asserts:
- containsDocument:
kind: ConfigMap
apiVersion: v1
name: RELEASE-NAME-proxy-test
namespace: NAMESPACE
- it: Deploys the auth-test job
template: auth/predeploy_job.yaml
set:
clusterName: helm-lint
asserts:
- containsDocument:
kind: Job
apiVersion: batch/v1
name: RELEASE-NAME-auth-test
namespace: NAMESPACE
- it: Is executed as a pre-install and pre-upgrade hook
set:
clusterName: helm-lint
asserts:
- equal:
path: metadata.annotations.helm\.sh/hook
value: pre-install,pre-upgrade
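# helm blocks on pre-install/pre-upgrade hook jobs until they complete, so a
# failing config-test job aborts the install or upgrade before any other
# resources change.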
- it: Does not render hooks when config validation is disabled
set:
clusterName: helm-lint
validateConfigOnDeploy: false
asserts:
- hasDocuments:
count: 0
- it: should set resources on auth predeploy job when set in values
template: auth/predeploy_job.yaml
values:
- ../.lint/resources.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].resources.limits.cpu
value: 2
- equal:
path: spec.template.spec.containers[0].resources.limits.memory
value: 4Gi
- equal:
path: spec.template.spec.containers[0].resources.requests.cpu
value: 1
- equal:
path: spec.template.spec.containers[0].resources.requests.memory
value: 2Gi
- it: should set resources on proxy predeploy job when set in values
template: proxy/predeploy_job.yaml
values:
- ../.lint/resources.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].resources.limits.cpu
value: 2
- equal:
path: spec.template.spec.containers[0].resources.limits.memory
value: 4Gi
- equal:
path: spec.template.spec.containers[0].resources.requests.cpu
value: 1
- equal:
path: spec.template.spec.containers[0].resources.requests.memory
value: 2Gi
- it: should set imagePullSecrets on proxy predeploy job when set in values
template: proxy/predeploy_job.yaml
values:
- ../.lint/imagepullsecrets.yaml
asserts:
- equal:
path: spec.template.spec.imagePullSecrets[0].name
value: myRegistryKeySecretName
- matchSnapshot:
path: spec.template.spec.imagePullSecrets
- it: should set imagePullSecrets on auth predeploy job when set in values
template: auth/predeploy_job.yaml
values:
- ../.lint/imagepullsecrets.yaml
asserts:
- equal:
path: spec.template.spec.imagePullSecrets[0].name
value: myRegistryKeySecretName
- matchSnapshot:
path: spec.template.spec.imagePullSecrets


@@ -0,0 +1,29 @@
suite: Proxy Certificate
templates:
- proxy/certificate.yaml
tests:
- it: should request a certificate for cluster name when cert-manager is enabled (cert-manager.yaml)
values:
- ../.lint/cert-manager.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: Certificate
- matchSnapshot:
path: spec.dnsNames
- matchSnapshot:
path: spec.issuerRef
- it: should request a certificate for cluster name when cert-manager is enabled (cert-secret.yaml)
values:
- ../.lint/cert-secret.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: Certificate
- matchSnapshot:
path: spec.dnsNames
- matchSnapshot:
path: spec.issuerRef


@@ -0,0 +1,235 @@
suite: ConfigMap
templates:
- proxy/config.yaml
tests:
- it: matches snapshot for log-basic.yaml
values:
- ../.lint/log-basic.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for log-extra.yaml
values:
- ../.lint/log-extra.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for public-addresses.yaml
values:
- ../.lint/public-addresses.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: wears annotations (annotations.yaml)
values:
- ../.lint/annotations.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- equal:
path: metadata.annotations.kubernetes\.io/config
value: test-annotation
- equal:
path: metadata.annotations.kubernetes\.io/config-different
value: 2
- it: matches snapshot for proxy-listener-mode-multiplex.yaml
values:
- ../.lint/proxy-listener-mode-multiplex.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for proxy-listener-mode-separate.yaml
values:
- ../.lint/proxy-listener-mode-separate.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for separate-mongo-listener.yaml
values:
- ../.lint/separate-mongo-listener.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for separate-postgres-listener.yaml
values:
- ../.lint/separate-postgres-listener.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for aws-ha-acme.yaml
values:
- ../.lint/aws-ha-acme.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for existing-tls-secret.yaml
values:
- ../.lint/existing-tls-secret.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for acme-on.yaml
values:
- ../.lint/acme-on.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: matches snapshot for acme-uri-staging.yaml
values:
- ../.lint/acme-uri-staging.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: generates a config with a clusterName containing a regular string
set:
clusterName: "helm-test.example.com"
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: fails when clusterName contains a colon followed by a regular string
set:
clusterName: "helm-test:cluster-1"
asserts:
- failedTemplate:
errorMessage: "clusterName must not contain a colon, you can override the cluster's public address with publicAddr"
- it: fails when clusterName contains a port
set:
clusterName: "helm-test.example.com:443"
asserts:
- failedTemplate:
errorMessage: "clusterName must not contain a colon, you can override the cluster's public address with publicAddr"
- it: generates a config with proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled=true
chart:
version: 13.2.0
values:
- ../.lint/ingress.yaml
set:
clusterName: "helm-test.example.com"
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: generates a config with proxy_service.trust_x_forwarded_for=true when version = 14.0.0-rc.1 and ingress.enabled=true
chart:
version: "14.0.0-rc.1"
values:
- ../.lint/ingress.yaml
set:
clusterName: "helm-test.example.com"
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled is not set
chart:
version: 13.2.0
set:
clusterName: "helm-test.example.com"
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled=true
chart:
version: 13.1.5
values:
- ../.lint/ingress.yaml
set:
clusterName: "helm-test.example.com"
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
- it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled is not set
chart:
version: 13.1.5
set:
clusterName: "helm-test.example.com"
asserts:
- hasDocuments:
count: 1
- isKind:
of: ConfigMap
- matchSnapshot:
path: data.teleport\.yaml
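# taken together, the four version-gated tests above imply the rendered
# config gains roughly the following block only on chart version >= 13.2.0
# with an ingress enabled (a sketch, not the literal template output):
#   proxy_service:
#     trust_x_forwarded_for: true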


@@ -0,0 +1,899 @@
suite: Proxy Deployment
templates:
- proxy/deployment.yaml
- proxy/config.yaml
tests:
- it: sets Deployment annotations when specified
template: proxy/deployment.yaml
values:
- ../.lint/annotations.yaml
asserts:
- equal:
path: metadata.annotations.kubernetes\.io/deployment
value: test-annotation
- equal:
path: metadata.annotations.kubernetes\.io/deployment-different
value: 3
- it: sets Pod annotations when specified
template: proxy/deployment.yaml
values:
- ../.lint/annotations.yaml
asserts:
- equal:
path: spec.template.metadata.annotations.kubernetes\.io/pod
value: test-annotation
- equal:
path: spec.template.metadata.annotations.kubernetes\.io/pod-different
value: 4
- it: should not have more than one replica if no certificate is passed
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- equal:
path: spec.replicas
value: 1
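# presumably because, without a supplied certificate, the proxy falls back
# to built-in ACME issuance, which is not safe to run across replicas.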
- it: should have multiple replicas by default when a certificate is passed through a secret
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
tls:
existingSecretName: my-certs
asserts:
- equal:
path: spec.replicas
value: 2
- it: should have multiple replicas by default when certManager is configured
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
highAvailability:
certManager:
enabled: true
asserts:
- equal:
path: spec.replicas
value: 2
- it: should have multiple replicas when global replicaCount is set and a certificate is passed
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
highAvailability:
replicaCount: 3
certManager:
enabled: true
asserts:
- equal:
path: spec.replicas
value: 3
- it: should have a single replica when proxy-specific replicaCount is set to 1 and a cert is passed
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
highAvailability:
certManager:
enabled: true
proxy:
highAvailability:
replicaCount: 1
asserts:
- equal:
path: spec.replicas
value: 1
- it: should set affinity when set in values
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
highAvailability:
replicaCount: 3
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: gravitational.io/dedicated
operator: In
values:
- teleport
asserts:
- isNotNull:
path: spec.template.spec.affinity
- matchSnapshot:
path: spec.template.spec.affinity
- it: should set required affinity when highAvailability.requireAntiAffinity is set
template: proxy/deployment.yaml
values:
- ../.lint/aws-ha-antiaffinity.yaml
asserts:
- isNotNull:
path: spec.template.spec.affinity
- isNotNull:
path: spec.template.spec.affinity.podAntiAffinity
- isNotNull:
path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
- matchSnapshot:
path: spec.template.spec.affinity
- it: should set tolerations when set in values
template: proxy/deployment.yaml
values:
- ../.lint/tolerations.yaml
asserts:
- isNotNull:
path: spec.template.spec.tolerations
- matchSnapshot:
path: spec.template.spec.tolerations
- it: should set resources when set in values
template: proxy/deployment.yaml
values:
- ../.lint/resources.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].resources.limits.cpu
value: 2
- equal:
path: spec.template.spec.containers[0].resources.limits.memory
value: 4Gi
- equal:
path: spec.template.spec.containers[0].resources.requests.cpu
value: 1
- equal:
path: spec.template.spec.containers[0].resources.requests.memory
value: 2Gi
- matchSnapshot:
path: spec.template.spec
- it: should set securityContext when set in values
template: proxy/deployment.yaml
values:
- ../.lint/security-context.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation
value: false
- equal:
path: spec.template.spec.containers[0].securityContext.privileged
value: false
- equal:
path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
value: false
- equal:
path: spec.template.spec.containers[0].securityContext.runAsGroup
value: 99
- equal:
path: spec.template.spec.containers[0].securityContext.runAsNonRoot
value: true
- equal:
path: spec.template.spec.containers[0].securityContext.runAsUser
value: 99
- matchSnapshot:
path: spec.template.spec
- it: should not set securityContext when is empty object (default value)
template: proxy/deployment.yaml
values:
- ../.lint/security-context-empty.yaml
asserts:
- isNull:
path: spec.template.spec.containers[0].securityContext
- it: should set securityContext for initContainers when set in values
template: proxy/deployment.yaml
values:
- ../.lint/security-context.yaml
asserts:
- equal:
path: spec.template.spec.initContainers[0].securityContext.allowPrivilegeEscalation
value: false
- equal:
path: spec.template.spec.initContainers[0].securityContext.privileged
value: false
- equal:
path: spec.template.spec.initContainers[0].securityContext.readOnlyRootFilesystem
value: false
- equal:
path: spec.template.spec.initContainers[0].securityContext.runAsGroup
value: 99
- equal:
path: spec.template.spec.initContainers[0].securityContext.runAsNonRoot
value: true
- equal:
path: spec.template.spec.initContainers[0].securityContext.runAsUser
value: 99
- matchSnapshot:
path: spec.template.spec
- it: should not set securityContext for initContainers when is empty object (default value)
template: proxy/deployment.yaml
values:
- ../.lint/security-context-empty.yaml
asserts:
- isNull:
path: spec.template.spec.initContainers[0].securityContext
# we can't use the dynamic chart version or appVersion as a variable in the tests,
# so we override it manually and check that gets set instead
# this saves us having to update the test every time we cut a new release
- it: should use enterprise image when enterprise is set in values
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
enterprise: true
teleportVersionOverride: 12.2.1
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: public.ecr.aws/gravitational/teleport-ent-distroless:12.2.1
- it: should use OSS image when enterprise is not set in values
template: proxy/deployment.yaml
set:
clusterName: helm-lint
teleportVersionOverride: 12.2.1
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: public.ecr.aws/gravitational/teleport-distroless:12.2.1
- it: should mount TLS certs when cert-manager is enabled
template: proxy/deployment.yaml
values:
- ../.lint/gcp-ha-acme.yaml
- ../.lint/initcontainers.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport-tls
name: "teleport-tls"
readOnly: true
- contains:
path: spec.template.spec.volumes
content:
name: teleport-tls
secret:
secretName: teleport-tls
- contains:
path: spec.template.spec.initContainers[1].volumeMounts
content:
mountPath: /etc/teleport-tls
name: "teleport-tls"
readOnly: true
- contains:
path: spec.template.spec.initContainers[2].volumeMounts
content:
mountPath: /etc/teleport-tls
name: "teleport-tls"
readOnly: true
- it: should mount ConfigMap containing Teleport config
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport
name: "config"
readOnly: true
- contains:
path: spec.template.spec.volumes
content:
name: config
configMap:
name: RELEASE-NAME-proxy
- it: should mount extraVolumes and extraVolumeMounts on container and initContainers
template: proxy/deployment.yaml
values:
- ../.lint/volumes.yaml
- ../.lint/initcontainers.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /path/to/mount
name: my-mount
- contains:
path: spec.template.spec.initContainers[1].volumeMounts
content:
mountPath: /path/to/mount
name: my-mount
- contains:
path: spec.template.spec.initContainers[2].volumeMounts
content:
mountPath: /path/to/mount
name: my-mount
- contains:
path: spec.template.spec.volumes
content:
name: my-mount
secret:
secretName: mySecret
- it: should set imagePullPolicy when set in values
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
imagePullPolicy: Always
asserts:
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: Always
- it: should set environment when extraEnv set in values
template: proxy/deployment.yaml
values:
- ../.lint/extra-env.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: SOME_ENVIRONMENT_VARIABLE
value: "some-value"
- it: should set imagePullSecrets when set in values
template: proxy/deployment.yaml
values:
- ../.lint/imagepullsecrets.yaml
asserts:
- equal:
path: spec.template.spec.imagePullSecrets[0].name
value: myRegistryKeySecretName
- matchSnapshot:
path: spec.template.spec.imagePullSecrets
- it: should provision initContainer correctly when set in values
template: proxy/deployment.yaml
values:
- ../.lint/initcontainers.yaml
- ../.lint/resources.yaml
- ../.lint/extra-env.yaml
asserts:
- contains:
path: spec.template.spec.initContainers[1].args
content: "echo test"
- equal:
path: spec.template.spec.initContainers[1].name
value: "teleport-init"
- equal:
path: spec.template.spec.initContainers[1].image
value: "alpine"
- equal:
path: spec.template.spec.initContainers[1].resources.limits.cpu
value: 2
- equal:
path: spec.template.spec.initContainers[1].resources.limits.memory
value: 4Gi
- equal:
path: spec.template.spec.initContainers[1].resources.requests.cpu
value: 1
- equal:
path: spec.template.spec.initContainers[1].resources.requests.memory
value: 2Gi
- contains:
path: spec.template.spec.initContainers[2].args
content: "echo test2"
- equal:
path: spec.template.spec.initContainers[2].name
value: "teleport-init2"
- equal:
path: spec.template.spec.initContainers[2].image
value: "alpine"
- equal:
path: spec.template.spec.initContainers[2].resources.limits.cpu
value: 2
- equal:
path: spec.template.spec.initContainers[2].resources.limits.memory
value: 4Gi
- equal:
path: spec.template.spec.initContainers[2].resources.requests.cpu
value: 1
- equal:
path: spec.template.spec.initContainers[2].resources.requests.memory
value: 2Gi
- matchSnapshot:
path: spec.template.spec.initContainers
- it: should add insecureSkipProxyTLSVerify to args when set in values
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
insecureSkipProxyTLSVerify: true
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--insecure"
- it: should expose diag port
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: diag
containerPort: 3000
protocol: TCP
- it: should expose tls port
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: tls
containerPort: 3080
protocol: TCP
- it: should expose tls port when proxyListenerMode is multiplex
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
proxyListenerMode: multiplex
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: tls
containerPort: 3080
protocol: TCP
- it: should not expose proxy peering port by default
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- notContains:
path: spec.template.spec.containers[0].ports
content:
name: proxypeering
containerPort: 3021
protocol: TCP
- it: should expose proxy peering port when enterprise is true
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
enterprise: true
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: proxypeering
containerPort: 3021
protocol: TCP
- it: should expose sshproxy port by default
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: sshproxy
containerPort: 3023
protocol: TCP
- it: should not expose sshproxy port when proxyListenerMode is multiplex
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
proxyListenerMode: multiplex
asserts:
- notContains:
path: spec.template.spec.containers[0].ports
content:
name: sshproxy
containerPort: 3023
protocol: TCP
- it: should expose sshtun port by default
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: sshtun
containerPort: 3024
protocol: TCP
- it: should not expose sshtun port when proxyListenerMode is multiplex
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
proxyListenerMode: multiplex
asserts:
- notContains:
path: spec.template.spec.containers[0].ports
content:
name: sshtun
containerPort: 3024
protocol: TCP
- it: should expose k8s port by default
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: kube
containerPort: 3026
protocol: TCP
- it: should not expose k8s port when proxyListenerMode is multiplex
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
proxyListenerMode: multiplex
asserts:
- notContains:
path: spec.template.spec.containers[0].ports
content:
name: kube
containerPort: 3026
protocol: TCP
- it: should expose mysql port by default
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: mysql
containerPort: 3036
protocol: TCP
- it: should not expose mysql port when proxyListenerMode is multiplex
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
proxyListenerMode: multiplex
asserts:
- notContains:
path: spec.template.spec.containers[0].ports
content:
name: mysql
containerPort: 3036
protocol: TCP
- it: should expose postgres port when separate postgres listener is enabled
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
separatePostgresListener: true
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: postgres
containerPort: 5432
protocol: TCP
- it: should not expose postgres port when proxyListenerMode is multiplex and separate postgres listener is enabled
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
proxyListenerMode: multiplex
separatePostgresListener: true
asserts:
- notContains:
path: spec.template.spec.containers[0].ports
content:
name: postgres
containerPort: 5432
protocol: TCP
- it: should expose mongo port when separate mongo listener is enabled
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
separateMongoListener: true
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: mongo
containerPort: 27017
protocol: TCP
- it: should not expose mongo port when proxyListenerMode is multiplex and separate mongo listener is enabled
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
proxyListenerMode: multiplex
separateMongoListener: true
asserts:
- notContains:
path: spec.template.spec.containers[0].ports
content:
name: mongo
containerPort: 27017
protocol: TCP
- it: should set postStart command if set in values
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
postStart:
command: ["/bin/echo", "test"]
asserts:
- equal:
path: spec.template.spec.containers[0].lifecycle.postStart.exec.command
value: ["/bin/echo", "test"]
- it: should add and mount emptyDir for data
template: proxy/deployment.yaml
set:
clusterName: helm-lint.example.com
asserts:
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /var/lib/teleport
name: data
- contains:
path: spec.template.spec.volumes
content:
name: data
emptyDir: {}
- it: should set priorityClassName when set in values
template: proxy/deployment.yaml
values:
- ../.lint/priority-class-name.yaml
asserts:
- equal:
path: spec.template.spec.priorityClassName
value: system-cluster-critical
- it: should set probeTimeoutSeconds when set in values
template: proxy/deployment.yaml
values:
- ../.lint/probe-timeout-seconds.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].livenessProbe.timeoutSeconds
value: 5
- equal:
path: spec.template.spec.containers[0].readinessProbe.timeoutSeconds
value: 5
- it: should not mount TLS secrets when highAvailability.certManager.enabled is false and tls.existingSecretName is not set
template: proxy/deployment.yaml
set:
clusterName: helm-lint-test-cluster
asserts:
- notContains:
path: spec.template.spec.volumes
content:
name: teleport-tls
secret:
secretName: teleport-tls
- notContains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport-tls
name: teleport-tls
readOnly: true
- it: should mount cert-manager TLS secret when highAvailability.certManager.enabled is true
template: proxy/deployment.yaml
values:
- ../.lint/cert-manager.yaml
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: teleport-tls
secret:
secretName: teleport-tls
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport-tls
name: teleport-tls
readOnly: true
- it: should mount tls.existingSecretName when set in values
template: proxy/deployment.yaml
values:
- ../.lint/existing-tls-secret.yaml
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: teleport-tls
secret:
secretName: helm-lint-existing-tls-secret
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport-tls
name: teleport-tls
readOnly: true
- it: should mount tls.existingCASecretName and set environment when set in values
template: proxy/deployment.yaml
values:
- ../.lint/existing-tls-secret-with-ca.yaml
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: teleport-tls-ca
secret:
secretName: helm-lint-existing-tls-secret-ca
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport-tls-ca
name: teleport-tls-ca
readOnly: true
- contains:
path: spec.template.spec.containers[0].env
content:
name: SSL_CERT_FILE
value: /etc/teleport-tls-ca/ca.pem
- it: should mount tls.existingCASecretName and set extra environment when set in values
template: proxy/deployment.yaml
values:
- ../.lint/existing-tls-secret-with-ca.yaml
- ../.lint/extra-env.yaml
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: teleport-tls-ca
secret:
secretName: helm-lint-existing-tls-secret-ca
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /etc/teleport-tls-ca
name: teleport-tls-ca
readOnly: true
- contains:
path: spec.template.spec.containers[0].env
content:
name: SSL_CERT_FILE
value: /etc/teleport-tls-ca/ca.pem
- contains:
path: spec.template.spec.containers[0].env
content:
name: SOME_ENVIRONMENT_VARIABLE
value: some-value
- it: should set minReadySeconds when replicaCount > 1
template: proxy/deployment.yaml
set:
clusterName: helm-lint
highAvailability:
certManager:
enabled: true
replicaCount: 3
minReadySeconds: 60
asserts:
- equal:
path: spec.minReadySeconds
value: 60
- it: should not set minReadySeconds when replicaCount = 1
template: proxy/deployment.yaml
set:
chartMode: scratch
highAvailability:
minReadySeconds: 60
replicaCount: 1
asserts:
- equal:
path: spec.minReadySeconds
value: null
- it: should set nodeSelector when set in values
template: proxy/deployment.yaml
set:
chartMode: scratch
clusterName: helm-lint.example.com
nodeSelector:
role: bastion
environment: security
asserts:
- isNotNull:
path: spec.template.spec.nodeSelector
- matchSnapshot:
path: spec.template.spec
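# token mounting below mirrors the auth deployment suite: regular automounted
# tokens before Kubernetes 1.21, an explicit projected volume from 1.21 on.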
- it: mounts regular tokens on older Kubernetes versions
template: proxy/deployment.yaml
set:
clusterName: helm-lint
capabilities:
majorVersion: 1
minorVersion: 18
asserts:
- notEqual:
path: spec.template.spec.automountServiceAccountToken
value: false
- notContains:
path: spec.template.spec.volumes
content:
name: proxy-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
- notContains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: proxy-serviceaccount-token
readOnly: true
- it: mounts tokens through projected volumes on newer Kubernetes versions
template: proxy/deployment.yaml
set:
clusterName: helm-lint
capabilities:
majorVersion: 1
minorVersion: 21
asserts:
- equal:
path: spec.template.spec.automountServiceAccountToken
value: false
- contains:
path: spec.template.spec.volumes
content:
name: proxy-serviceaccount-token
projected:
sources:
- serviceAccountToken:
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: proxy-serviceaccount-token
readOnly: true


@@ -0,0 +1,23 @@
suite: Proxy PodDisruptionBudget
templates:
- proxy/pdb.yaml
tests:
- it: should not create a PDB when disabled in values
set:
highAvailability:
podDisruptionBudget:
enabled: false
asserts:
- hasDocuments:
count: 0
- it: should create a PDB when enabled in values (pdb.yaml)
values:
- ../.lint/pdb.yaml
asserts:
- hasDocuments:
count: 1
- isKind:
of: PodDisruptionBudget
- equal:
path: spec.minAvailable
value: 2


@@ -0,0 +1,381 @@
suite: Proxy Service
templates:
- proxy/service.yaml
tests:
- it: uses a LoadBalancer by default
set:
clusterName: teleport.example.com
asserts:
- hasDocuments:
count: 1
- isKind:
of: Service
- equal:
path: spec.type
value: LoadBalancer
- it: uses a ClusterIP when service.type=ClusterIP
set:
clusterName: teleport.example.com
service:
type: ClusterIP
asserts:
- hasDocuments:
count: 1
- isKind:
of: Service
- equal:
path: spec.type
value: ClusterIP
- it: uses a ClusterIP when proxy.service.type=ClusterIP
set:
clusterName: teleport.example.com
service:
type: NodePort
proxy:
service:
type: ClusterIP
asserts:
- hasDocuments:
count: 1
- isKind:
of: Service
- equal:
path: spec.type
value: ClusterIP
- it: fails to deploy when ingress.enabled=true and proxy.service.type is set to LoadBalancer (default)
set:
clusterName: teleport.example.com
ingress:
enabled: true
asserts:
- failedTemplate:
errorMessage: "proxy.service.type must not be LoadBalancer when using an ingress - any load balancer should be provisioned by your ingress controller. Set proxy.service.type=ClusterIP instead"
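# per the error message above, a working ingress combination would be e.g.:
#   ingress:
#     enabled: true
#   proxy:
#     service:
#       type: ClusterIP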
- it: uses a ClusterIP when ingress.enabled=true and service.type=ClusterIP
set:
clusterName: teleport.example.com
ingress:
enabled: true
service:
type: ClusterIP
asserts:
- hasDocuments:
count: 1
- isKind:
of: Service
- equal:
path: spec.type
value: ClusterIP
- it: uses a ClusterIP when ingress.enabled=true and proxy.service.type=ClusterIP
set:
clusterName: teleport.example.com
ingress:
enabled: true
proxy:
service:
type: ClusterIP
asserts:
- hasDocuments:
count: 1
- isKind:
of: Service
- equal:
path: spec.type
value: ClusterIP
- it: uses a NodePort when ingress.enabled=true and proxy.service.type=NodePort
set:
clusterName: teleport.example.com
ingress:
enabled: true
proxy:
service:
type: NodePort
asserts:
- hasDocuments:
count: 1
- isKind:
of: Service
- equal:
path: spec.type
value: NodePort
- it: uses a NodePort when ingress.enabled=true and service.type=NodePort
set:
clusterName: teleport.example.com
ingress:
enabled: true
service:
type: NodePort
asserts:
- hasDocuments:
count: 1
- isKind:
of: Service
- equal:
path: spec.type
value: NodePort
- it: sets AWS annotations when chartMode=aws
set:
clusterName: teleport.example.com
chartMode: aws
asserts:
- hasDocuments:
count: 1
- isKind:
of: Service
- equal:
path: spec.type
value: LoadBalancer
- equal:
path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type
value: nlb
- equal:
path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol
value: tcp
- equal:
path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-cross-zone-load-balancing-enabled
value: "true"
- it: sets service annotations when specified
values:
- ../.lint/annotations.yaml
asserts:
- equal:
path: metadata.annotations.kubernetes\.io/service
value: test-annotation
- equal:
path: metadata.annotations.kubernetes\.io/service-different
value: 5
- it: adds a separate Postgres listener port when separatePostgresListener is true
values:
- ../.lint/separate-postgres-listener.yaml
asserts:
- contains:
path: spec.ports
content:
name: postgres
port: 5432
targetPort: 5432
protocol: TCP
- it: does not add a separate Postgres listener port when separatePostgresListener is true and ingress.enabled=true
values:
- ../.lint/separate-postgres-listener.yaml
set:
ingress:
enabled: true
proxyListenerMode: multiplex
service:
type: ClusterIP
asserts:
- notContains:
path: spec.ports
content:
name: postgres
port: 5432
targetPort: 5432
protocol: TCP
- it: adds a separate Mongo listener port when separateMongoListener is true
values:
- ../.lint/separate-mongo-listener.yaml
asserts:
- contains:
path: spec.ports
content:
name: mongo
port: 27017
targetPort: 27017
protocol: TCP
- it: does not add a separate Mongo listener port when separateMongoListener is true and ingress.enabled=true
values:
- ../.lint/separate-mongo-listener.yaml
set:
ingress:
enabled: true
proxyListenerMode: multiplex
service:
type: ClusterIP
asserts:
- notContains:
path: spec.ports
content:
name: mongo
port: 27017
targetPort: 27017
protocol: TCP
- it: sets AWS backend protocol annotation to ssl when in AWS mode and ACM annotation is set
values:
- ../.lint/aws-ha.yaml
set:
annotations:
service:
service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:1234567890:certificate/a857a76c-51d0-4d3d-8000-465bb3e9829b
service.beta.kubernetes.io/aws-load-balancer-ssl-ports: 443
asserts:
- equal:
path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol
value: ssl
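# i.e. when an ACM certificate annotation is present, TLS terminates at the
# load balancer and the chart appears to switch the backend protocol to ssl
# so traffic from the NLB to the proxy pods stays encrypted.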
- it: does not add AWS backend protocol annotation when in AWS mode, ACM annotation is set and ingress is enabled
values:
- ../.lint/aws-ha.yaml
set:
ingress:
enabled: true
service:
type: ClusterIP
annotations:
service:
service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:1234567890:certificate/a857a76c-51d0-4d3d-8000-465bb3e9829b
service.beta.kubernetes.io/aws-load-balancer-ssl-ports: 443
asserts:
- isNull:
path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol
- it: sets AWS backend protocol annotation to tcp when in AWS mode and ACM annotation is not set
values:
- ../.lint/aws-ha.yaml
asserts:
- equal:
path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol
value: tcp
- it: does not set AWS backend protocol annotation when in AWS mode, ACM annotation is not set and ingress is enabled
values:
- ../.lint/aws-ha.yaml
set:
ingress:
enabled: true
service:
type: ClusterIP
annotations:
service:
          # required so at least one service annotation exists, avoiding a non-map type error
service.beta.kubernetes.io/random-annotation: helm-lint
asserts:
- isNull:
path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol
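  # proxyListenerMode controls the port layout: separate mode exposes one
  # Service port per protocol, while multiplex is assumed to serve everything
  # over the single web port (see the multiplex cases below).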
- it: exposes separate listener ports by default
values:
- ../.lint/example-minimal-standalone.yaml
asserts:
- matchSnapshot:
path: spec.ports
- it: does not expose separate listener ports by default when ingress.enabled=true
values:
- ../.lint/example-minimal-standalone.yaml
set:
ingress:
enabled: true
proxyListenerMode: multiplex
service:
type: ClusterIP
asserts:
      - notContains:
          path: spec.ports
          content:
            name: sshproxy
            port: 3023
            targetPort: 3023
            protocol: TCP
      - notContains:
          path: spec.ports
          content:
            name: k8s
            port: 3026
            targetPort: 3026
            protocol: TCP
      - notContains:
          path: spec.ports
          content:
            name: sshtun
            port: 3024
            targetPort: 3024
            protocol: TCP
      - notContains:
          path: spec.ports
          content:
            name: mysql
            port: 3036
            targetPort: 3036
            protocol: TCP
- matchSnapshot:
path: spec.ports
- it: exposes separate listener ports when running in separate mode
values:
- ../.lint/proxy-listener-mode-separate.yaml
asserts:
- matchSnapshot:
path: spec.ports
- it: does not expose separate listener ports when running in separate mode and ingress.enabled=true
values:
- ../.lint/proxy-listener-mode-separate.yaml
set:
ingress:
enabled: true
proxyListenerMode: multiplex
service:
type: ClusterIP
asserts:
      - notContains:
          path: spec.ports
          content:
            name: sshproxy
            port: 3023
            targetPort: 3023
            protocol: TCP
      - notContains:
          path: spec.ports
          content:
            name: k8s
            port: 3026
            targetPort: 3026
            protocol: TCP
      - notContains:
          path: spec.ports
          content:
            name: sshtun
            port: 3024
            targetPort: 3024
            protocol: TCP
      - notContains:
          path: spec.ports
          content:
            name: mysql
            port: 3036
            targetPort: 3036
            protocol: TCP
- matchSnapshot:
path: spec.ports
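  # In multiplex mode all protocols are assumed to share one TLS port, so the
  # snapshot should contain a single entry.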
- it: exposes a single port when running in multiplex mode
values:
- ../.lint/proxy-listener-mode-multiplex.yaml
asserts:
- matchSnapshot:
path: spec.ports
- it: exposes a single port when running in multiplex mode and ingress.enabled=true
values:
- ../.lint/proxy-listener-mode-multiplex.yaml
set:
ingress:
enabled: true
service:
type: ClusterIP
asserts:
- matchSnapshot:
path: spec.ports


@@ -0,0 +1,22 @@
suite: Proxy ServiceAccount
templates:
- proxy/serviceaccount.yaml
tests:
- it: sets ServiceAccount annotations when specified
values:
- ../.lint/annotations.yaml
asserts:
- equal:
path: metadata.annotations.kubernetes\.io/serviceaccount
value: test-annotation
- equal:
path: metadata.annotations.kubernetes\.io/serviceaccount-different
value: 6
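  # The ../.lint/service-account.yaml fixture presumably sets
  # serviceAccount.name: helm-lint; the chart is expected to append "-proxy"
  # for the proxy's ServiceAccount.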
- it: changes ServiceAccount name when specified and appends "-proxy"
values:
- ../.lint/service-account.yaml
asserts:
- equal:
path: metadata.name
value: "helm-lint-proxy"


@@ -0,0 +1,35 @@
suite: PodSecurityPolicy
templates:
- psp.yaml
tests:
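  # capabilities pins the simulated Kubernetes version for the render;
  # PodSecurityPolicy should only be emitted on clusters that still serve the
  # policy/v1beta1 API, which was removed in Kubernetes 1.25.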
- it: creates a PodSecurityPolicy when enabled in values and supported
capabilities:
majorVersion: 1
minorVersion: 22
set:
podSecurityPolicy:
enabled: true
asserts:
- hasDocuments:
count: 3
- documentIndex: 0
isKind:
of: PodSecurityPolicy
- documentIndex: 1
isKind:
of: Role
- documentIndex: 2
isKind:
of: RoleBinding
- matchSnapshot: {}
  - it: does not create a PodSecurityPolicy when enabled in values but not supported
    capabilities:
      majorVersion: 1
      minorVersion: 25
    set:
      podSecurityPolicy:
        enabled: true
asserts:
- hasDocuments:
count: 0