diff --git a/helm/old/teleport-cluster/.lint/acme-off.yaml b/helm/old/teleport-cluster/.lint/acme-off.yaml new file mode 100644 index 0000000..29a9052 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/acme-off.yaml @@ -0,0 +1,3 @@ +clusterName: test-cluster-name +extraArgs: +- "--insecure" diff --git a/helm/old/teleport-cluster/.lint/acme-on.yaml b/helm/old/teleport-cluster/.lint/acme-on.yaml new file mode 100644 index 0000000..02821dc --- /dev/null +++ b/helm/old/teleport-cluster/.lint/acme-on.yaml @@ -0,0 +1,3 @@ +clusterName: test-acme-cluster +acme: true +acmeEmail: test@email.com diff --git a/helm/old/teleport-cluster/.lint/acme-uri-staging.yaml b/helm/old/teleport-cluster/.lint/acme-uri-staging.yaml new file mode 100644 index 0000000..2794d6d --- /dev/null +++ b/helm/old/teleport-cluster/.lint/acme-uri-staging.yaml @@ -0,0 +1,4 @@ +clusterName: test-acme-cluster +acme: true +acmeEmail: test@email.com +acmeURI: https://acme-staging-v02.api.letsencrypt.org/directory diff --git a/helm/old/teleport-cluster/.lint/affinity.yaml b/helm/old/teleport-cluster/.lint/affinity.yaml new file mode 100644 index 0000000..e984e7d --- /dev/null +++ b/helm/old/teleport-cluster/.lint/affinity.yaml @@ -0,0 +1,29 @@ +clusterName: test-gcp-cluster +chartMode: gcp +gcp: + projectId: gcpproj-123456 + backendTable: test-teleport-firestore-storage-collection + auditLogTable: test-teleport-firestore-auditlog-collection + sessionRecordingBucket: test-gcp-session-storage-bucket +highAvailability: + replicaCount: 2 +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: gravitational.io/dedicated + operator: In + values: + - teleport + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - teleport + topologyKey: kubernetes.io/hostname + weight: 1 diff --git 
a/helm/old/teleport-cluster/.lint/annotations.yaml b/helm/old/teleport-cluster/.lint/annotations.yaml new file mode 100644 index 0000000..4e9fce5 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/annotations.yaml @@ -0,0 +1,17 @@ +clusterName: helm-lint +annotations: + config: + kubernetes.io/config: "test-annotation" + kubernetes.io/config-different: 2 + deployment: + kubernetes.io/deployment: "test-annotation" + kubernetes.io/deployment-different: 3 + pod: + kubernetes.io/pod: "test-annotation" + kubernetes.io/pod-different: 4 + service: + kubernetes.io/service: "test-annotation" + kubernetes.io/service-different: 5 + serviceAccount: + kubernetes.io/serviceaccount: "test-annotation" + kubernetes.io/serviceaccount-different: 6 diff --git a/helm/old/teleport-cluster/.lint/auth-connector-name.yaml b/helm/old/teleport-cluster/.lint/auth-connector-name.yaml new file mode 100644 index 0000000..4e77b8b --- /dev/null +++ b/helm/old/teleport-cluster/.lint/auth-connector-name.yaml @@ -0,0 +1,3 @@ +clusterName: helm-lint +authentication: + connectorName: "okta" diff --git a/helm/old/teleport-cluster/.lint/auth-disable-local.yaml b/helm/old/teleport-cluster/.lint/auth-disable-local.yaml new file mode 100644 index 0000000..b4d6aa1 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/auth-disable-local.yaml @@ -0,0 +1,5 @@ +clusterName: helm-lint +authentication: + type: "github" + localAuth: false + secondFactor: "off" diff --git a/helm/old/teleport-cluster/.lint/auth-locking-mode.yaml b/helm/old/teleport-cluster/.lint/auth-locking-mode.yaml new file mode 100644 index 0000000..4c64cfb --- /dev/null +++ b/helm/old/teleport-cluster/.lint/auth-locking-mode.yaml @@ -0,0 +1,3 @@ +clusterName: helm-lint +authentication: + lockingMode: "strict" diff --git a/helm/old/teleport-cluster/.lint/auth-passwordless.yaml b/helm/old/teleport-cluster/.lint/auth-passwordless.yaml new file mode 100644 index 0000000..9e33d9c --- /dev/null +++ 
b/helm/old/teleport-cluster/.lint/auth-passwordless.yaml @@ -0,0 +1,4 @@ +clusterName: helm-lint +authentication: + connectorName: passwordless + secondFactor: webauthn diff --git a/helm/old/teleport-cluster/.lint/auth-type-legacy.yaml b/helm/old/teleport-cluster/.lint/auth-type-legacy.yaml new file mode 100644 index 0000000..5420bf1 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/auth-type-legacy.yaml @@ -0,0 +1,4 @@ +clusterName: helm-lint +authentication: + type: "this-should-be-ignored" +authenticationType: "github" diff --git a/helm/old/teleport-cluster/.lint/auth-type.yaml b/helm/old/teleport-cluster/.lint/auth-type.yaml new file mode 100644 index 0000000..9c71d82 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/auth-type.yaml @@ -0,0 +1,3 @@ +clusterName: helm-lint +authentication: + type: "github" diff --git a/helm/old/teleport-cluster/.lint/auth-webauthn-legacy.yaml b/helm/old/teleport-cluster/.lint/auth-webauthn-legacy.yaml new file mode 100644 index 0000000..fd69d97 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/auth-webauthn-legacy.yaml @@ -0,0 +1,10 @@ +clusterName: helm-lint +authentication: + secondFactor: "off" # this should be overridden +authenticationSecondFactor: + secondFactor: "on" + webauthn: + attestationAllowedCas: + - "/etc/ssl/certs/ca-certificates.crt" + attestationDeniedCas: + - "/etc/ssl/certs/ca-certificates.crt" diff --git a/helm/old/teleport-cluster/.lint/auth-webauthn.yaml b/helm/old/teleport-cluster/.lint/auth-webauthn.yaml new file mode 100644 index 0000000..e8702e1 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/auth-webauthn.yaml @@ -0,0 +1,8 @@ +clusterName: helm-lint +authentication: + secondFactor: "on" + webauthn: + attestationAllowedCas: + - "/etc/ssl/certs/ca-certificates.crt" + attestationDeniedCas: + - "/etc/ssl/certs/ca-certificates.crt" diff --git a/helm/old/teleport-cluster/.lint/aws-dynamodb-autoscaling.yaml b/helm/old/teleport-cluster/.lint/aws-dynamodb-autoscaling.yaml new file mode 100644 index 
0000000..c1dde28 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/aws-dynamodb-autoscaling.yaml @@ -0,0 +1,14 @@ +clusterName: test-aws-cluster +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket + dynamoAutoScaling: true + readMinCapacity: 5 + readMaxCapacity: 100 + readTargetValue: 50.0 + writeMinCapacity: 5 + writeMaxCapacity: 100 + writeTargetValue: 50.0 diff --git a/helm/old/teleport-cluster/.lint/aws-ha-acme.yaml b/helm/old/teleport-cluster/.lint/aws-ha-acme.yaml new file mode 100644 index 0000000..c2c4d2e --- /dev/null +++ b/helm/old/teleport-cluster/.lint/aws-ha-acme.yaml @@ -0,0 +1,14 @@ +clusterName: test-aws-cluster +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket +highAvailability: + replicaCount: 3 + certManager: + enabled: true + issuerName: letsencrypt-production +labels: + env: aws diff --git a/helm/old/teleport-cluster/.lint/aws-ha-antiaffinity.yaml b/helm/old/teleport-cluster/.lint/aws-ha-antiaffinity.yaml new file mode 100644 index 0000000..0e639a2 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/aws-ha-antiaffinity.yaml @@ -0,0 +1,12 @@ +clusterName: test-aws-cluster +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket +highAvailability: + replicaCount: 3 + requireAntiAffinity: true +labels: + env: aws diff --git a/helm/old/teleport-cluster/.lint/aws-ha-log.yaml b/helm/old/teleport-cluster/.lint/aws-ha-log.yaml new file mode 100644 index 0000000..733466b --- /dev/null +++ b/helm/old/teleport-cluster/.lint/aws-ha-log.yaml @@ -0,0 +1,17 @@ +clusterName: test-aws-cluster +chartMode: aws +log: + level: DEBUG +aws: + region: us-west-2 + 
backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + auditLogMirrorOnStdout: true + sessionRecordingBucket: test-s3-session-storage-bucket +highAvailability: + replicaCount: 2 + certManager: + enabled: true + issuerName: letsencrypt-production +labels: + env: aws diff --git a/helm/old/teleport-cluster/.lint/aws-ha.yaml b/helm/old/teleport-cluster/.lint/aws-ha.yaml new file mode 100644 index 0000000..5bb2120 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/aws-ha.yaml @@ -0,0 +1,11 @@ +clusterName: test-aws-cluster +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket +highAvailability: + replicaCount: 3 +labels: + env: aws diff --git a/helm/old/teleport-cluster/.lint/aws.yaml b/helm/old/teleport-cluster/.lint/aws.yaml new file mode 100644 index 0000000..0c822e3 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/aws.yaml @@ -0,0 +1,11 @@ +clusterName: test-aws-cluster +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket +acme: true +acmeEmail: test@email.com +labels: + env: aws diff --git a/helm/old/teleport-cluster/.lint/azure.yaml b/helm/old/teleport-cluster/.lint/azure.yaml new file mode 100644 index 0000000..f755c36 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/azure.yaml @@ -0,0 +1,11 @@ +clusterName: test-azure-cluster +chartMode: azure +azure: + databaseHost: "mypostgresinstance.postgres.database.azure.com" + databaseUser: "teleport" + backendDatabase: "teleport_backend" + auditLogDatabase: "teleport_audit" + auditLogMirrorOnStdout: true + sessionRecordingStorageAccount: "mystorageaccount.blob.core.windows.net" + clientID: "1234" + databasePoolMaxConnections: 100 diff --git a/helm/old/teleport-cluster/.lint/cert-manager.yaml 
b/helm/old/teleport-cluster/.lint/cert-manager.yaml new file mode 100644 index 0000000..7748890 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/cert-manager.yaml @@ -0,0 +1,15 @@ +clusterName: test-cluster +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket +highAvailability: + replicaCount: 3 + certManager: + addCommonName: true + enabled: true + issuerGroup: custom.cert-manager.io + issuerName: custom + issuerKind: CustomClusterIssuer diff --git a/helm/old/teleport-cluster/.lint/cert-secret.yaml b/helm/old/teleport-cluster/.lint/cert-secret.yaml new file mode 100644 index 0000000..d86eb31 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/cert-secret.yaml @@ -0,0 +1,15 @@ +clusterName: test-cluster +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket +annotations: + certSecret: + kubernetes.io/cert-secret: value +highAvailability: + replicaCount: 3 + certManager: + enabled: true + issuerName: letsencrypt diff --git a/helm/old/teleport-cluster/.lint/example-minimal-standalone.yaml b/helm/old/teleport-cluster/.lint/example-minimal-standalone.yaml new file mode 100644 index 0000000..9cdba9a --- /dev/null +++ b/helm/old/teleport-cluster/.lint/example-minimal-standalone.yaml @@ -0,0 +1,7 @@ +# This setup is not safe for production because the proxy will self-sign its certificate. +# Use those values for testing only + +# The chart should deploy and work only with a clusterName. +# This setup can also cause redirection issues if the proxy is contacted with a hostName instead of an IP address +# as it is not aware of its external hostname and will attempt to perform a redirection. 
+clusterName: helm-lint diff --git a/helm/old/teleport-cluster/.lint/existing-tls-secret-with-ca.yaml b/helm/old/teleport-cluster/.lint/existing-tls-secret-with-ca.yaml new file mode 100644 index 0000000..086c628 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/existing-tls-secret-with-ca.yaml @@ -0,0 +1,4 @@ +clusterName: test-cluster-name +tls: + existingSecretName: helm-lint-existing-tls-secret + existingCASecretName: helm-lint-existing-tls-secret-ca diff --git a/helm/old/teleport-cluster/.lint/existing-tls-secret.yaml b/helm/old/teleport-cluster/.lint/existing-tls-secret.yaml new file mode 100644 index 0000000..37f07ea --- /dev/null +++ b/helm/old/teleport-cluster/.lint/existing-tls-secret.yaml @@ -0,0 +1,3 @@ +clusterName: test-cluster-name +tls: + existingSecretName: helm-lint-existing-tls-secret diff --git a/helm/old/teleport-cluster/.lint/extra-env.yaml b/helm/old/teleport-cluster/.lint/extra-env.yaml new file mode 100644 index 0000000..ea0d122 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/extra-env.yaml @@ -0,0 +1,4 @@ +clusterName: helm-lint.example.com +extraEnv: + - name: SOME_ENVIRONMENT_VARIABLE + value: "some-value" diff --git a/helm/old/teleport-cluster/.lint/gcp-ha-acme.yaml b/helm/old/teleport-cluster/.lint/gcp-ha-acme.yaml new file mode 100644 index 0000000..d122907 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/gcp-ha-acme.yaml @@ -0,0 +1,14 @@ +clusterName: test-gcp-cluster +chartMode: gcp +gcp: + projectId: gcpproj-123456 + backendTable: test-teleport-firestore-storage-collection + auditLogTable: test-teleport-firestore-auditlog-collection + sessionRecordingBucket: test-gcp-session-storage-bucket +highAvailability: + replicaCount: 3 + certManager: + enabled: true + issuerName: letsencrypt-production +labels: + env: gcp diff --git a/helm/old/teleport-cluster/.lint/gcp-ha-antiaffinity.yaml b/helm/old/teleport-cluster/.lint/gcp-ha-antiaffinity.yaml new file mode 100644 index 0000000..9743cad --- /dev/null +++ 
b/helm/old/teleport-cluster/.lint/gcp-ha-antiaffinity.yaml @@ -0,0 +1,12 @@ +clusterName: test-gcp-cluster +chartMode: gcp +gcp: + projectId: gcpproj-123456 + backendTable: test-teleport-firestore-storage-collection + auditLogTable: test-teleport-firestore-auditlog-collection + sessionRecordingBucket: test-gcp-session-storage-bucket +highAvailability: + replicaCount: 3 + requireAntiAffinity: true +labels: + env: gcp diff --git a/helm/old/teleport-cluster/.lint/gcp-ha-log.yaml b/helm/old/teleport-cluster/.lint/gcp-ha-log.yaml new file mode 100644 index 0000000..d13f73c --- /dev/null +++ b/helm/old/teleport-cluster/.lint/gcp-ha-log.yaml @@ -0,0 +1,17 @@ +clusterName: test-gcp-cluster +chartMode: gcp +log: + level: DEBUG +gcp: + projectId: gcpproj-123456 + backendTable: test-teleport-firestore-storage-collection + auditLogTable: test-teleport-firestore-auditlog-collection + auditLogMirrorOnStdout: true + sessionRecordingBucket: test-gcp-session-storage-bucket +highAvailability: + replicaCount: 3 + certManager: + enabled: true + issuerName: letsencrypt-production +labels: + env: gcp diff --git a/helm/old/teleport-cluster/.lint/gcp-ha-workload.yaml b/helm/old/teleport-cluster/.lint/gcp-ha-workload.yaml new file mode 100644 index 0000000..0568bbf --- /dev/null +++ b/helm/old/teleport-cluster/.lint/gcp-ha-workload.yaml @@ -0,0 +1,12 @@ +clusterName: test-gcp-cluster +chartMode: gcp +gcp: + projectId: gcpproj-123456 + backendTable: test-teleport-firestore-storage-collection + auditLogTable: test-teleport-firestore-auditlog-collection + sessionRecordingBucket: test-gcp-session-storage-bucket + credentialSecretName: "" +highAvailability: + replicaCount: 3 +labels: + env: gcp diff --git a/helm/old/teleport-cluster/.lint/gcp-ha.yaml b/helm/old/teleport-cluster/.lint/gcp-ha.yaml new file mode 100644 index 0000000..26b43d4 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/gcp-ha.yaml @@ -0,0 +1,11 @@ +clusterName: test-gcp-cluster +chartMode: gcp +gcp: + projectId: 
gcpproj-123456 + backendTable: test-teleport-firestore-storage-collection + auditLogTable: test-teleport-firestore-auditlog-collection + sessionRecordingBucket: test-gcp-session-storage-bucket +highAvailability: + replicaCount: 3 +labels: + env: gcp diff --git a/helm/old/teleport-cluster/.lint/gcp.yaml b/helm/old/teleport-cluster/.lint/gcp.yaml new file mode 100644 index 0000000..56a395b --- /dev/null +++ b/helm/old/teleport-cluster/.lint/gcp.yaml @@ -0,0 +1,11 @@ +clusterName: test-gcp-cluster +chartMode: gcp +gcp: + projectId: gcpproj-123456 + backendTable: test-teleport-firestore-storage-collection + auditLogTable: test-teleport-firestore-auditlog-collection + sessionRecordingBucket: test-gcp-session-storage-bucket +acme: true +acmeEmail: test@email.com +labels: + env: gcp diff --git a/helm/old/teleport-cluster/.lint/imagepullsecrets.yaml b/helm/old/teleport-cluster/.lint/imagepullsecrets.yaml new file mode 100644 index 0000000..f414f8c --- /dev/null +++ b/helm/old/teleport-cluster/.lint/imagepullsecrets.yaml @@ -0,0 +1,4 @@ +clusterName: test-standalone-cluster +chartMode: standalone +imagePullSecrets: +- name: myRegistryKeySecretName diff --git a/helm/old/teleport-cluster/.lint/ingress-publicaddr.yaml b/helm/old/teleport-cluster/.lint/ingress-publicaddr.yaml new file mode 100644 index 0000000..0e9692a --- /dev/null +++ b/helm/old/teleport-cluster/.lint/ingress-publicaddr.yaml @@ -0,0 +1,8 @@ +clusterName: teleport.example.com +publicAddr: ["my-teleport-ingress.example.com:443"] +ingress: + enabled: true + suppressAutomaticWildcards: true +proxyListenerMode: multiplex +service: + type: ClusterIP diff --git a/helm/old/teleport-cluster/.lint/ingress.yaml b/helm/old/teleport-cluster/.lint/ingress.yaml new file mode 100644 index 0000000..e5fbbc4 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/ingress.yaml @@ -0,0 +1,6 @@ +clusterName: teleport.example.com +ingress: + enabled: true +proxyListenerMode: multiplex +service: + type: ClusterIP diff --git 
a/helm/old/teleport-cluster/.lint/initcontainers.yaml b/helm/old/teleport-cluster/.lint/initcontainers.yaml new file mode 100644 index 0000000..a558e45 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/initcontainers.yaml @@ -0,0 +1,8 @@ +clusterName: helm-lint +initContainers: +- name: "teleport-init" + image: "alpine" + args: ["echo test"] +- name: "teleport-init2" + image: "alpine" + args: ["echo test2"] diff --git a/helm/old/teleport-cluster/.lint/kube-cluster-name.yaml b/helm/old/teleport-cluster/.lint/kube-cluster-name.yaml new file mode 100644 index 0000000..ccd510b --- /dev/null +++ b/helm/old/teleport-cluster/.lint/kube-cluster-name.yaml @@ -0,0 +1,2 @@ +clusterName: test-aws-cluster +kubeClusterName: test-kube-cluster diff --git a/helm/old/teleport-cluster/.lint/log-basic.yaml b/helm/old/teleport-cluster/.lint/log-basic.yaml new file mode 100644 index 0000000..037e189 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/log-basic.yaml @@ -0,0 +1,4 @@ +clusterName: test-log-cluster +log: + format: json + level: INFO diff --git a/helm/old/teleport-cluster/.lint/log-extra.yaml b/helm/old/teleport-cluster/.lint/log-extra.yaml new file mode 100644 index 0000000..7f3e21b --- /dev/null +++ b/helm/old/teleport-cluster/.lint/log-extra.yaml @@ -0,0 +1,6 @@ +clusterName: test-log-cluster +log: + format: json + level: DEBUG + output: /var/lib/teleport/test.log + extraFields: ["level", "timestamp", "component", "caller"] diff --git a/helm/old/teleport-cluster/.lint/log-legacy.yaml b/helm/old/teleport-cluster/.lint/log-legacy.yaml new file mode 100644 index 0000000..b28d3ab --- /dev/null +++ b/helm/old/teleport-cluster/.lint/log-legacy.yaml @@ -0,0 +1,2 @@ +clusterName: test-log-cluster +logLevel: DEBUG diff --git a/helm/old/teleport-cluster/.lint/node-selector.yaml b/helm/old/teleport-cluster/.lint/node-selector.yaml new file mode 100644 index 0000000..d3c1f06 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/node-selector.yaml @@ -0,0 +1,4 @@ +clusterName: 
test-cluster-name +nodeSelector: + role: bastion + environment: security diff --git a/helm/old/teleport-cluster/.lint/operator.yaml b/helm/old/teleport-cluster/.lint/operator.yaml new file mode 100644 index 0000000..e390d5b --- /dev/null +++ b/helm/old/teleport-cluster/.lint/operator.yaml @@ -0,0 +1,4 @@ +clusterName: test-cluster-name +operator: + enabled: true +installCRDs: true diff --git a/helm/old/teleport-cluster/.lint/pdb.yaml b/helm/old/teleport-cluster/.lint/pdb.yaml new file mode 100644 index 0000000..0504d09 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/pdb.yaml @@ -0,0 +1,12 @@ +clusterName: helm-lint +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket +highAvailability: + replicaCount: 3 + podDisruptionBudget: + enabled: true + minAvailable: 2 diff --git a/helm/old/teleport-cluster/.lint/persistence-legacy.yaml b/helm/old/teleport-cluster/.lint/persistence-legacy.yaml new file mode 100644 index 0000000..0d9a124 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/persistence-legacy.yaml @@ -0,0 +1,4 @@ +clusterName: test-persistence-cluster +standalone: + existingClaimName: "" + volumeSize: 10Gi diff --git a/helm/old/teleport-cluster/.lint/podmonitor.yaml b/helm/old/teleport-cluster/.lint/podmonitor.yaml new file mode 100644 index 0000000..1c263f5 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/podmonitor.yaml @@ -0,0 +1,6 @@ +clusterName: test-kube-cluster-name +podMonitor: + enabled: true + additionalLabels: + prometheus: default + interval: 30s diff --git a/helm/old/teleport-cluster/.lint/priority-class-name.yaml b/helm/old/teleport-cluster/.lint/priority-class-name.yaml new file mode 100644 index 0000000..3386375 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/priority-class-name.yaml @@ -0,0 +1,4 @@ +clusterName: helm-lint +# These are just sample values to test the chart. 
+# They are not intended to be guidelines or suggestions for running teleport. +priorityClassName: "system-cluster-critical" diff --git a/helm/old/teleport-cluster/.lint/probe-timeout-seconds.yaml b/helm/old/teleport-cluster/.lint/probe-timeout-seconds.yaml new file mode 100644 index 0000000..a239435 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/probe-timeout-seconds.yaml @@ -0,0 +1,4 @@ +clusterName: helm-lint +# These are just sample values to test the chart. +# They are not intended to be guidelines or suggestions for running teleport. +probeTimeoutSeconds: 5 diff --git a/helm/old/teleport-cluster/.lint/proxy-listener-mode-multiplex.yaml b/helm/old/teleport-cluster/.lint/proxy-listener-mode-multiplex.yaml new file mode 100644 index 0000000..87ac0b3 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/proxy-listener-mode-multiplex.yaml @@ -0,0 +1,2 @@ +clusterName: test-proxy-listener-mode +proxyListenerMode: multiplex diff --git a/helm/old/teleport-cluster/.lint/proxy-listener-mode-separate.yaml b/helm/old/teleport-cluster/.lint/proxy-listener-mode-separate.yaml new file mode 100644 index 0000000..3be257a --- /dev/null +++ b/helm/old/teleport-cluster/.lint/proxy-listener-mode-separate.yaml @@ -0,0 +1,2 @@ +clusterName: test-proxy-listener-mode +proxyListenerMode: separate diff --git a/helm/old/teleport-cluster/.lint/public-addresses.yaml b/helm/old/teleport-cluster/.lint/public-addresses.yaml new file mode 100644 index 0000000..1122492 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/public-addresses.yaml @@ -0,0 +1,11 @@ +clusterName: helm-lint +publicAddr: ["loadbalancer.example.com:443"] +sshPublicAddr: ["loadbalancer.example.com:3023"] +tunnelPublicAddr: ["loadbalancer.example.com:3024"] +postgresPublicAddr: ["loadbalancer.example.com:5432"] +mongoPublicAddr: ["loadbalancer.example.com:27017"] +mysqlPublicAddr: ["loadbalancer.example.com:3036"] +kubePublicAddr: ["loadbalancer.example.com:3026"] + +separatePostgresListener: true 
+separateMongoListener: true diff --git a/helm/old/teleport-cluster/.lint/resources.yaml b/helm/old/teleport-cluster/.lint/resources.yaml new file mode 100644 index 0000000..070a85c --- /dev/null +++ b/helm/old/teleport-cluster/.lint/resources.yaml @@ -0,0 +1,10 @@ +clusterName: helm-lint +# These are just sample values to test the chart. +# They are not intended to be guidelines or suggestions for running teleport. +resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi diff --git a/helm/old/teleport-cluster/.lint/security-context-empty.yaml b/helm/old/teleport-cluster/.lint/security-context-empty.yaml new file mode 100644 index 0000000..14ff546 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/security-context-empty.yaml @@ -0,0 +1 @@ +clusterName: helm-lint diff --git a/helm/old/teleport-cluster/.lint/security-context.yaml b/helm/old/teleport-cluster/.lint/security-context.yaml new file mode 100644 index 0000000..32e4015 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/security-context.yaml @@ -0,0 +1,8 @@ +clusterName: helm-lint +securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: false + runAsGroup: 99 + runAsNonRoot: true + runAsUser: 99 diff --git a/helm/old/teleport-cluster/.lint/separate-mongo-listener.yaml b/helm/old/teleport-cluster/.lint/separate-mongo-listener.yaml new file mode 100644 index 0000000..23bac08 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/separate-mongo-listener.yaml @@ -0,0 +1,2 @@ +clusterName: helm-lint +separateMongoListener: true diff --git a/helm/old/teleport-cluster/.lint/separate-postgres-listener.yaml b/helm/old/teleport-cluster/.lint/separate-postgres-listener.yaml new file mode 100644 index 0000000..0a1196f --- /dev/null +++ b/helm/old/teleport-cluster/.lint/separate-postgres-listener.yaml @@ -0,0 +1,2 @@ +clusterName: helm-lint +separatePostgresListener: true diff --git a/helm/old/teleport-cluster/.lint/service-account.yaml 
b/helm/old/teleport-cluster/.lint/service-account.yaml new file mode 100644 index 0000000..a6f9678 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/service-account.yaml @@ -0,0 +1,7 @@ +clusterName: helm-lint +serviceAccount: + create: true + name: helm-lint +annotations: + serviceAccount: + kubernetes.io/serviceaccount: "test-annotation" diff --git a/helm/old/teleport-cluster/.lint/service.yaml b/helm/old/teleport-cluster/.lint/service.yaml new file mode 100644 index 0000000..0a8eed6 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/service.yaml @@ -0,0 +1,5 @@ +clusterName: helm-lint +service: + type: LoadBalancer + spec: + loadBalancerIP: 1.2.3.4 diff --git a/helm/old/teleport-cluster/.lint/session-recording.yaml b/helm/old/teleport-cluster/.lint/session-recording.yaml new file mode 100644 index 0000000..8b41012 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/session-recording.yaml @@ -0,0 +1,2 @@ +clusterName: helm-lint +sessionRecording: "node-sync" diff --git a/helm/old/teleport-cluster/.lint/standalone-custom-storage-class.yaml b/helm/old/teleport-cluster/.lint/standalone-custom-storage-class.yaml new file mode 100644 index 0000000..4cf5ade --- /dev/null +++ b/helm/old/teleport-cluster/.lint/standalone-custom-storage-class.yaml @@ -0,0 +1,9 @@ +clusterName: test-standalone-cluster +chartMode: standalone +persistence: + enabled: true + storageClassName: ebs-ssd +acme: true +acmeEmail: test@email.com +labels: + env: standalone diff --git a/helm/old/teleport-cluster/.lint/standalone-customsize.yaml b/helm/old/teleport-cluster/.lint/standalone-customsize.yaml new file mode 100644 index 0000000..c994faa --- /dev/null +++ b/helm/old/teleport-cluster/.lint/standalone-customsize.yaml @@ -0,0 +1,9 @@ +clusterName: test-standalone-cluster +chartMode: standalone +persistence: + enabled: true + volumeSize: 50Gi +acme: true +acmeEmail: test@email.com +labels: + env: standalone diff --git a/helm/old/teleport-cluster/.lint/standalone-existingpvc.yaml 
b/helm/old/teleport-cluster/.lint/standalone-existingpvc.yaml new file mode 100644 index 0000000..89292ef --- /dev/null +++ b/helm/old/teleport-cluster/.lint/standalone-existingpvc.yaml @@ -0,0 +1,9 @@ +clusterName: test-standalone-cluster +chartMode: standalone +persistence: + enabled: true + existingClaimName: teleport-storage +acme: true +acmeEmail: test@email.com +labels: + env: standalone diff --git a/helm/old/teleport-cluster/.lint/tolerations.yaml b/helm/old/teleport-cluster/.lint/tolerations.yaml new file mode 100644 index 0000000..69d4161 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/tolerations.yaml @@ -0,0 +1,18 @@ +clusterName: test-aws-cluster +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket +highAvailability: + replicaCount: 3 +tolerations: +- key: "dedicated" + operator: "Equal" + value: "teleport" + effect: "NoExecute" +- key: "dedicated" + operator: "Equal" + value: "teleport" + effect: "NoSchedule" diff --git a/helm/old/teleport-cluster/.lint/version-override.yaml b/helm/old/teleport-cluster/.lint/version-override.yaml new file mode 100644 index 0000000..689e958 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/version-override.yaml @@ -0,0 +1,5 @@ +clusterName: test-cluster-name +teleportVersionOverride: 5.2.1 +labels: + env: test + version: 5.2.1 diff --git a/helm/old/teleport-cluster/.lint/volumes.yaml b/helm/old/teleport-cluster/.lint/volumes.yaml new file mode 100644 index 0000000..a1ce300 --- /dev/null +++ b/helm/old/teleport-cluster/.lint/volumes.yaml @@ -0,0 +1,8 @@ +clusterName: helm-lint +extraVolumeMounts: +- name: "my-mount" + mountPath: "/path/to/mount" +extraVolumes: +- name: "my-mount" + secret: + secretName: "mySecret" diff --git a/helm/old/teleport-cluster/Chart.yaml b/helm/old/teleport-cluster/Chart.yaml new file mode 100644 index 0000000..73b2a2d --- /dev/null +++ 
b/helm/old/teleport-cluster/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +appVersion: 13.3.9 +dependencies: +- condition: installCRDs,operator.enabled + name: teleport-operator + repository: "" + version: 13.3.9 +description: Teleport is an access platform for your infrastructure +icon: https://goteleport.com/images/logos/logo-teleport-square.svg +keywords: +- Teleport +name: teleport-cluster +version: 13.3.9 diff --git a/helm/old/teleport-cluster/README.md b/helm/old/teleport-cluster/README.md new file mode 100644 index 0000000..27e6235 --- /dev/null +++ b/helm/old/teleport-cluster/README.md @@ -0,0 +1,64 @@ +# Teleport Cluster + +This chart sets up a single node Teleport cluster. +It uses a persistent volume claim for storage. +Great for getting started with Teleport. + +## Important Notices + +- The chart version follows the Teleport version. e.g. chart v10.x can run Teleport v10.x and v11.x, but is not compatible with Teleport 9.x +- Teleport does mutual TLS to authenticate clients. It currently does not support running behind a L7 LoadBalancer, like a Kubernetes `Ingress`. It requires being exposed through a L4 LoadBalancer (Kubernetes `Service`). + +## Getting Started + +### Single-node example + +To install Teleport in a separate namespace and provision a web certificate using Let's Encrypt, run: + +```bash +$ helm install teleport/teleport-cluster \ + --set acme=true \ + --set acmeEmail=alice@example.com \ + --set clusterName=teleport.example.com\ + --create-namespace \ + --namespace=teleport-cluster \ + ./teleport-cluster/ +``` + +Finally, configure the DNS for `teleport.example.com` to point to the newly created LoadBalancer. + +Note: this guide uses the built-in ACME client to get certificates. +In this setup, Teleport nodes cannot be replicated. 
If you want to run multiple +Teleport replicas, you must provide a certificate through `tls.existingSecretName` +or by installing [cert-manager](https://cert-manager.io/docs/) and setting the `highAvailability.certManager.*` values. + +### Replicated setup guides + +- [Running an HA Teleport cluster in Kubernetes using an AWS EKS Cluster](https://goteleport.com/docs/deploy-a-cluster/helm-deployments/aws/) +- [Running an HA Teleport cluster in Kubernetes using a Google Cloud GKE cluster](https://goteleport.com/docs/deploy-a-cluster/helm-deployments/gcp/) +- [Running a Teleport cluster in Kubernetes with a custom Teleport config](https://goteleport.com/docs/deploy-a-cluster/helm-deployments/custom/) + +### Creating first user + +The first user can be created by executing a command in one of the auth pods. + +```shell +kubectl exec -it -n teleport-cluster statefulset/teleport-cluster-auth -- tctl users add my-username --roles=editor,auditor,access +``` + +The command should output a registration link to finalize the user creation. + +## Uninstalling + +```bash +helm uninstall --namespace teleport-cluster teleport-cluster +``` + +## Documentation + +See https://goteleport.com/docs/kubernetes-access/helm/guides/ for guides on setting up HA Teleport clusters +in EKS or GKE, plus a comprehensive chart reference. + +## Contributing to the chart + +Please read [CONTRIBUTING.md](../CONTRIBUTING.md) before raising a pull request to this chart. diff --git a/helm/old/teleport-cluster/charts/teleport-operator/Chart.yaml b/helm/old/teleport-cluster/charts/teleport-operator/Chart.yaml new file mode 100644 index 0000000..944d2ad --- /dev/null +++ b/helm/old/teleport-cluster/charts/teleport-operator/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 13.3.9 +description: Teleport Operator provides management of select Teleport resources. 
+icon: https://goteleport.com/images/logos/logo-teleport-square.svg +keywords: +- Teleport +name: teleport-operator +version: 13.3.9 diff --git a/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_githubconnectors.yaml b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_githubconnectors.yaml new file mode 100644 index 0000000..40bbc08 --- /dev/null +++ b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_githubconnectors.yaml @@ -0,0 +1,168 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleportgithubconnectors.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportGithubConnector + listKind: TeleportGithubConnectorList + plural: teleportgithubconnectors + shortNames: + - githubconnector + - githubconnectors + singular: teleportgithubconnector + scope: Namespaced + versions: + - name: v3 + schema: + openAPIV3Schema: + description: GithubConnector is the Schema for the githubconnectors API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GithubConnector resource definition v3 from Teleport + properties: + api_endpoint_url: + description: APIEndpointURL is the URL of the API endpoint of the + Github instance this connector is for. + type: string + client_id: + description: ClientID is the Github OAuth app client ID. + type: string + client_secret: + description: ClientSecret is the Github OAuth app client secret. + type: string + display: + description: Display is the connector display name. + type: string + endpoint_url: + description: EndpointURL is the URL of the GitHub instance this connector + is for. + type: string + redirect_url: + description: RedirectURL is the authorization callback URL. + type: string + teams_to_roles: + description: TeamsToRoles maps Github team memberships onto allowed + roles. + items: + properties: + organization: + description: Organization is a Github organization a user belongs + to. + type: string + roles: + description: Roles is a list of allowed logins for this org/team. + items: + type: string + nullable: true + type: array + team: + description: Team is a team within the organization a user belongs + to. + type: string + type: object + type: array + type: object + status: + description: TeleportGithubConnectorStatus defines the observed state + of TeleportGithubConnector + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_loginrules.yaml b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_loginrules.yaml new file mode 100644 index 0000000..7b5adc6 --- /dev/null +++ b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_loginrules.yaml @@ -0,0 +1,145 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleportloginrules.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportLoginRule + listKind: TeleportLoginRuleList + plural: teleportloginrules + shortNames: + - loginrule + - loginrules + singular: teleportloginrule + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: LoginRule is the Schema for the loginrules API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LoginRule resource definition v1 from Teleport + properties: + priority: + description: Priority is the priority of the login rule relative to + other login rules in the same cluster. Login rules with a lower + numbered priority will be evaluated first. + format: int32 + type: integer + traits_expression: + description: TraitsExpression is a predicate expression which should + return the desired traits for the user upon login. + type: string + traits_map: + additionalProperties: + items: + type: string + type: array + description: TraitsMap is a map of trait keys to lists of predicate + expressions which should evaluate to the desired values for that + trait. + nullable: true + type: object + type: object + status: + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_oidcconnectors.yaml b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_oidcconnectors.yaml new file mode 100644 index 0000000..1dc0e16 --- /dev/null +++ b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_oidcconnectors.yaml @@ -0,0 +1,213 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleportoidcconnectors.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportOIDCConnector + listKind: TeleportOIDCConnectorList + plural: teleportoidcconnectors + shortNames: + - oidcconnector + - oidcconnectors + singular: teleportoidcconnector + scope: Namespaced + versions: + - name: v3 + schema: + openAPIV3Schema: + description: OIDCConnector is the Schema for the oidcconnectors API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OIDCConnector resource definition v3 from Teleport + properties: + acr_values: + description: ACR is an Authentication Context Class Reference value. + The meaning of the ACR value is context-specific and varies for + identity providers. + type: string + allow_unverified_email: + description: AllowUnverifiedEmail tells the connector to accept OIDC + users with unverified emails. + type: boolean + claims_to_roles: + description: ClaimsToRoles specifies a dynamic mapping from claims + to roles. + items: + properties: + claim: + description: Claim is a claim name. + type: string + roles: + description: Roles is a list of static teleport roles to match. + items: + type: string + nullable: true + type: array + value: + description: Value is a claim value to match. + type: string + type: object + type: array + client_id: + description: ClientID is the id of the authentication client (Teleport + Auth server). + type: string + client_secret: + description: ClientSecret is used to authenticate the client. + type: string + display: + description: Display is the friendly name for this provider. + type: string + google_admin_email: + description: GoogleAdminEmail is the email of a google admin to impersonate. + type: string + google_service_account: + description: GoogleServiceAccount is a string containing google service + account credentials. + type: string + google_service_account_uri: + description: GoogleServiceAccountURI is a path to a google service + account uri. 
+ type: string + issuer_url: + description: IssuerURL is the endpoint of the provider, e.g. https://accounts.google.com. + type: string + max_age: + description: MaxAge is the amount of time that user logins are valid + for. If a user logs in, but then does not login again within this + time period, they will be forced to re-authenticate. + format: duration + type: string + prompt: + description: Prompt is an optional OIDC prompt. An empty string omits + prompt. If not specified, it defaults to select_account for backwards + compatibility. + type: string + provider: + description: Provider is the external identity provider. + type: string + redirect_url: + description: RedirectURLs is a list of callback URLs which the identity + provider can use to redirect the client back to the Teleport Proxy + to complete authentication. This list should match the URLs on the + provider's side. The URL used for a given auth request will be chosen + to match the requesting Proxy's public address. If there is no match, + the first url in the list will be used. + items: + type: string + type: array + scope: + description: Scope specifies additional scopes set by provider. + items: + type: string + nullable: true + type: array + username_claim: + description: UsernameClaim specifies the name of the claim from the + OIDC connector to be used as the user's username. + type: string + type: object + status: + description: TeleportOIDCConnectorStatus defines the observed state of + TeleportOIDCConnector + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_oktaimportrules.yaml b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_oktaimportrules.yaml new file mode 100644 index 0000000..071d628 --- /dev/null +++ b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_oktaimportrules.yaml @@ -0,0 +1,183 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleportoktaimportrules.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportOktaImportRule + listKind: TeleportOktaImportRuleList + plural: teleportoktaimportrules + shortNames: + - oktaimportrule + - oktaimportrules + singular: teleportoktaimportrule + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OktaImportRule is the Schema for the oktaimportrules API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OktaImportRule resource definition v1 from Teleport + properties: + mappings: + description: Mappings is a list of matches that will map match conditions + to labels. + items: + properties: + add_labels: + description: AddLabels specifies which labels to add if any + of the previous matches match. + nullable: true + properties: + key: + type: string + value: + type: string + type: object + match: + description: Match is a set of matching rules for this mapping. + If any of these match, then the mapping will be applied. + items: + properties: + app_ids: + description: AppIDs is a list of app IDs to match against. + items: + type: string + nullable: true + type: array + app_name_regexes: + description: AppNameRegexes is a list of regexes to match + against app names. + items: + type: string + nullable: true + type: array + group_ids: + description: GroupIDs is a list of group IDs to match + against. + items: + type: string + nullable: true + type: array + group_name_regexes: + description: GroupNameRegexes is a list of regexes to + match against group names. + items: + type: string + nullable: true + type: array + type: object + nullable: true + type: array + type: object + nullable: true + type: array + priority: + description: Priority represents the priority of the rule application. + Lower numbered rules will be applied first. 
+ format: int32 + type: integer + type: object + status: + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. 
The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_provisiontokens.yaml b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_provisiontokens.yaml new file mode 100644 index 0000000..81c7d92 --- /dev/null +++ b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_provisiontokens.yaml @@ -0,0 +1,353 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleportprovisiontokens.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportProvisionToken + listKind: TeleportProvisionTokenList + plural: teleportprovisiontokens + shortNames: + - provisiontoken + - provisiontokens + singular: teleportprovisiontoken + scope: Namespaced + 
versions: + - name: v2 + schema: + openAPIV3Schema: + description: ProvisionToken is the Schema for the provisiontokens API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProvisionToken resource definition v2 from Teleport + properties: + allow: + description: Allow is a list of TokenRules, nodes using this token + must match one allow rule to use this token. + items: + properties: + aws_account: + description: AWSAccount is the AWS account ID. + type: string + aws_arn: + description: AWSARN is used for the IAM join method, the AWS + identity of joining nodes must match this ARN. Supports wildcards + "*" and "?". + type: string + aws_regions: + description: AWSRegions is used for the EC2 join method and + is a list of AWS regions a node is allowed to join from. + items: + type: string + nullable: true + type: array + aws_role: + description: AWSRole is used for the EC2 join method and is + the the ARN of the AWS role that the auth server will assume + in order to call the ec2 API. + type: string + type: object + nullable: true + type: array + aws_iid_ttl: + description: AWSIIDTTL is the TTL to use for AWS EC2 Instance Identity + Documents used to join the cluster with this token. 
+ format: duration + type: string + azure: + description: Azure allows the configuration of options specific to + the "azure" join method. + nullable: true + properties: + allow: + description: Allow is a list of Rules, nodes using this token + must match one allow rule to use this token. + items: + properties: + resource_groups: + items: + type: string + nullable: true + type: array + subscription: + type: string + type: object + nullable: true + type: array + type: object + bot_name: + description: BotName is the name of the bot this token grants access + to, if any + type: string + circleci: + description: CircleCI allows the configuration of options specific + to the "circleci" join method. + nullable: true + properties: + allow: + description: Allow is a list of TokenRules, nodes using this token + must match one allow rule to use this token. + items: + properties: + context_id: + type: string + project_id: + type: string + type: object + nullable: true + type: array + organization_id: + type: string + type: object + gcp: + description: GCP allows the configuration of options specific to the + "gcp" join method. + nullable: true + properties: + allow: + description: Allow is a list of Rules, nodes using this token + must match one allow rule to use this token. + items: + properties: + locations: + items: + type: string + nullable: true + type: array + project_ids: + items: + type: string + nullable: true + type: array + service_accounts: + items: + type: string + nullable: true + type: array + type: object + nullable: true + type: array + type: object + github: + description: GitHub allows the configuration of options specific to + the "github" join method. + nullable: true + properties: + allow: + description: Allow is a list of TokenRules, nodes using this token + must match one allow rule to use this token. 
+ items: + properties: + actor: + type: string + environment: + type: string + ref: + type: string + ref_type: + type: string + repository: + type: string + repository_owner: + type: string + sub: + type: string + workflow: + type: string + type: object + nullable: true + type: array + enterprise_server_host: + description: EnterpriseServerHost allows joining from runners + associated with a GitHub Enterprise Server instance. When unconfigured, + tokens will be validated against github.com, but when configured + to the host of a GHES instance, then the tokens will be validated + against host. This value should be the hostname of the GHES + instance, and should not include the scheme or a path. The instance + must be accessible over HTTPS at this hostname and the certificate + must be trusted by the Auth Server. + type: string + type: object + gitlab: + description: GitLab allows the configuration of options specific to + the "gitlab" join method. + nullable: true + properties: + allow: + description: Allow is a list of TokenRules, nodes using this token + must match one allow rule to use this token. + items: + properties: + environment: + type: string + namespace_path: + type: string + pipeline_source: + type: string + project_path: + type: string + ref: + type: string + ref_type: + type: string + sub: + type: string + type: object + nullable: true + type: array + domain: + description: Domain is the domain of your GitLab instance. This + will default to `gitlab.com` - but can be set to the domain + of your self-hosted GitLab e.g `gitlab.example.com`. + type: string + type: object + join_method: + description: JoinMethod is the joining method required in order to + use this token. Supported joining methods include "token", "ec2", + and "iam". + type: string + kubernetes: + description: Kubernetes allows the configuration of options specific + to the "kubernetes" join method. 
+ nullable: true + properties: + allow: + description: Allow is a list of Rules, nodes using this token + must match one allow rule to use this token. + items: + properties: + service_account: + type: string + type: object + nullable: true + type: array + type: object + roles: + description: Roles is a list of roles associated with the token, that + will be converted to metadata in the SSH and X509 certificates issued + to the user of the token + items: + type: string + nullable: true + type: array + suggested_agent_matcher_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: SuggestedAgentMatcherLabels is a set of labels to be + used by agents to match on resources. When an agent uses this token, + the agent should monitor resources that match those labels. For + databases, this means adding the labels to `db_service.resources.labels`. + Currently, only node-join scripts create a configuration according + to the suggestion. + type: object + suggested_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: SuggestedLabels is a set of labels that resources should + set when using this token to enroll themselves in the cluster. Currently, + only node-join scripts create a configuration according to the suggestion. + type: object + type: object + status: + description: TeleportProvisionTokenStatus defines the observed state of + TeleportProvisionToken + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_roles.yaml b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_roles.yaml new file mode 100644 index 0000000..b305702 --- /dev/null +++ b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_roles.yaml @@ -0,0 +1,2386 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleportroles.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportRole + listKind: TeleportRoleList + plural: teleportroles + singular: teleportrole + scope: Namespaced + versions: + - name: v5 + schema: + openAPIV3Schema: + description: Role is the Schema for the roles API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Role resource definition v5 from Teleport + properties: + allow: + description: Allow is the set of conditions evaluated to grant access. + properties: + app_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: AppLabels is a map of labels used as part of the + RBAC system. + type: object + app_labels_expression: + description: AppLabelsExpression is a predicate expression used + to allow/deny access to Apps. + type: string + aws_role_arns: + description: AWSRoleARNs is a list of AWS role ARNs this role + is allowed to assume. + items: + type: string + nullable: true + type: array + azure_identities: + description: AzureIdentities is a list of Azure identities this + role is allowed to assume. + items: + type: string + nullable: true + type: array + cluster_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: ClusterLabels is a map of node labels (used to dynamically + grant access to clusters). + type: object + cluster_labels_expression: + description: ClusterLabelsExpression is a predicate expression + used to allow/deny access to remote Teleport clusters. + type: string + db_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: DatabaseLabels are used in RBAC system to allow/deny + access to databases. + type: object + db_labels_expression: + description: DatabaseLabelsExpression is a predicate expression + used to allow/deny access to Databases. 
+ type: string + db_names: + description: DatabaseNames is a list of database names this role + is allowed to connect to. + items: + type: string + nullable: true + type: array + db_roles: + description: DatabaseRoles is a list of databases roles for automatic + user creation. + items: + type: string + nullable: true + type: array + db_service_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: DatabaseServiceLabels are used in RBAC system to + allow/deny access to Database Services. + type: object + db_service_labels_expression: + description: DatabaseServiceLabelsExpression is a predicate expression + used to allow/deny access to Database Services. + type: string + db_users: + description: DatabaseUsers is a list of databases users this role + is allowed to connect as. + items: + type: string + nullable: true + type: array + desktop_groups: + description: DesktopGroups is a list of groups for created desktop + users to be added to + items: + type: string + nullable: true + type: array + gcp_service_accounts: + description: GCPServiceAccounts is a list of GCP service accounts + this role is allowed to assume. + items: + type: string + nullable: true + type: array + group_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: GroupLabels is a map of labels used as part of the + RBAC system. + type: object + group_labels_expression: + description: GroupLabelsExpression is a predicate expression used + to allow/deny access to user groups. 
+ type: string + host_groups: + description: HostGroups is a list of groups for created users + to be added to + items: + type: string + nullable: true + type: array + host_sudoers: + description: HostSudoers is a list of entries to include in a + users sudoer file + items: + type: string + nullable: true + type: array + impersonate: + description: Impersonate specifies what users and roles this role + is allowed to impersonate by issuing certificates or other possible + means. + nullable: true + properties: + roles: + description: Roles is a list of resources this role is allowed + to impersonate + items: + type: string + nullable: true + type: array + users: + description: Users is a list of resources this role is allowed + to impersonate, could be an empty list or a Wildcard pattern + items: + type: string + nullable: true + type: array + where: + description: Where specifies optional advanced matcher + type: string + type: object + join_sessions: + description: JoinSessions specifies policies to allow users to + join other sessions. + items: + properties: + kinds: + description: Kinds are the session kinds this policy applies + to. + items: + type: string + nullable: true + type: array + modes: + description: Modes is a list of permitted participant modes + for this policy. + items: + type: string + nullable: true + type: array + name: + description: Name is the name of the policy. + type: string + roles: + description: Roles is a list of roles that you can join + the session of. + items: + type: string + nullable: true + type: array + type: object + nullable: true + type: array + kubernetes_groups: + description: KubeGroups is a list of kubernetes groups + items: + type: string + nullable: true + type: array + kubernetes_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: KubernetesLabels is a map of kubernetes cluster labels + used for RBAC. 
+ type: object + kubernetes_labels_expression: + description: KubernetesLabelsExpression is a predicate expression + used to allow/deny access to kubernetes clusters. + type: string + kubernetes_resources: + description: KubernetesResources is the Kubernetes Resources this + Role grants access to. + items: + properties: + kind: + description: Kind specifies the Kubernetes Resource type. + At the moment only "pod" is supported. + type: string + name: + description: Name is the resource name. It supports wildcards. + type: string + namespace: + description: Namespace is the resource namespace. It supports + wildcards. + type: string + type: object + type: array + kubernetes_users: + description: KubeUsers is an optional kubernetes users to impersonate + items: + type: string + nullable: true + type: array + logins: + description: Logins is a list of *nix system logins. + items: + type: string + nullable: true + type: array + node_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: NodeLabels is a map of node labels (used to dynamically + grant access to nodes). + type: object + node_labels_expression: + description: NodeLabelsExpression is a predicate expression used + to allow/deny access to SSH nodes. + type: string + request: + nullable: true + properties: + annotations: + additionalProperties: + items: + type: string + type: array + description: Annotations is a collection of annotations to + be programmatically appended to pending access requests + at the time of their creation. These annotations serve as + a mechanism to propagate extra information to plugins. Since + these annotations support variable interpolation syntax, + they also offer a mechanism for forwarding claims from an + external identity provider, to a plugin via {{ `{{external.trait_name}}` }} + style substitutions. + type: object + claims_to_roles: + description: ClaimsToRoles specifies a mapping from claims + (traits) to teleport roles. 
+ items: + properties: + claim: + description: Claim is a claim name. + type: string + roles: + description: Roles is a list of static teleport roles + to match. + items: + type: string + nullable: true + type: array + value: + description: Value is a claim value to match. + type: string + type: object + type: array + max_duration: + description: MaxDuration is the amount of time the access + will be granted for. If this is zero, the default duration + is used. + format: duration + type: string + roles: + description: Roles is the name of roles which will match the + request rule. + items: + type: string + nullable: true + type: array + search_as_roles: + description: SearchAsRoles is a list of extra roles which + should apply to a user while they are searching for resources + as part of a Resource Access Request, and defines the underlying + roles which will be requested as part of any Resource Access + Request. + items: + type: string + nullable: true + type: array + suggested_reviewers: + description: SuggestedReviewers is a list of reviewer suggestions. These + can be teleport usernames, but that is not a requirement. + items: + type: string + nullable: true + type: array + thresholds: + description: Thresholds is a list of thresholds, one of which + must be met in order for reviews to trigger a state-transition. If + no thresholds are provided, a default threshold of 1 for + approval and denial is used. + items: + properties: + approve: + description: Approve is the number of matching approvals + needed for state-transition. + format: int32 + type: integer + deny: + description: Deny is the number of denials needed for + state-transition. + format: int32 + type: integer + filter: + description: Filter is an optional predicate used to + determine which reviews count toward this threshold. + type: string + name: + description: Name is the optional human-readable name + of the threshold. 
+ type: string + type: object + type: array + type: object + require_session_join: + description: RequireSessionJoin specifies policies for required + users to start a session. + items: + properties: + count: + description: Count is the amount of people that need to + be matched for this policy to be fulfilled. + format: int32 + type: integer + filter: + description: Filter is a predicate that determines what + users count towards this policy. + type: string + kinds: + description: Kinds are the session kinds this policy applies + to. + items: + type: string + nullable: true + type: array + modes: + description: Modes is the list of modes that may be used + to fulfill this policy. + items: + type: string + nullable: true + type: array + name: + description: Name is the name of the policy. + type: string + on_leave: + description: OnLeave is the behaviour that's used when the + policy is no longer fulfilled for a live session. + type: string + type: object + nullable: true + type: array + review_requests: + description: ReviewRequests defines conditions for submitting + access reviews. + nullable: true + properties: + claims_to_roles: + description: ClaimsToRoles specifies a mapping from claims + (traits) to teleport roles. + items: + properties: + claim: + description: Claim is a claim name. + type: string + roles: + description: Roles is a list of static teleport roles + to match. + items: + type: string + nullable: true + type: array + value: + description: Value is a claim value to match. + type: string + type: object + type: array + preview_as_roles: + description: PreviewAsRoles is a list of extra roles which + should apply to a reviewer while they are viewing a Resource + Access Request for the purposes of viewing details such + as the hostname and labels of requested resources. + items: + type: string + nullable: true + type: array + roles: + description: Roles is the name of roles which may be reviewed. 
+ items: + type: string + nullable: true + type: array + where: + description: Where is an optional predicate which further + limits which requests are reviewable. + type: string + type: object + rules: + description: Rules is a list of rules and their access levels. + Rules are a high level construct used for access control. + items: + properties: + actions: + description: Actions specifies optional actions taken when + this rule matches + items: + type: string + nullable: true + type: array + resources: + description: Resources is a list of resources + items: + type: string + nullable: true + type: array + verbs: + description: Verbs is a list of verbs + items: + type: string + nullable: true + type: array + where: + description: Where specifies optional advanced matcher + type: string + type: object + type: array + windows_desktop_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WindowsDesktopLabels are used in the RBAC system + to allow/deny access to Windows desktops. + type: object + windows_desktop_labels_expression: + description: WindowsDesktopLabelsExpression is a predicate expression + used to allow/deny access to Windows desktops. + type: string + windows_desktop_logins: + description: WindowsDesktopLogins is a list of desktop login names + allowed/denied for Windows desktops. + items: + type: string + nullable: true + type: array + type: object + deny: + description: Deny is the set of conditions evaluated to deny access. + Deny takes priority over allow. + properties: + app_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: AppLabels is a map of labels used as part of the + RBAC system. + type: object + app_labels_expression: + description: AppLabelsExpression is a predicate expression used + to allow/deny access to Apps. + type: string + aws_role_arns: + description: AWSRoleARNs is a list of AWS role ARNs this role + is allowed to assume. 
+ items: + type: string + nullable: true + type: array + azure_identities: + description: AzureIdentities is a list of Azure identities this + role is allowed to assume. + items: + type: string + nullable: true + type: array + cluster_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: ClusterLabels is a map of node labels (used to dynamically + grant access to clusters). + type: object + cluster_labels_expression: + description: ClusterLabelsExpression is a predicate expression + used to allow/deny access to remote Teleport clusters. + type: string + db_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: DatabaseLabels are used in RBAC system to allow/deny + access to databases. + type: object + db_labels_expression: + description: DatabaseLabelsExpression is a predicate expression + used to allow/deny access to Databases. + type: string + db_names: + description: DatabaseNames is a list of database names this role + is allowed to connect to. + items: + type: string + nullable: true + type: array + db_roles: + description: DatabaseRoles is a list of databases roles for automatic + user creation. + items: + type: string + nullable: true + type: array + db_service_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: DatabaseServiceLabels are used in RBAC system to + allow/deny access to Database Services. + type: object + db_service_labels_expression: + description: DatabaseServiceLabelsExpression is a predicate expression + used to allow/deny access to Database Services. + type: string + db_users: + description: DatabaseUsers is a list of databases users this role + is allowed to connect as. 
+ items: + type: string + nullable: true + type: array + desktop_groups: + description: DesktopGroups is a list of groups for created desktop + users to be added to + items: + type: string + nullable: true + type: array + gcp_service_accounts: + description: GCPServiceAccounts is a list of GCP service accounts + this role is allowed to assume. + items: + type: string + nullable: true + type: array + group_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: GroupLabels is a map of labels used as part of the + RBAC system. + type: object + group_labels_expression: + description: GroupLabelsExpression is a predicate expression used + to allow/deny access to user groups. + type: string + host_groups: + description: HostGroups is a list of groups for created users + to be added to + items: + type: string + nullable: true + type: array + host_sudoers: + description: HostSudoers is a list of entries to include in a + users sudoer file + items: + type: string + nullable: true + type: array + impersonate: + description: Impersonate specifies what users and roles this role + is allowed to impersonate by issuing certificates or other possible + means. + nullable: true + properties: + roles: + description: Roles is a list of resources this role is allowed + to impersonate + items: + type: string + nullable: true + type: array + users: + description: Users is a list of resources this role is allowed + to impersonate, could be an empty list or a Wildcard pattern + items: + type: string + nullable: true + type: array + where: + description: Where specifies optional advanced matcher + type: string + type: object + join_sessions: + description: JoinSessions specifies policies to allow users to + join other sessions. + items: + properties: + kinds: + description: Kinds are the session kinds this policy applies + to. 
+ items: + type: string + nullable: true + type: array + modes: + description: Modes is a list of permitted participant modes + for this policy. + items: + type: string + nullable: true + type: array + name: + description: Name is the name of the policy. + type: string + roles: + description: Roles is a list of roles that you can join + the session of. + items: + type: string + nullable: true + type: array + type: object + nullable: true + type: array + kubernetes_groups: + description: KubeGroups is a list of kubernetes groups + items: + type: string + nullable: true + type: array + kubernetes_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: KubernetesLabels is a map of kubernetes cluster labels + used for RBAC. + type: object + kubernetes_labels_expression: + description: KubernetesLabelsExpression is a predicate expression + used to allow/deny access to kubernetes clusters. + type: string + kubernetes_resources: + description: KubernetesResources is the Kubernetes Resources this + Role grants access to. + items: + properties: + kind: + description: Kind specifies the Kubernetes Resource type. + At the moment only "pod" is supported. + type: string + name: + description: Name is the resource name. It supports wildcards. + type: string + namespace: + description: Namespace is the resource namespace. It supports + wildcards. + type: string + type: object + type: array + kubernetes_users: + description: KubeUsers is an optional kubernetes users to impersonate + items: + type: string + nullable: true + type: array + logins: + description: Logins is a list of *nix system logins. + items: + type: string + nullable: true + type: array + node_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: NodeLabels is a map of node labels (used to dynamically + grant access to nodes). 
+ type: object + node_labels_expression: + description: NodeLabelsExpression is a predicate expression used + to allow/deny access to SSH nodes. + type: string + request: + nullable: true + properties: + annotations: + additionalProperties: + items: + type: string + type: array + description: Annotations is a collection of annotations to + be programmatically appended to pending access requests + at the time of their creation. These annotations serve as + a mechanism to propagate extra information to plugins. Since + these annotations support variable interpolation syntax, + they also offer a mechanism for forwarding claims from an + external identity provider, to a plugin via {{ `{{external.trait_name}}` }} + style substitutions. + type: object + claims_to_roles: + description: ClaimsToRoles specifies a mapping from claims + (traits) to teleport roles. + items: + properties: + claim: + description: Claim is a claim name. + type: string + roles: + description: Roles is a list of static teleport roles + to match. + items: + type: string + nullable: true + type: array + value: + description: Value is a claim value to match. + type: string + type: object + type: array + max_duration: + description: MaxDuration is the amount of time the access + will be granted for. If this is zero, the default duration + is used. + format: duration + type: string + roles: + description: Roles is the name of roles which will match the + request rule. + items: + type: string + nullable: true + type: array + search_as_roles: + description: SearchAsRoles is a list of extra roles which + should apply to a user while they are searching for resources + as part of a Resource Access Request, and defines the underlying + roles which will be requested as part of any Resource Access + Request. + items: + type: string + nullable: true + type: array + suggested_reviewers: + description: SuggestedReviewers is a list of reviewer suggestions. 
These + can be teleport usernames, but that is not a requirement. + items: + type: string + nullable: true + type: array + thresholds: + description: Thresholds is a list of thresholds, one of which + must be met in order for reviews to trigger a state-transition. If + no thresholds are provided, a default threshold of 1 for + approval and denial is used. + items: + properties: + approve: + description: Approve is the number of matching approvals + needed for state-transition. + format: int32 + type: integer + deny: + description: Deny is the number of denials needed for + state-transition. + format: int32 + type: integer + filter: + description: Filter is an optional predicate used to + determine which reviews count toward this threshold. + type: string + name: + description: Name is the optional human-readable name + of the threshold. + type: string + type: object + type: array + type: object + require_session_join: + description: RequireSessionJoin specifies policies for required + users to start a session. + items: + properties: + count: + description: Count is the amount of people that need to + be matched for this policy to be fulfilled. + format: int32 + type: integer + filter: + description: Filter is a predicate that determines what + users count towards this policy. + type: string + kinds: + description: Kinds are the session kinds this policy applies + to. + items: + type: string + nullable: true + type: array + modes: + description: Modes is the list of modes that may be used + to fulfill this policy. + items: + type: string + nullable: true + type: array + name: + description: Name is the name of the policy. + type: string + on_leave: + description: OnLeave is the behaviour that's used when the + policy is no longer fulfilled for a live session. + type: string + type: object + nullable: true + type: array + review_requests: + description: ReviewRequests defines conditions for submitting + access reviews. 
+ nullable: true + properties: + claims_to_roles: + description: ClaimsToRoles specifies a mapping from claims + (traits) to teleport roles. + items: + properties: + claim: + description: Claim is a claim name. + type: string + roles: + description: Roles is a list of static teleport roles + to match. + items: + type: string + nullable: true + type: array + value: + description: Value is a claim value to match. + type: string + type: object + type: array + preview_as_roles: + description: PreviewAsRoles is a list of extra roles which + should apply to a reviewer while they are viewing a Resource + Access Request for the purposes of viewing details such + as the hostname and labels of requested resources. + items: + type: string + nullable: true + type: array + roles: + description: Roles is the name of roles which may be reviewed. + items: + type: string + nullable: true + type: array + where: + description: Where is an optional predicate which further + limits which requests are reviewable. + type: string + type: object + rules: + description: Rules is a list of rules and their access levels. + Rules are a high level construct used for access control. + items: + properties: + actions: + description: Actions specifies optional actions taken when + this rule matches + items: + type: string + nullable: true + type: array + resources: + description: Resources is a list of resources + items: + type: string + nullable: true + type: array + verbs: + description: Verbs is a list of verbs + items: + type: string + nullable: true + type: array + where: + description: Where specifies optional advanced matcher + type: string + type: object + type: array + windows_desktop_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WindowsDesktopLabels are used in the RBAC system + to allow/deny access to Windows desktops. 
+ type: object + windows_desktop_labels_expression: + description: WindowsDesktopLabelsExpression is a predicate expression + used to allow/deny access to Windows desktops. + type: string + windows_desktop_logins: + description: WindowsDesktopLogins is a list of desktop login names + allowed/denied for Windows desktops. + items: + type: string + nullable: true + type: array + type: object + options: + description: Options is for OpenSSH options like agent forwarding. + properties: + cert_extensions: + description: CertExtensions specifies the key/values + items: + properties: + mode: + description: Mode is the type of extension to be used -- + currently critical-option is not supported + format: int32 + type: integer + name: + description: Name specifies the key to be used in the cert + extension. + type: string + type: + description: Type represents the certificate type being + extended, only ssh is supported at this time. + format: int32 + type: integer + value: + description: Value specifies the value to be used in the + cert extension. + type: string + type: object + nullable: true + type: array + cert_format: + description: CertificateFormat defines the format of the user + certificate to allow compatibility with older versions of OpenSSH. + type: string + client_idle_timeout: + description: ClientIdleTimeout sets disconnect clients on idle + timeout behavior, if set to 0 means do not disconnect, otherwise + is set to the idle duration. + format: duration + type: string + create_db_user: + description: CreateDatabaseUser enabled automatic database user + creation. 
+ type: boolean + create_desktop_user: + description: CreateDesktopUser allows users to be automatically + created on a Windows desktop + type: boolean + create_host_user: + description: CreateHostUser allows users to be automatically created + on a host + type: boolean + create_host_user_mode: + description: CreateHostUserMode allows users to be automatically + created on a host when not set to off + format: int32 + type: integer + desktop_clipboard: + description: DesktopClipboard indicates whether clipboard sharing + is allowed between the user's workstation and the remote desktop. + It defaults to true unless explicitly set to false. + type: boolean + desktop_directory_sharing: + description: DesktopDirectorySharing indicates whether directory + sharing is allowed between the user's workstation and the remote + desktop. It defaults to false unless explicitly set to true. + type: boolean + device_trust_mode: + description: DeviceTrustMode is the device authorization mode + used for the resources associated with the role. See DeviceTrust.Mode. + Reserved for future use, not yet used by Teleport. + type: string + disconnect_expired_cert: + description: DisconnectExpiredCert sets disconnect clients on + expired certificates. + type: boolean + enhanced_recording: + description: BPF defines what events to record for the BPF-based + session recorder. + items: + type: string + nullable: true + type: array + forward_agent: + description: ForwardAgent is SSH agent forwarding. + type: boolean + idp: + description: IDP is a set of options related to accessing IdPs + within Teleport. Requires Teleport Enterprise. + nullable: true + properties: + saml: + description: SAML are options related to the Teleport SAML + IdP. + nullable: true + properties: + enabled: + description: Enabled is set to true if this option allows + access to the Teleport SAML IdP. 
+ type: boolean + type: object + type: object + lock: + description: Lock specifies the locking mode (strict|best_effort) + to be applied with the role. + type: string + max_connections: + description: MaxConnections defines the maximum number of concurrent + connections a user may hold. + format: int64 + type: integer + max_kubernetes_connections: + description: MaxKubernetesConnections defines the maximum number + of concurrent Kubernetes sessions a user may hold. + format: int64 + type: integer + max_session_ttl: + description: MaxSessionTTL defines how long a SSH session can + last for. + format: duration + type: string + max_sessions: + description: MaxSessions defines the maximum number of concurrent + sessions per connection. + format: int64 + type: integer + permit_x11_forwarding: + description: PermitX11Forwarding authorizes use of X11 forwarding. + type: boolean + pin_source_ip: + description: PinSourceIP forces the same client IP for certificate + generation and usage + type: boolean + port_forwarding: + description: PortForwarding defines if the certificate will have + "permit-port-forwarding" in the certificate. PortForwarding + is "yes" if not set, that's why this is a pointer + type: boolean + record_session: + description: RecordDesktopSession indicates whether desktop access + sessions should be recorded. It defaults to true unless explicitly + set to false. + nullable: true + properties: + default: + description: Default indicates the default value for the services. + type: string + desktop: + description: Desktop indicates whether desktop sessions should + be recorded. It defaults to true unless explicitly set to + false. + type: boolean + ssh: + description: SSH indicates the session mode used on SSH sessions. + type: string + type: object + request_access: + description: RequestAccess defines the access request strategy + (optional|note|always) where optional is the default. 
+ type: string + request_prompt: + description: RequestPrompt is an optional message which tells + users what they aught to + type: string + require_session_mfa: + description: RequireMFAType is the type of MFA requirement enforced + for this user. + format: int32 + type: integer + ssh_file_copy: + description: SSHFileCopy indicates whether remote file operations + via SCP or SFTP are allowed over an SSH session. It defaults + to true unless explicitly set to false. + type: boolean + type: object + type: object + status: + description: TeleportRoleStatus defines the observed state of TeleportRole + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} + - name: v6 + schema: + openAPIV3Schema: + description: Role is the Schema for the roles API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Role resource definition v6 from Teleport + properties: + allow: + description: Allow is the set of conditions evaluated to grant access. + properties: + app_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: AppLabels is a map of labels used as part of the + RBAC system. + type: object + app_labels_expression: + description: AppLabelsExpression is a predicate expression used + to allow/deny access to Apps. + type: string + aws_role_arns: + description: AWSRoleARNs is a list of AWS role ARNs this role + is allowed to assume. + items: + type: string + nullable: true + type: array + azure_identities: + description: AzureIdentities is a list of Azure identities this + role is allowed to assume. + items: + type: string + nullable: true + type: array + cluster_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: ClusterLabels is a map of node labels (used to dynamically + grant access to clusters). + type: object + cluster_labels_expression: + description: ClusterLabelsExpression is a predicate expression + used to allow/deny access to remote Teleport clusters. + type: string + db_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: DatabaseLabels are used in RBAC system to allow/deny + access to databases. 
+ type: object + db_labels_expression: + description: DatabaseLabelsExpression is a predicate expression + used to allow/deny access to Databases. + type: string + db_names: + description: DatabaseNames is a list of database names this role + is allowed to connect to. + items: + type: string + nullable: true + type: array + db_roles: + description: DatabaseRoles is a list of databases roles for automatic + user creation. + items: + type: string + nullable: true + type: array + db_service_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: DatabaseServiceLabels are used in RBAC system to + allow/deny access to Database Services. + type: object + db_service_labels_expression: + description: DatabaseServiceLabelsExpression is a predicate expression + used to allow/deny access to Database Services. + type: string + db_users: + description: DatabaseUsers is a list of databases users this role + is allowed to connect as. + items: + type: string + nullable: true + type: array + desktop_groups: + description: DesktopGroups is a list of groups for created desktop + users to be added to + items: + type: string + nullable: true + type: array + gcp_service_accounts: + description: GCPServiceAccounts is a list of GCP service accounts + this role is allowed to assume. + items: + type: string + nullable: true + type: array + group_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: GroupLabels is a map of labels used as part of the + RBAC system. + type: object + group_labels_expression: + description: GroupLabelsExpression is a predicate expression used + to allow/deny access to user groups. 
+ type: string + host_groups: + description: HostGroups is a list of groups for created users + to be added to + items: + type: string + nullable: true + type: array + host_sudoers: + description: HostSudoers is a list of entries to include in a + users sudoer file + items: + type: string + nullable: true + type: array + impersonate: + description: Impersonate specifies what users and roles this role + is allowed to impersonate by issuing certificates or other possible + means. + nullable: true + properties: + roles: + description: Roles is a list of resources this role is allowed + to impersonate + items: + type: string + nullable: true + type: array + users: + description: Users is a list of resources this role is allowed + to impersonate, could be an empty list or a Wildcard pattern + items: + type: string + nullable: true + type: array + where: + description: Where specifies optional advanced matcher + type: string + type: object + join_sessions: + description: JoinSessions specifies policies to allow users to + join other sessions. + items: + properties: + kinds: + description: Kinds are the session kinds this policy applies + to. + items: + type: string + nullable: true + type: array + modes: + description: Modes is a list of permitted participant modes + for this policy. + items: + type: string + nullable: true + type: array + name: + description: Name is the name of the policy. + type: string + roles: + description: Roles is a list of roles that you can join + the session of. + items: + type: string + nullable: true + type: array + type: object + nullable: true + type: array + kubernetes_groups: + description: KubeGroups is a list of kubernetes groups + items: + type: string + nullable: true + type: array + kubernetes_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: KubernetesLabels is a map of kubernetes cluster labels + used for RBAC. 
+ type: object + kubernetes_labels_expression: + description: KubernetesLabelsExpression is a predicate expression + used to allow/deny access to kubernetes clusters. + type: string + kubernetes_resources: + description: KubernetesResources is the Kubernetes Resources this + Role grants access to. + items: + properties: + kind: + description: Kind specifies the Kubernetes Resource type. + At the moment only "pod" is supported. + type: string + name: + description: Name is the resource name. It supports wildcards. + type: string + namespace: + description: Namespace is the resource namespace. It supports + wildcards. + type: string + type: object + type: array + kubernetes_users: + description: KubeUsers is an optional kubernetes users to impersonate + items: + type: string + nullable: true + type: array + logins: + description: Logins is a list of *nix system logins. + items: + type: string + nullable: true + type: array + node_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: NodeLabels is a map of node labels (used to dynamically + grant access to nodes). + type: object + node_labels_expression: + description: NodeLabelsExpression is a predicate expression used + to allow/deny access to SSH nodes. + type: string + request: + nullable: true + properties: + annotations: + additionalProperties: + items: + type: string + type: array + description: Annotations is a collection of annotations to + be programmatically appended to pending access requests + at the time of their creation. These annotations serve as + a mechanism to propagate extra information to plugins. Since + these annotations support variable interpolation syntax, + they also offer a mechanism for forwarding claims from an + external identity provider, to a plugin via {{ `{{external.trait_name}}` }} + style substitutions. + type: object + claims_to_roles: + description: ClaimsToRoles specifies a mapping from claims + (traits) to teleport roles. 
+ items: + properties: + claim: + description: Claim is a claim name. + type: string + roles: + description: Roles is a list of static teleport roles + to match. + items: + type: string + nullable: true + type: array + value: + description: Value is a claim value to match. + type: string + type: object + type: array + max_duration: + description: MaxDuration is the amount of time the access + will be granted for. If this is zero, the default duration + is used. + format: duration + type: string + roles: + description: Roles is the name of roles which will match the + request rule. + items: + type: string + nullable: true + type: array + search_as_roles: + description: SearchAsRoles is a list of extra roles which + should apply to a user while they are searching for resources + as part of a Resource Access Request, and defines the underlying + roles which will be requested as part of any Resource Access + Request. + items: + type: string + nullable: true + type: array + suggested_reviewers: + description: SuggestedReviewers is a list of reviewer suggestions. These + can be teleport usernames, but that is not a requirement. + items: + type: string + nullable: true + type: array + thresholds: + description: Thresholds is a list of thresholds, one of which + must be met in order for reviews to trigger a state-transition. If + no thresholds are provided, a default threshold of 1 for + approval and denial is used. + items: + properties: + approve: + description: Approve is the number of matching approvals + needed for state-transition. + format: int32 + type: integer + deny: + description: Deny is the number of denials needed for + state-transition. + format: int32 + type: integer + filter: + description: Filter is an optional predicate used to + determine which reviews count toward this threshold. + type: string + name: + description: Name is the optional human-readable name + of the threshold. 
+ type: string + type: object + type: array + type: object + require_session_join: + description: RequireSessionJoin specifies policies for required + users to start a session. + items: + properties: + count: + description: Count is the amount of people that need to + be matched for this policy to be fulfilled. + format: int32 + type: integer + filter: + description: Filter is a predicate that determines what + users count towards this policy. + type: string + kinds: + description: Kinds are the session kinds this policy applies + to. + items: + type: string + nullable: true + type: array + modes: + description: Modes is the list of modes that may be used + to fulfill this policy. + items: + type: string + nullable: true + type: array + name: + description: Name is the name of the policy. + type: string + on_leave: + description: OnLeave is the behaviour that's used when the + policy is no longer fulfilled for a live session. + type: string + type: object + nullable: true + type: array + review_requests: + description: ReviewRequests defines conditions for submitting + access reviews. + nullable: true + properties: + claims_to_roles: + description: ClaimsToRoles specifies a mapping from claims + (traits) to teleport roles. + items: + properties: + claim: + description: Claim is a claim name. + type: string + roles: + description: Roles is a list of static teleport roles + to match. + items: + type: string + nullable: true + type: array + value: + description: Value is a claim value to match. + type: string + type: object + type: array + preview_as_roles: + description: PreviewAsRoles is a list of extra roles which + should apply to a reviewer while they are viewing a Resource + Access Request for the purposes of viewing details such + as the hostname and labels of requested resources. + items: + type: string + nullable: true + type: array + roles: + description: Roles is the name of roles which may be reviewed. 
+ items: + type: string + nullable: true + type: array + where: + description: Where is an optional predicate which further + limits which requests are reviewable. + type: string + type: object + rules: + description: Rules is a list of rules and their access levels. + Rules are a high level construct used for access control. + items: + properties: + actions: + description: Actions specifies optional actions taken when + this rule matches + items: + type: string + nullable: true + type: array + resources: + description: Resources is a list of resources + items: + type: string + nullable: true + type: array + verbs: + description: Verbs is a list of verbs + items: + type: string + nullable: true + type: array + where: + description: Where specifies optional advanced matcher + type: string + type: object + type: array + windows_desktop_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WindowsDesktopLabels are used in the RBAC system + to allow/deny access to Windows desktops. + type: object + windows_desktop_labels_expression: + description: WindowsDesktopLabelsExpression is a predicate expression + used to allow/deny access to Windows desktops. + type: string + windows_desktop_logins: + description: WindowsDesktopLogins is a list of desktop login names + allowed/denied for Windows desktops. + items: + type: string + nullable: true + type: array + type: object + deny: + description: Deny is the set of conditions evaluated to deny access. + Deny takes priority over allow. + properties: + app_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: AppLabels is a map of labels used as part of the + RBAC system. + type: object + app_labels_expression: + description: AppLabelsExpression is a predicate expression used + to allow/deny access to Apps. + type: string + aws_role_arns: + description: AWSRoleARNs is a list of AWS role ARNs this role + is allowed to assume. 
+ items: + type: string + nullable: true + type: array + azure_identities: + description: AzureIdentities is a list of Azure identities this + role is allowed to assume. + items: + type: string + nullable: true + type: array + cluster_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: ClusterLabels is a map of node labels (used to dynamically + grant access to clusters). + type: object + cluster_labels_expression: + description: ClusterLabelsExpression is a predicate expression + used to allow/deny access to remote Teleport clusters. + type: string + db_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: DatabaseLabels are used in RBAC system to allow/deny + access to databases. + type: object + db_labels_expression: + description: DatabaseLabelsExpression is a predicate expression + used to allow/deny access to Databases. + type: string + db_names: + description: DatabaseNames is a list of database names this role + is allowed to connect to. + items: + type: string + nullable: true + type: array + db_roles: + description: DatabaseRoles is a list of databases roles for automatic + user creation. + items: + type: string + nullable: true + type: array + db_service_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: DatabaseServiceLabels are used in RBAC system to + allow/deny access to Database Services. + type: object + db_service_labels_expression: + description: DatabaseServiceLabelsExpression is a predicate expression + used to allow/deny access to Database Services. + type: string + db_users: + description: DatabaseUsers is a list of databases users this role + is allowed to connect as. 
+ items: + type: string + nullable: true + type: array + desktop_groups: + description: DesktopGroups is a list of groups for created desktop + users to be added to + items: + type: string + nullable: true + type: array + gcp_service_accounts: + description: GCPServiceAccounts is a list of GCP service accounts + this role is allowed to assume. + items: + type: string + nullable: true + type: array + group_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: GroupLabels is a map of labels used as part of the + RBAC system. + type: object + group_labels_expression: + description: GroupLabelsExpression is a predicate expression used + to allow/deny access to user groups. + type: string + host_groups: + description: HostGroups is a list of groups for created users + to be added to + items: + type: string + nullable: true + type: array + host_sudoers: + description: HostSudoers is a list of entries to include in a + users sudoer file + items: + type: string + nullable: true + type: array + impersonate: + description: Impersonate specifies what users and roles this role + is allowed to impersonate by issuing certificates or other possible + means. + nullable: true + properties: + roles: + description: Roles is a list of resources this role is allowed + to impersonate + items: + type: string + nullable: true + type: array + users: + description: Users is a list of resources this role is allowed + to impersonate, could be an empty list or a Wildcard pattern + items: + type: string + nullable: true + type: array + where: + description: Where specifies optional advanced matcher + type: string + type: object + join_sessions: + description: JoinSessions specifies policies to allow users to + join other sessions. + items: + properties: + kinds: + description: Kinds are the session kinds this policy applies + to. 
+ items: + type: string + nullable: true + type: array + modes: + description: Modes is a list of permitted participant modes + for this policy. + items: + type: string + nullable: true + type: array + name: + description: Name is the name of the policy. + type: string + roles: + description: Roles is a list of roles that you can join + the session of. + items: + type: string + nullable: true + type: array + type: object + nullable: true + type: array + kubernetes_groups: + description: KubeGroups is a list of kubernetes groups + items: + type: string + nullable: true + type: array + kubernetes_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: KubernetesLabels is a map of kubernetes cluster labels + used for RBAC. + type: object + kubernetes_labels_expression: + description: KubernetesLabelsExpression is a predicate expression + used to allow/deny access to kubernetes clusters. + type: string + kubernetes_resources: + description: KubernetesResources is the Kubernetes Resources this + Role grants access to. + items: + properties: + kind: + description: Kind specifies the Kubernetes Resource type. + At the moment only "pod" is supported. + type: string + name: + description: Name is the resource name. It supports wildcards. + type: string + namespace: + description: Namespace is the resource namespace. It supports + wildcards. + type: string + type: object + type: array + kubernetes_users: + description: KubeUsers is an optional kubernetes users to impersonate + items: + type: string + nullable: true + type: array + logins: + description: Logins is a list of *nix system logins. + items: + type: string + nullable: true + type: array + node_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: NodeLabels is a map of node labels (used to dynamically + grant access to nodes). 
+ type: object + node_labels_expression: + description: NodeLabelsExpression is a predicate expression used + to allow/deny access to SSH nodes. + type: string + request: + nullable: true + properties: + annotations: + additionalProperties: + items: + type: string + type: array + description: Annotations is a collection of annotations to + be programmatically appended to pending access requests + at the time of their creation. These annotations serve as + a mechanism to propagate extra information to plugins. Since + these annotations support variable interpolation syntax, + they also offer a mechanism for forwarding claims from an + external identity provider, to a plugin via {{ `{{external.trait_name}}` }} + style substitutions. + type: object + claims_to_roles: + description: ClaimsToRoles specifies a mapping from claims + (traits) to teleport roles. + items: + properties: + claim: + description: Claim is a claim name. + type: string + roles: + description: Roles is a list of static teleport roles + to match. + items: + type: string + nullable: true + type: array + value: + description: Value is a claim value to match. + type: string + type: object + type: array + max_duration: + description: MaxDuration is the amount of time the access + will be granted for. If this is zero, the default duration + is used. + format: duration + type: string + roles: + description: Roles is the name of roles which will match the + request rule. + items: + type: string + nullable: true + type: array + search_as_roles: + description: SearchAsRoles is a list of extra roles which + should apply to a user while they are searching for resources + as part of a Resource Access Request, and defines the underlying + roles which will be requested as part of any Resource Access + Request. + items: + type: string + nullable: true + type: array + suggested_reviewers: + description: SuggestedReviewers is a list of reviewer suggestions. 
These + can be teleport usernames, but that is not a requirement. + items: + type: string + nullable: true + type: array + thresholds: + description: Thresholds is a list of thresholds, one of which + must be met in order for reviews to trigger a state-transition. If + no thresholds are provided, a default threshold of 1 for + approval and denial is used. + items: + properties: + approve: + description: Approve is the number of matching approvals + needed for state-transition. + format: int32 + type: integer + deny: + description: Deny is the number of denials needed for + state-transition. + format: int32 + type: integer + filter: + description: Filter is an optional predicate used to + determine which reviews count toward this threshold. + type: string + name: + description: Name is the optional human-readable name + of the threshold. + type: string + type: object + type: array + type: object + require_session_join: + description: RequireSessionJoin specifies policies for required + users to start a session. + items: + properties: + count: + description: Count is the amount of people that need to + be matched for this policy to be fulfilled. + format: int32 + type: integer + filter: + description: Filter is a predicate that determines what + users count towards this policy. + type: string + kinds: + description: Kinds are the session kinds this policy applies + to. + items: + type: string + nullable: true + type: array + modes: + description: Modes is the list of modes that may be used + to fulfill this policy. + items: + type: string + nullable: true + type: array + name: + description: Name is the name of the policy. + type: string + on_leave: + description: OnLeave is the behaviour that's used when the + policy is no longer fulfilled for a live session. + type: string + type: object + nullable: true + type: array + review_requests: + description: ReviewRequests defines conditions for submitting + access reviews. 
+ nullable: true + properties: + claims_to_roles: + description: ClaimsToRoles specifies a mapping from claims + (traits) to teleport roles. + items: + properties: + claim: + description: Claim is a claim name. + type: string + roles: + description: Roles is a list of static teleport roles + to match. + items: + type: string + nullable: true + type: array + value: + description: Value is a claim value to match. + type: string + type: object + type: array + preview_as_roles: + description: PreviewAsRoles is a list of extra roles which + should apply to a reviewer while they are viewing a Resource + Access Request for the purposes of viewing details such + as the hostname and labels of requested resources. + items: + type: string + nullable: true + type: array + roles: + description: Roles is the name of roles which may be reviewed. + items: + type: string + nullable: true + type: array + where: + description: Where is an optional predicate which further + limits which requests are reviewable. + type: string + type: object + rules: + description: Rules is a list of rules and their access levels. + Rules are a high level construct used for access control. + items: + properties: + actions: + description: Actions specifies optional actions taken when + this rule matches + items: + type: string + nullable: true + type: array + resources: + description: Resources is a list of resources + items: + type: string + nullable: true + type: array + verbs: + description: Verbs is a list of verbs + items: + type: string + nullable: true + type: array + where: + description: Where specifies optional advanced matcher + type: string + type: object + type: array + windows_desktop_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WindowsDesktopLabels are used in the RBAC system + to allow/deny access to Windows desktops. 
+ type: object + windows_desktop_labels_expression: + description: WindowsDesktopLabelsExpression is a predicate expression + used to allow/deny access to Windows desktops. + type: string + windows_desktop_logins: + description: WindowsDesktopLogins is a list of desktop login names + allowed/denied for Windows desktops. + items: + type: string + nullable: true + type: array + type: object + options: + description: Options is for OpenSSH options like agent forwarding. + properties: + cert_extensions: + description: CertExtensions specifies the key/values + items: + properties: + mode: + description: Mode is the type of extension to be used -- + currently critical-option is not supported + format: int32 + type: integer + name: + description: Name specifies the key to be used in the cert + extension. + type: string + type: + description: Type represents the certificate type being + extended, only ssh is supported at this time. + format: int32 + type: integer + value: + description: Value specifies the value to be used in the + cert extension. + type: string + type: object + nullable: true + type: array + cert_format: + description: CertificateFormat defines the format of the user + certificate to allow compatibility with older versions of OpenSSH. + type: string + client_idle_timeout: + description: ClientIdleTimeout sets disconnect clients on idle + timeout behavior, if set to 0 means do not disconnect, otherwise + is set to the idle duration. + format: duration + type: string + create_db_user: + description: CreateDatabaseUser enabled automatic database user + creation. 
+ type: boolean + create_desktop_user: + description: CreateDesktopUser allows users to be automatically + created on a Windows desktop + type: boolean + create_host_user: + description: CreateHostUser allows users to be automatically created + on a host + type: boolean + create_host_user_mode: + description: CreateHostUserMode allows users to be automatically + created on a host when not set to off + format: int32 + type: integer + desktop_clipboard: + description: DesktopClipboard indicates whether clipboard sharing + is allowed between the user's workstation and the remote desktop. + It defaults to true unless explicitly set to false. + type: boolean + desktop_directory_sharing: + description: DesktopDirectorySharing indicates whether directory + sharing is allowed between the user's workstation and the remote + desktop. It defaults to false unless explicitly set to true. + type: boolean + device_trust_mode: + description: DeviceTrustMode is the device authorization mode + used for the resources associated with the role. See DeviceTrust.Mode. + Reserved for future use, not yet used by Teleport. + type: string + disconnect_expired_cert: + description: DisconnectExpiredCert sets disconnect clients on + expired certificates. + type: boolean + enhanced_recording: + description: BPF defines what events to record for the BPF-based + session recorder. + items: + type: string + nullable: true + type: array + forward_agent: + description: ForwardAgent is SSH agent forwarding. + type: boolean + idp: + description: IDP is a set of options related to accessing IdPs + within Teleport. Requires Teleport Enterprise. + nullable: true + properties: + saml: + description: SAML are options related to the Teleport SAML + IdP. + nullable: true + properties: + enabled: + description: Enabled is set to true if this option allows + access to the Teleport SAML IdP. 
+ type: boolean + type: object + type: object + lock: + description: Lock specifies the locking mode (strict|best_effort) + to be applied with the role. + type: string + max_connections: + description: MaxConnections defines the maximum number of concurrent + connections a user may hold. + format: int64 + type: integer + max_kubernetes_connections: + description: MaxKubernetesConnections defines the maximum number + of concurrent Kubernetes sessions a user may hold. + format: int64 + type: integer + max_session_ttl: + description: MaxSessionTTL defines how long a SSH session can + last for. + format: duration + type: string + max_sessions: + description: MaxSessions defines the maximum number of concurrent + sessions per connection. + format: int64 + type: integer + permit_x11_forwarding: + description: PermitX11Forwarding authorizes use of X11 forwarding. + type: boolean + pin_source_ip: + description: PinSourceIP forces the same client IP for certificate + generation and usage + type: boolean + port_forwarding: + description: PortForwarding defines if the certificate will have + "permit-port-forwarding" in the certificate. PortForwarding + is "yes" if not set, that's why this is a pointer + type: boolean + record_session: + description: RecordDesktopSession indicates whether desktop access + sessions should be recorded. It defaults to true unless explicitly + set to false. + nullable: true + properties: + default: + description: Default indicates the default value for the services. + type: string + desktop: + description: Desktop indicates whether desktop sessions should + be recorded. It defaults to true unless explicitly set to + false. + type: boolean + ssh: + description: SSH indicates the session mode used on SSH sessions. + type: string + type: object + request_access: + description: RequestAccess defines the access request strategy + (optional|note|always) where optional is the default. 
+ type: string + request_prompt: + description: RequestPrompt is an optional message which tells + users what they aught to + type: string + require_session_mfa: + description: RequireMFAType is the type of MFA requirement enforced + for this user. + format: int32 + type: integer + ssh_file_copy: + description: SSHFileCopy indicates whether remote file operations + via SCP or SFTP are allowed over an SSH session. It defaults + to true unless explicitly set to false. + type: boolean + type: object + type: object + status: + description: TeleportRoleStatus defines the observed state of TeleportRole + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_samlconnectors.yaml b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_samlconnectors.yaml new file mode 100644 index 0000000..c86cc91 --- /dev/null +++ b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_samlconnectors.yaml @@ -0,0 +1,210 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleportsamlconnectors.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportSAMLConnector + listKind: TeleportSAMLConnectorList + plural: teleportsamlconnectors + shortNames: + - samlconnector + - samlconnectors + singular: teleportsamlconnector + scope: Namespaced + versions: + - name: v2 + schema: + openAPIV3Schema: + description: SAMLConnector is the Schema for the samlconnectors API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SAMLConnector resource definition v2 from Teleport + properties: + acs: + description: AssertionConsumerService is a URL for assertion consumer + service on the service provider (Teleport's side). + type: string + allow_idp_initiated: + description: AllowIDPInitiated is a flag that indicates if the connector + can be used for IdP-initiated logins. + type: boolean + assertion_key_pair: + description: EncryptionKeyPair is a key pair used for decrypting SAML + assertions. + nullable: true + properties: + cert: + description: Cert is a PEM-encoded x509 certificate. + type: string + private_key: + description: PrivateKey is a PEM encoded x509 private key. + type: string + type: object + attributes_to_roles: + description: AttributesToRoles is a list of mappings of attribute + statements to roles. + items: + properties: + name: + description: Name is an attribute statement name. + type: string + roles: + description: Roles is a list of static teleport roles to map + to. + items: + type: string + nullable: true + type: array + value: + description: Value is an attribute statement value to match. + type: string + type: object + type: array + audience: + description: Audience uniquely identifies our service provider. + type: string + cert: + description: Cert is the identity provider certificate PEM. IDP signs + responses using this certificate. + type: string + display: + description: Display controls how this connector is displayed. + type: string + entity_descriptor: + description: EntityDescriptor is XML with descriptor. It can be used + to supply configuration parameters in one XML file rather than supplying + them in the individual elements. 
+ type: string + entity_descriptor_url: + description: EntityDescriptorURL is a URL that supplies a configuration + XML. + type: string + issuer: + description: Issuer is the identity provider issuer. + type: string + provider: + description: Provider is the external identity provider. + type: string + service_provider_issuer: + description: ServiceProviderIssuer is the issuer of the service provider + (Teleport). + type: string + signing_key_pair: + description: SigningKeyPair is an x509 key pair used to sign AuthnRequest. + nullable: true + properties: + cert: + description: Cert is a PEM-encoded x509 certificate. + type: string + private_key: + description: PrivateKey is a PEM encoded x509 private key. + type: string + type: object + sso: + description: SSO is the URL of the identity provider's SSO service. + type: string + type: object + status: + description: TeleportSAMLConnectorStatus defines the observed state of + TeleportSAMLConnector + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. 
If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_users.yaml b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_users.yaml new file mode 100644 index 0000000..7e41bac --- /dev/null +++ b/helm/old/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_users.yaml @@ -0,0 +1,195 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleportusers.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportUser + listKind: TeleportUserList + plural: teleportusers + shortNames: + - user + - users + singular: teleportuser + scope: Namespaced + versions: + - name: v2 + schema: + openAPIV3Schema: + description: User is the Schema for the users API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: User resource definition v2 from Teleport + properties: + github_identities: + description: GithubIdentities list associated Github OAuth2 identities + that let user log in using externally verified identity + items: + properties: + connector_id: + description: ConnectorID is id of registered OIDC connector, + e.g. 'google-example.com' + type: string + username: + description: Username is username supplied by external identity + provider + type: string + type: object + type: array + oidc_identities: + description: OIDCIdentities lists associated OpenID Connect identities + that let user log in using externally verified identity + items: + properties: + connector_id: + description: ConnectorID is id of registered OIDC connector, + e.g. 'google-example.com' + type: string + username: + description: Username is username supplied by external identity + provider + type: string + type: object + type: array + roles: + description: Roles is a list of roles assigned to user + items: + type: string + nullable: true + type: array + saml_identities: + description: SAMLIdentities lists associated SAML identities that + let user log in using externally verified identity + items: + properties: + connector_id: + description: ConnectorID is id of registered OIDC connector, + e.g. 'google-example.com' + type: string + username: + description: Username is username supplied by external identity + provider + type: string + type: object + type: array + traits: + additionalProperties: + items: + type: string + type: array + description: Traits are key/value pairs received from an identity + provider (through OIDC claims or SAML assertions) or from a system + administrator for local accounts. Traits are used to populate role + variables. 
+ type: object + trusted_device_ids: + description: TrustedDeviceIDs contains the IDs of trusted devices + enrolled by the user. Managed by the Device Trust subsystem, avoid + manual edits. + items: + type: string + nullable: true + type: array + type: object + status: + description: TeleportUserStatus defines the observed state of TeleportUser + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/helm/old/teleport-cluster/override-values.yaml b/helm/old/teleport-cluster/override-values.yaml new file mode 100644 index 0000000..4989c7f --- /dev/null +++ b/helm/old/teleport-cluster/override-values.yaml @@ -0,0 +1,45 @@ +chartMode: standalone +clusterName: teleport.kr.datasaker.io + #teleportVersionOverride: "13.3.8" + +auth: + teleportConfig: + # put any teleport.yaml auth configuration overrides here + teleport: + log: + output: stderr + severity: INFO + + auth_service: + enabled: 
true + web_idle_timeout: 1h + authentication: + locking_mode: best_effort + persistence: + storageClassName: openebs-hostpath + +proxy: + teleportConfig: + # put any teleport.yaml proxy configuration overrides here + teleport: + log: + output: stderr + severity: INFO + + proxy_service: + https_keypairs_reload_interval: 12h + +podSecurityPolicy: + enabled: false + +proxy_service: + web_listen_addr: 0.0.0.0:3080 + public_addr: teleport.kr.datasaker.io:443 + +resources: + requests: + cpu: "1" + memory: "2Gi" + +highAvailability: + replicaCount: 1 diff --git a/helm/teleport-cluster/teleport_svc.yaml b/helm/old/teleport-cluster/teleport_svc.yaml similarity index 100% rename from helm/teleport-cluster/teleport_svc.yaml rename to helm/old/teleport-cluster/teleport_svc.yaml diff --git a/helm/old/teleport-cluster/templates/NOTES.txt b/helm/old/teleport-cluster/templates/NOTES.txt new file mode 100644 index 0000000..f85e1fa --- /dev/null +++ b/helm/old/teleport-cluster/templates/NOTES.txt @@ -0,0 +1,35 @@ +{{- if .Values.highAvailability.certManager.enabled }} +You have enabled cert-manager support in high availability mode. + +There may be a short delay before Teleport pods start while an ACME certificate is issued. 
+You can check the status of the certificate with `kubectl -n {{ .Release.Namespace }} describe certificate/{{ .Release.Name }}` + +NOTE: For certificates to be provisioned, you must also install cert-manager (https://cert-manager.io/docs/) and configure an appropriate + Issuer with access to your DNS provider to handle DNS01 challenges (https://cert-manager.io/docs/configuration/acme/dns01/#supported-dns01-providers) + +For more information, please see the Helm guides in the Teleport docs (https://goteleport.com/docs/kubernetes-access/helm/guides/) +{{- end }} + +{{- if and .Values.podSecurityPolicy.enabled (semverCompare "<1.23.0-0" .Capabilities.KubeVersion.Version) }} + +SECURITY WARNING: Kubernetes 1.25 removes PodSecurityPolicy support and Helm +doesn't support upgrading from 1.24 to 1.25 with PSPs enabled. Since version 12 +the `teleport-cluster` chart doesn't deploy PSPs on Kubernetes 1.23 or older. +Instead, we recommend you to configure Pod Security AdmissionControllers for +the namespace "{{.Release.Namespace}}" by adding the label +`pod-security.kubernetes.io/enforce: baseline` on the namespace resource. + +See https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-kubernetes-1-25-psp/ + +To remove this warning, explicitly set "podSecurityPolicy.enabled=false". +{{- end }} + +{{- if .Values.teleportVersionOverride }} + +DANGER: `teleportVersionOverride` MUST NOT be used to control the Teleport version. +This chart is designed to run Teleport version {{ .Chart.AppVersion }}. +You will face compatibility issues trying to run a different Teleport version with it. + +If you want to run Teleport version {{.Values.teleportVersionOverride}}, +you should use `helm --version {{.Values.teleportVersionOverride}}` instead. 
+{{- end }} diff --git a/helm/old/teleport-cluster/templates/_helpers.tpl b/helm/old/teleport-cluster/templates/_helpers.tpl new file mode 100644 index 0000000..e5c2219 --- /dev/null +++ b/helm/old/teleport-cluster/templates/_helpers.tpl @@ -0,0 +1,91 @@ +{{/* +Create the name of the service account to use +if serviceAccount is not defined or serviceAccount.name is empty, use .Release.Name +*/}} +{{- define "teleport-cluster.auth.serviceAccountName" -}} +{{- coalesce .Values.serviceAccount.name .Release.Name -}} +{{- end -}} + +{{- define "teleport-cluster.proxy.serviceAccountName" -}} +{{- coalesce .Values.serviceAccount.name .Release.Name -}}-proxy +{{- end -}} + +{{- define "teleport-cluster.version" -}} +{{- coalesce .Values.teleportVersionOverride .Chart.Version }} +{{- end -}} + +{{- define "teleport-cluster.majorVersion" -}} +{{- (semver (include "teleport-cluster.version" .)).Major -}} +{{- end -}} + +{{- define "teleport-cluster.previousMajorVersion" -}} +{{- sub (include "teleport-cluster.majorVersion" . | atoi ) 1 -}} +{{- end -}} + +{{/* Proxy selector labels */}} +{{- define "teleport-cluster.proxy.selectorLabels" -}} +app.kubernetes.io/name: '{{ default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}' +app.kubernetes.io/instance: '{{ .Release.Name }}' +app.kubernetes.io/component: 'proxy' +{{- end -}} + +{{/* Proxy all labels */}} +{{- define "teleport-cluster.proxy.labels" -}} +{{ include "teleport-cluster.proxy.selectorLabels" . }} +helm.sh/chart: '{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}' +app.kubernetes.io/managed-by: '{{ .Release.Service }}' +app.kubernetes.io/version: '{{ include "teleport-cluster.version" . }}' +teleport.dev/majorVersion: '{{ include "teleport-cluster.majorVersion" . 
}}' +{{- end -}} + +{{/* Auth pods selector labels */}} +{{- define "teleport-cluster.auth.selectorLabels" -}} +app.kubernetes.io/name: '{{ default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}' +app.kubernetes.io/instance: '{{ .Release.Name }}' +app.kubernetes.io/component: 'auth' +{{- end -}} + +{{/* All pods all labels */}} +{{- define "teleport-cluster.labels" -}} +{{ include "teleport-cluster.selectorLabels" . }} +helm.sh/chart: '{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}' +app.kubernetes.io/managed-by: '{{ .Release.Service }}' +app.kubernetes.io/version: '{{ include "teleport-cluster.version" . }}' +teleport.dev/majorVersion: '{{ include "teleport-cluster.majorVersion" . }}' +{{- end -}} + +{{/* All pods selector labels */}} +{{- define "teleport-cluster.selectorLabels" -}} +app.kubernetes.io/name: '{{ default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}' +app.kubernetes.io/instance: '{{ .Release.Name }}' +{{- end -}} + +{{/* Auth pods all labels */}} +{{- define "teleport-cluster.auth.labels" -}} +{{ include "teleport-cluster.auth.selectorLabels" . }} +helm.sh/chart: '{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}' +app.kubernetes.io/managed-by: '{{ .Release.Service }}' +app.kubernetes.io/version: '{{ include "teleport-cluster.version" . }}' +teleport.dev/majorVersion: '{{ include "teleport-cluster.majorVersion" . }}' +{{- end -}} + +{{/* ServiceNames are limited to 63 characters, we might have to truncate the ReleaseName + to make sure the auth serviceName won't exceed this limit */}} +{{- define "teleport-cluster.auth.serviceName" -}} +{{- .Release.Name | trunc 58 | trimSuffix "-" -}}-auth +{{- end -}} + +{{- define "teleport-cluster.auth.currentVersionServiceName" -}} +{{- .Release.Name | trunc 54 | trimSuffix "-" -}}-auth-v{{ include "teleport-cluster.majorVersion" . 
}} +{{- end -}} + +{{- define "teleport-cluster.auth.previousVersionServiceName" -}} +{{- .Release.Name | trunc 54 | trimSuffix "-" -}}-auth-v{{ include "teleport-cluster.previousMajorVersion" . }} +{{- end -}} + + +{{/* In most places we want to use the FQDN instead of relying on Kubernetes ndots behaviour + for performance reasons */}} +{{- define "teleport-cluster.auth.serviceFQDN" -}} +{{ include "teleport-cluster.auth.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/auth/_config.aws.tpl b/helm/old/teleport-cluster/templates/auth/_config.aws.tpl new file mode 100644 index 0000000..9fb0863 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/_config.aws.tpl @@ -0,0 +1,26 @@ +{{- define "teleport-cluster.auth.config.aws" -}} +{{ include "teleport-cluster.auth.config.common" . }} + storage: + type: dynamodb + region: {{ required "aws.region is required in chart values" .Values.aws.region }} + table_name: {{ required "aws.backendTable is required in chart values" .Values.aws.backendTable }} + {{- if .Values.aws.auditLogMirrorOnStdout }} + audit_events_uri: ['dynamodb://{{ required "aws.auditLogTable is required in chart values" .Values.aws.auditLogTable }}', 'stdout://'] + {{- else }} + audit_events_uri: ['dynamodb://{{ required "aws.auditLogTable is required in chart values" .Values.aws.auditLogTable }}'] + {{- end }} + audit_sessions_uri: s3://{{ required "aws.sessionRecordingBucket is required in chart values" .Values.aws.sessionRecordingBucket }} + continuous_backups: {{ required "aws.backups is required in chart values" .Values.aws.backups }} + {{- if .Values.aws.dynamoAutoScaling }} + auto_scaling: true + billing_mode: provisioned + read_min_capacity: {{ required "aws.readMinCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.readMinCapacity }} + read_max_capacity: {{ required "aws.readMaxCapacity is required when aws.dynamoAutoScaling is true" 
.Values.aws.readMaxCapacity }} + read_target_value: {{ required "aws.readTargetValue is required when aws.dynamoAutoScaling is true" .Values.aws.readTargetValue }} + write_min_capacity: {{ required "aws.writeMinCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.writeMinCapacity }} + write_max_capacity: {{ required "aws.writeMaxCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.writeMaxCapacity }} + write_target_value: {{ required "aws.writeTargetValue is required when aws.dynamoAutoScaling is true" .Values.aws.writeTargetValue }} + {{- else }} + auto_scaling: false + {{- end }} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/auth/_config.azure.tpl b/helm/old/teleport-cluster/templates/auth/_config.azure.tpl new file mode 100644 index 0000000..6bdabd0 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/_config.azure.tpl @@ -0,0 +1,38 @@ +{{/* Helper to build the database connection string, adds paraneters if needed */}} +{{- define "teleport-cluster.auth.config.azure.conn_string.query" }} + {{- if .Values.azure.databasePoolMaxConnections -}} + {{- printf "sslmode=verify-full&pool_max_conns=%v" .Values.azure.databasePoolMaxConnections -}} + {{- else -}} + sslmode=verify-full + {{- end -}} +{{- end -}} + +{{- define "teleport-cluster.auth.config.azure" -}} +{{ include "teleport-cluster.auth.config.common" . }} + storage: + type: postgresql + auth_mode: azure + conn_string: {{ urlJoin (dict + "scheme" "postgresql" + "userinfo" .Values.azure.databaseUser + "host" .Values.azure.databaseHost + "path" .Values.azure.backendDatabase + "query" (include "teleport-cluster.auth.config.azure.conn_string.query" .) 
+ ) | toYaml }} + audit_sessions_uri: {{ urlJoin (dict + "scheme" "azblob" + "host" .Values.azure.sessionRecordingStorageAccount + ) | toYaml }} + audit_events_uri: + - {{ urlJoin (dict + "scheme" "postgresql" + "userinfo" .Values.azure.databaseUser + "host" .Values.azure.databaseHost + "path" .Values.azure.auditLogDatabase + "query" "sslmode=verify-full" + "fragment" "auth_mode=azure" + ) | toYaml }} +{{- if .Values.azure.auditLogMirrorOnStdout }} + - "stdout://" +{{- end }} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/auth/_config.common.tpl b/helm/old/teleport-cluster/templates/auth/_config.common.tpl new file mode 100644 index 0000000..bdfda15 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/_config.common.tpl @@ -0,0 +1,65 @@ +{{- define "teleport-cluster.auth.config.common" -}} +{{- $authentication := mustMergeOverwrite .Values.authentication (default dict .Values.authenticationSecondFactor) -}} +{{- $logLevel := (coalesce .Values.logLevel .Values.log.level "INFO") -}} +version: v3 +kubernetes_service: + enabled: true + listen_addr: 0.0.0.0:3026 + public_addr: "{{ include "teleport-cluster.auth.serviceFQDN" . 
}}:3026" +{{- if .Values.kubeClusterName }} + kube_cluster_name: {{ .Values.kubeClusterName }} +{{- else }} + kube_cluster_name: {{ .Values.clusterName }} +{{- end }} +{{- if .Values.labels }} + labels: {{- toYaml .Values.labels | nindent 8 }} +{{- end }} +proxy_service: + enabled: false +ssh_service: + enabled: false +auth_service: + enabled: true + cluster_name: {{ required "clusterName is required in chart values" .Values.clusterName }} +{{- if .Values.enterprise }} + license_file: '/var/lib/license/license.pem' +{{- end }} + authentication: + type: "{{ required "authentication.type is required in chart values" (coalesce .Values.authenticationType $authentication.type) }}" + local_auth: {{ $authentication.localAuth }} +{{- if $authentication.connectorName }} + connector_name: "{{ $authentication.connectorName }}" +{{- end }} +{{- if $authentication.lockingMode }} + locking_mode: "{{ $authentication.lockingMode }}" +{{- end }} +{{- if $authentication.secondFactor }} + second_factor: "{{ $authentication.secondFactor }}" + {{- if not (or (eq $authentication.secondFactor "off") (eq $authentication.secondFactor "otp")) }} + webauthn: + rp_id: {{ required "clusterName is required in chart values" .Values.clusterName }} + {{- if $authentication.webauthn }} + {{- if $authentication.webauthn.attestationAllowedCas }} + attestation_allowed_cas: {{- toYaml $authentication.webauthn.attestationAllowedCas | nindent 12 }} + {{- end }} + {{- if $authentication.webauthn.attestationDeniedCas }} + attestation_denied_cas: {{- toYaml $authentication.webauthn.attestationDeniedCas | nindent 12 }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- if .Values.sessionRecording }} + session_recording: {{ .Values.sessionRecording }} +{{- end }} +{{- if .Values.proxyListenerMode }} + proxy_listener_mode: {{ .Values.proxyListenerMode }} +{{- end }} +teleport: + auth_server: 127.0.0.1:3025 + log: + severity: {{ $logLevel }} + output: {{ .Values.log.output }} + format: + output: {{ 
.Values.log.format }} + extra_fields: {{ .Values.log.extraFields | toJson }} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/auth/_config.gcp.tpl b/helm/old/teleport-cluster/templates/auth/_config.gcp.tpl new file mode 100644 index 0000000..f55743b --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/_config.gcp.tpl @@ -0,0 +1,16 @@ +{{- define "teleport-cluster.auth.config.gcp" -}} +{{ include "teleport-cluster.auth.config.common" . }} + storage: + type: firestore + project_id: {{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }} + collection_name: {{ required "gcp.backendTable is required in chart values" .Values.gcp.backendTable }} + {{- if .Values.gcp.credentialSecretName }} + credentials_path: /etc/teleport-secrets/gcp-credentials.json + {{- end }} + {{- if .Values.gcp.auditLogMirrorOnStdout }} + audit_events_uri: ['firestore://{{ required "gcp.auditLogTable is required in chart values" .Values.gcp.auditLogTable }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}', 'stdout://'] + {{- else }} + audit_events_uri: ['firestore://{{ required "gcp.auditLogTable is required in chart values" .Values.gcp.auditLogTable }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}'] + {{- end }} + audit_sessions_uri: "gs://{{ required "gcp.sessionRecordingBucket is required in chart values" .Values.gcp.sessionRecordingBucket }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}" +{{- end -}} diff --git 
a/helm/old/teleport-cluster/templates/auth/_config.scratch.tpl b/helm/old/teleport-cluster/templates/auth/_config.scratch.tpl new file mode 100644 index 0000000..36c3264 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/_config.scratch.tpl @@ -0,0 +1,12 @@ +{{- define "teleport-cluster.auth.config.scratch" -}} +proxy_service: + enabled: false +ssh_service: + enabled: false +auth_service: + enabled: true +{{- end -}} + +{{- define "teleport-cluster.auth.config.custom" -}} +{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-v12/" }} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/auth/_config.standalone.tpl b/helm/old/teleport-cluster/templates/auth/_config.standalone.tpl new file mode 100644 index 0000000..db5ff58 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/_config.standalone.tpl @@ -0,0 +1,3 @@ +{{- define "teleport-cluster.auth.config.standalone" -}} +{{ include "teleport-cluster.auth.config.common" . 
}} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/auth/clusterrole.yaml b/helm/old/teleport-cluster/templates/auth/clusterrole.yaml new file mode 100644 index 0000000..6bf0886 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/clusterrole.yaml @@ -0,0 +1,71 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }} +rules: +- apiGroups: + - "" + resources: + - users + - groups + - serviceaccounts + verbs: + - impersonate +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - "authorization.k8s.io" + resources: + - selfsubjectaccessreviews + verbs: + - create + +{{ if .Values.operator.enabled }} +- apiGroups: + - "resources.teleport.dev" + resources: + - teleportroles + - teleportroles/status + - teleportusers + - teleportusers/status + - teleportgithubconnectors + - teleportgithubconnectors/status + - teleportoidcconnectors + - teleportoidcconnectors/status + - teleportsamlconnectors + - teleportsamlconnectors/status + - teleportloginrules + - teleportloginrules/status + - teleportprovisiontokens + - teleportprovisiontokens/status + - teleportoktaimportrules + - teleportoktaimportrules/status + verbs: + - get + - list + - patch + - update + - watch + +- apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - create + - get + - update + +- apiGroups: + - "" + resources: + - events + verbs: + - create +{{- end -}} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/auth/clusterrolebinding.yaml b/helm/old/teleport-cluster/templates/auth/clusterrolebinding.yaml new file mode 100644 index 0000000..ba39919 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/clusterrolebinding.yaml @@ -0,0 +1,31 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }} + labels: {{- include "teleport-cluster.auth.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }} +subjects: +- kind: ServiceAccount + name: {{ include "teleport-cluster.auth.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +--- +# This ClusterRoleBinding allows the auth service-account to validate Kubernetes tokens +# This is required for proxies to join using their Kubernetes tokens +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-auth + labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: {{ include "teleport-cluster.auth.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/auth/config.yaml b/helm/old/teleport-cluster/templates/auth/config.yaml new file mode 100644 index 0000000..b5b53cb --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/config.yaml @@ -0,0 +1,28 @@ +{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}} +{{- $configTemplate := printf "teleport-cluster.auth.config.%s" $auth.chartMode -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-auth + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }} +{{- if $auth.annotations.config }} + annotations: {{- toYaml $auth.annotations.config | nindent 4 }} +{{- end }} +data: +{{- if $auth.createProxyToken }} + apply-on-startup.yaml: |2 + kind: token + version: v2 + metadata: + name: {{ .Release.Name }}-proxy + expires: "2050-01-01T00:00:00Z" + spec: + roles: [Proxy] + join_method: kubernetes + kubernetes: + allow: + - service_account: "{{ .Release.Namespace }}:{{ include "teleport-cluster.proxy.serviceAccountName" . 
}}" +{{- end }} + teleport.yaml: |2 + {{- mustMergeOverwrite (include $configTemplate . | fromYaml) $auth.teleportConfig | toYaml | nindent 4 -}} diff --git a/helm/old/teleport-cluster/templates/auth/deployment.yaml b/helm/old/teleport-cluster/templates/auth/deployment.yaml new file mode 100644 index 0000000..8c71803 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/deployment.yaml @@ -0,0 +1,321 @@ +{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}} +{{- $replicated := gt (int $auth.highAvailability.replicaCount) 1 -}} +{{- $projectedServiceAccountToken := semverCompare ">=1.20.0-0" .Capabilities.KubeVersion.Version }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-auth + namespace: {{ .Release.Namespace }} + labels: + {{- include "teleport-cluster.auth.labels" . | nindent 4 }} + app: {{ .Release.Name }} +{{- if $auth.annotations.deployment }} + annotations: {{- toYaml $auth.annotations.deployment | nindent 4 }} +{{- end }} +spec: + replicas: {{ $auth.highAvailability.replicaCount }} +{{- if and $replicated $auth.highAvailability.minReadySeconds }} + minReadySeconds: {{ $auth.highAvailability.minReadySeconds }} +{{- end }} + strategy: +{{- if $replicated }} + # some backends support a maximum amount of auth pods (e.g. DynamoDB), + # we don't want to exceed this during a rollout. + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 +{{- else }} + # using a single replica can be because of a non-replicable storage or when applying upgrade migrations. + # In those cases, we don't want a rolling update. + type: Recreate +{{- end }} + selector: + matchLabels: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + # ConfigMap checksum, to recreate the pod on config changes. + checksum/config: {{ include (print $.Template.BasePath "/auth/config.yaml") . 
| sha256sum }} +{{- if $auth.annotations.pod }} + {{- toYaml $auth.annotations.pod | nindent 8 }} +{{- end }} + labels: + {{- include "teleport-cluster.auth.labels" . | nindent 8 }} + app: {{ .Release.Name }} +{{- if eq $auth.chartMode "azure"}} + azure.workload.identity/use: "true" +{{- end }} + spec: +{{- if $auth.nodeSelector }} + nodeSelector: {{- toYaml $auth.nodeSelector | nindent 8 }} +{{- end }} + affinity: +{{- if $auth.affinity }} + {{- if $auth.highAvailability.requireAntiAffinity }} + {{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }} + {{- end }} + {{- toYaml $auth.affinity | nindent 8 }} +{{- else }} + podAntiAffinity: + {{- if $auth.highAvailability.requireAntiAffinity }} + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - {{ .Release.Name }} + - key: app.kubernetes.io/component + operator: In + values: + - auth + topologyKey: "kubernetes.io/hostname" + {{- else if $replicated }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - {{ .Release.Name }} + - key: app.kubernetes.io/component + operator: In + values: + - auth + topologyKey: "kubernetes.io/hostname" + {{- end }} +{{- end }} +{{- if $auth.tolerations }} + tolerations: {{- toYaml $auth.tolerations | nindent 6 }} +{{- end }} +{{- if $auth.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $auth.imagePullSecrets | nindent 6 }} +{{- end }} +{{- if $auth.initContainers }} + initContainers: + {{- range $initContainer := $auth.initContainers }} + {{- if and (not $initContainer.resources) $auth.resources }} + {{- $_ := set $initContainer "resources" $auth.resources }} + {{- end }} + {{- list $initContainer | toYaml | nindent 8 }} + {{- /* Note: this will break if the user sets 
volumeMounts to its initContainer */}} + volumeMounts: + {{- if $auth.enterprise }} + - mountPath: /var/lib/license + name: "license" + readOnly: true + {{- end }} + {{- if and ($auth.gcp.credentialSecretName) (eq $auth.chartMode "gcp") }} + - mountPath: /etc/teleport-secrets + name: "gcp-credentials" + readOnly: true + {{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" + {{- if $projectedServiceAccountToken }} + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + {{- end }} + {{- if $auth.extraVolumeMounts }} + {{- toYaml $auth.extraVolumeMounts | nindent 10 }} + {{- end }} + {{- end }} +{{- end }} + containers: + - name: "teleport" + image: '{{ if $auth.enterprise }}{{ $auth.enterpriseImage }}{{ else }}{{ $auth.image }}{{ end }}:{{ include "teleport-cluster.version" . }}' + imagePullPolicy: {{ $auth.imagePullPolicy }} + {{- if or $auth.extraEnv $auth.tls.existingCASecretName }} + env: + {{- if (gt (len $auth.extraEnv) 0) }} + {{- toYaml $auth.extraEnv | nindent 8 }} + {{- end }} + {{- if $auth.tls.existingCASecretName }} + - name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + {{- end }} + {{- end }} + args: + - "--diag-addr=0.0.0.0:3000" + {{- if $auth.insecureSkipProxyTLSVerify }} + - "--insecure" + {{- end }} + {{- if $auth.createProxyToken }} + - "--apply-on-startup=/etc/teleport/apply-on-startup.yaml" + {{- end }} + {{- if $auth.extraArgs }} + {{- toYaml $auth.extraArgs | nindent 8 }} + {{- end }} + ports: + - name: diag + containerPort: 3000 + protocol: TCP + - name: auth + containerPort: 3025 + protocol: TCP + - name: kube + containerPort: 3026 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 # wait 5s for agent to start + periodSeconds: 5 # poll health every 5s + failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s) + timeoutSeconds: {{ 
.Values.probeTimeoutSeconds }} + readinessProbe: + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 # wait 5s for agent to register + periodSeconds: 5 # poll health every 5s + failureThreshold: 12 # consider agent unhealthy after 60s (12 * 5s) + timeoutSeconds: {{ .Values.probeTimeoutSeconds }} + lifecycle: + # waiting during preStop ensures no new request will hit the Terminating pod + # on clusters using kube-proxy (kube-proxy syncs the node iptables rules every 30s) + preStop: + exec: + command: + - teleport + - wait + - duration + - 30s +{{- if $auth.postStart.command }} + postStart: + exec: + command: {{ toYaml $auth.postStart.command | nindent 14 }} +{{- end }} +{{- if $auth.resources }} + resources: + {{- toYaml $auth.resources | nindent 10 }} +{{- end }} +{{- if $auth.securityContext }} + securityContext: {{- toYaml $auth.securityContext | nindent 10 }} +{{- end }} + volumeMounts: +{{- if $auth.enterprise }} + - mountPath: /var/lib/license + name: "license" + readOnly: true +{{- end }} +{{- if and ($auth.gcp.credentialSecretName) (eq $auth.chartMode "gcp") }} + - mountPath: /etc/teleport-secrets + name: "gcp-credentials" + readOnly: true +{{- end }} +{{- if $auth.tls.existingCASecretName }} + - mountPath: /etc/teleport-tls-ca + name: "teleport-tls-ca" + readOnly: true +{{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" +{{- if $projectedServiceAccountToken }} + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true +{{- end }} +{{- if $auth.extraVolumeMounts }} + {{- toYaml $auth.extraVolumeMounts | nindent 8 }} +{{- end }} +{{- /* Operator uses '.Values' instead of '$auth' as it will likely be moved out of the auth pods */}} +{{- if .Values.operator.enabled }} + - name: "operator" + image: '{{ .Values.operator.image }}:{{ include "teleport-cluster.version" . 
}}' + imagePullPolicy: {{ .Values.imagePullPolicy }} + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + {{- if .Values.operator.resources }} + resources: {{- toYaml .Values.operator.resources | nindent 10 }} + {{- end }} + volumeMounts: + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" + {{- if $projectedServiceAccountToken }} + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + {{- end }} +{{ end }} +{{- if $projectedServiceAccountToken }} + automountServiceAccountToken: false +{{- end }} + volumes: +{{- if $projectedServiceAccountToken }} + # This projected token volume mimics the `automountServiceAccountToken` + # behaviour but defaults to a 1h TTL instead of 1y. + - name: auth-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - path: "namespace" + fieldRef: + fieldPath: metadata.namespace +{{- end }} +{{- if $auth.enterprise }} + - name: license + secret: + secretName: "license" +{{- end }} +{{- if and ($auth.gcp.credentialSecretName) (eq $auth.chartMode "gcp") }} + - name: gcp-credentials + secret: + secretName: {{ $auth.gcp.credentialSecretName | quote }} +{{- end }} +{{- if $auth.tls.existingCASecretName }} + - name: teleport-tls-ca + secret: + secretName: {{ $auth.tls.existingCASecretName }} +{{- end }} + - name: "config" + configMap: + name: {{ .Release.Name }}-auth + - name: "data" + {{- if and ($auth.persistence.enabled) ( and (not (eq $auth.chartMode "gcp")) (not (eq $auth.chartMode "aws")) (not (eq $auth.chartMode "azure"))) }} + persistentVolumeClaim: + claimName: {{ if $auth.persistence.existingClaimName }}{{ 
$auth.persistence.existingClaimName }}{{ else }}{{ .Release.Name }}{{ end }} + {{- else }} + emptyDir: {} + {{- end }} +{{- if $auth.extraVolumes }} + {{- toYaml $auth.extraVolumes | nindent 6 }} +{{- end }} +{{- if $auth.priorityClassName }} + priorityClassName: {{ $auth.priorityClassName }} +{{- end }} + serviceAccountName: {{ include "teleport-cluster.auth.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ $auth.terminationGracePeriodSeconds }} diff --git a/helm/old/teleport-cluster/templates/auth/pdb.yaml b/helm/old/teleport-cluster/templates/auth/pdb.yaml new file mode 100644 index 0000000..0109589 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/pdb.yaml @@ -0,0 +1,17 @@ +{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}} +{{- if $auth.highAvailability.podDisruptionBudget.enabled }} +{{- if .Capabilities.APIVersions.Has "policy/v1" }} +apiVersion: policy/v1 +{{- else }} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: {{ .Release.Name }}-auth + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }} +spec: + minAvailable: {{ $auth.highAvailability.podDisruptionBudget.minAvailable }} + selector: + matchLabels: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/auth/predeploy_config.yaml b/helm/old/teleport-cluster/templates/auth/predeploy_config.yaml new file mode 100644 index 0000000..1419440 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/predeploy_config.yaml @@ -0,0 +1,31 @@ +{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}} +{{- if $auth.validateConfigOnDeploy }} +{{- $configTemplate := printf "teleport-cluster.auth.config.%s" $auth.chartMode -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-auth-test + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "4" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +data: +{{- if $auth.createProxyToken }} + apply-on-startup.yaml: |2 + kind: token + version: v2 + metadata: + name: {{ .Release.Name }}-proxy + expires: "3000-01-01T00:00:00Z" + spec: + roles: [Proxy] + join_method: kubernetes + kubernetes: + allow: + - service_account: "{{ .Release.Namespace }}:{{ include "teleport-cluster.proxy.serviceAccountName" . }}" +{{- end }} + teleport.yaml: |2 + {{- mustMergeOverwrite (include $configTemplate . | fromYaml) $auth.teleportConfig | toYaml | nindent 4 -}} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/auth/predeploy_job.yaml b/helm/old/teleport-cluster/templates/auth/predeploy_job.yaml new file mode 100644 index 0000000..a03225d --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/predeploy_job.yaml @@ -0,0 +1,103 @@ +{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}} +{{- if $auth.validateConfigOnDeploy }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-auth-test + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + backoffLimit: 1 + template: + spec: +{{- if $auth.affinity }} + affinity: {{- toYaml $auth.affinity | nindent 8 }} +{{- end }} +{{- if $auth.tolerations }} + tolerations: {{- toYaml $auth.tolerations | nindent 6 }} +{{- end }} +{{- if $auth.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $auth.imagePullSecrets | nindent 6 }} +{{- end }} + restartPolicy: Never + containers: + - name: "teleport-config-check" + image: '{{ if $auth.enterprise }}{{ $auth.enterpriseImage }}{{ else }}{{ $auth.image }}{{ end }}:{{ include "teleport-cluster.version" . 
}}' + imagePullPolicy: {{ $auth.imagePullPolicy }} +{{- if $auth.resources }} + resources: + {{- toYaml $auth.resources | nindent 10 }} +{{- end }} +{{- if or $auth.extraEnv $auth.tls.existingCASecretName }} + env: + {{- if (gt (len $auth.extraEnv) 0) }} + {{- toYaml $auth.extraEnv | nindent 8 }} + {{- end }} + {{- if $auth.tls.existingCASecretName }} + - name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + {{- end }} +{{- end }} + command: + - "teleport" + - "configure" + args: + - "--test" + - "/etc/teleport/teleport.yaml" +{{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 10 }} +{{- end }} + volumeMounts: +{{- if .Values.enterprise }} + - mountPath: /var/lib/license + name: "license" + readOnly: true +{{- end }} +{{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} + - mountPath: /etc/teleport-secrets + name: "gcp-credentials" + readOnly: true +{{- end }} +{{- if .Values.tls.existingCASecretName }} + - mountPath: /etc/teleport-tls-ca + name: "teleport-tls-ca" + readOnly: true +{{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" +{{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 8 }} +{{- end }} + volumes: +{{- if .Values.enterprise }} + - name: license + secret: + secretName: "license" +{{- end }} +{{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} + - name: gcp-credentials + secret: + secretName: {{ .Values.gcp.credentialSecretName | quote }} +{{- end }} +{{- if .Values.tls.existingCASecretName }} + - name: teleport-tls-ca + secret: + secretName: {{ .Values.tls.existingCASecretName }} +{{- end }} + - name: "config" + configMap: + name: {{ .Release.Name }}-auth-test + - name: "data" + emptyDir: {} +{{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 6 }} +{{- end }} +{{- end }} diff --git 
a/helm/old/teleport-cluster/templates/auth/pvc.yaml b/helm/old/teleport-cluster/templates/auth/pvc.yaml new file mode 100644 index 0000000..640e3eb --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/pvc.yaml @@ -0,0 +1,24 @@ +{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}} +{{- if $auth.persistence.enabled }} + {{/* Disable persistence for cloud modes */}} + {{- if and (not (eq $auth.chartMode "aws")) (not (eq $auth.chartMode "gcp")) (not (eq $auth.chartMode "azure")) }} + {{/* No need to create a PVC if we reuse an existing claim */}} + {{- if not $auth.persistence.existingClaimName }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + {{- if $auth.persistence.storageClassName }} + storageClassName: {{ $auth.persistence.storageClassName }} + {{- end }} + resources: + requests: + storage: {{ required "persistence.volumeSize is required in chart values" $auth.persistence.volumeSize }} + {{- end }} + {{- end }} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/auth/service-previous-version.yaml b/helm/old/teleport-cluster/templates/auth/service-previous-version.yaml new file mode 100644 index 0000000..75b4b06 --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/service-previous-version.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "teleport-cluster.auth.previousVersionServiceName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }} +spec: + # This is a headless service. Resolving it will return the list of all auth pods running the previous major version + # Proxies should not connect to auth pods from the previous major version + # Proxy rollout should be held until this headLessService does not match pods anymore. 
+ clusterIP: "None" + # Publishing not ready addresses ensures that unhealthy or terminating pods are still accounted for + publishNotReadyAddresses: true + selector: + {{- include "teleport-cluster.auth.selectorLabels" . | nindent 4 }} + teleport.dev/majorVersion: {{ include "teleport-cluster.previousMajorVersion" . | quote }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "teleport-cluster.auth.currentVersionServiceName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }} +spec: + # This is a headless service. Resolving it will return the list of all auth pods running the current major version + clusterIP: "None" + # Publishing not ready addresses ensures that unhealthy or terminating pods are still accounted for + publishNotReadyAddresses: true + selector: + {{- include "teleport-cluster.auth.selectorLabels" . | nindent 4 }} + teleport.dev/majorVersion: {{ include "teleport-cluster.majorVersion" . | quote }} diff --git a/helm/old/teleport-cluster/templates/auth/service.yaml b/helm/old/teleport-cluster/templates/auth/service.yaml new file mode 100644 index 0000000..e5175fb --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/service.yaml @@ -0,0 +1,21 @@ +{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "teleport-cluster.auth.serviceName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }} +{{- if $auth.annotations.service }} + annotations: {{- toYaml $auth.annotations.service | nindent 4 }} +{{- end }} +spec: + ports: + - name: auth + port: 3025 + targetPort: 3025 + protocol: TCP + - name: kube + port: 3026 + targetPort: 3026 + protocol: TCP + selector: {{- include "teleport-cluster.auth.selectorLabels" . 
| nindent 4 }} diff --git a/helm/old/teleport-cluster/templates/auth/serviceaccount.yaml b/helm/old/teleport-cluster/templates/auth/serviceaccount.yaml new file mode 100644 index 0000000..2ee2e1a --- /dev/null +++ b/helm/old/teleport-cluster/templates/auth/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- $auth := mustMergeOverwrite (mustDeepCopy .Values) .Values.auth -}} +{{- if $auth.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "teleport-cluster.auth.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + {{- if or $auth.annotations.serviceAccount $auth.azure.clientID }} + annotations: + {{- if $auth.annotations.serviceAccount }} + {{- toYaml $auth.annotations.serviceAccount | nindent 4 }} + {{- end }} + {{- if $auth.azure.clientID }} + azure.workload.identity/client-id: "{{ $auth.azure.clientID }}" + {{- end }} + {{- end -}} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/podmonitor.yaml b/helm/old/teleport-cluster/templates/podmonitor.yaml new file mode 100644 index 0000000..7201cae --- /dev/null +++ b/helm/old/teleport-cluster/templates/podmonitor.yaml @@ -0,0 +1,31 @@ +{{- if.Values.podMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "teleport-cluster.labels" . | nindent 4 }} + {{- with .Values.podMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "teleport-cluster.selectorLabels" . | nindent 6 }} + podMetricsEndpoints: + - port: diag + path: /metrics + {{- with .Values.podMonitor.interval }} + interval: {{ . 
| quote }} + {{- end }} + podTargetLabels: + - "app.kubernetes.io/name" + - "app.kubernetes.io/instance" + - "app.kubernetes.io/component" + - "app.kubernetes.io/version" + - "teleport.dev/majorVersion" +{{- end }} diff --git a/helm/old/teleport-cluster/templates/proxy/_config.aws.tpl b/helm/old/teleport-cluster/templates/proxy/_config.aws.tpl new file mode 100644 index 0000000..3e4d97a --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/_config.aws.tpl @@ -0,0 +1,3 @@ +{{- define "teleport-cluster.proxy.config.aws" -}} +{{ include "teleport-cluster.proxy.config.common" . }} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/proxy/_config.azure.tpl b/helm/old/teleport-cluster/templates/proxy/_config.azure.tpl new file mode 100644 index 0000000..96ccbc7 --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/_config.azure.tpl @@ -0,0 +1,3 @@ +{{- define "teleport-cluster.proxy.config.azure" -}} +{{ include "teleport-cluster.proxy.config.common" . }} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/proxy/_config.common.tpl b/helm/old/teleport-cluster/templates/proxy/_config.common.tpl new file mode 100644 index 0000000..b6c5e41 --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/_config.common.tpl @@ -0,0 +1,76 @@ +{{- define "teleport-cluster.proxy.config.common" -}} +{{- $logLevel := (coalesce .Values.logLevel .Values.log.level "INFO") -}} +version: v3 +teleport: + join_params: + method: kubernetes + token_name: "{{.Release.Name}}-proxy" + auth_server: "{{ include "teleport-cluster.auth.serviceFQDN" . 
}}:3025" + log: + severity: {{ $logLevel }} + output: {{ .Values.log.output }} + format: + output: {{ .Values.log.format }} + extra_fields: {{ .Values.log.extraFields | toJson }} +ssh_service: + enabled: false +auth_service: + enabled: false +proxy_service: + enabled: true +{{- if .Values.publicAddr }} + public_addr: {{- toYaml .Values.publicAddr | nindent 8 }} +{{- else }} + public_addr: '{{ required "clusterName is required in chart values" .Values.clusterName }}:443' +{{- end }} +{{- if ne .Values.proxyListenerMode "multiplex" }} + listen_addr: 0.0.0.0:3023 + {{- if .Values.sshPublicAddr }} + ssh_public_addr: {{- toYaml .Values.sshPublicAddr | nindent 8 }} + {{- end }} + tunnel_listen_addr: 0.0.0.0:3024 + {{- if .Values.tunnelPublicAddr }} + tunnel_public_addr: {{- toYaml .Values.tunnelPublicAddr | nindent 8 }} + {{- end }} + kube_listen_addr: 0.0.0.0:3026 + {{- if .Values.kubePublicAddr }} + kube_public_addr: {{- toYaml .Values.kubePublicAddr | nindent 8 }} + {{- end }} + mysql_listen_addr: 0.0.0.0:3036 + {{- if .Values.mysqlPublicAddr }} + mysql_public_addr: {{- toYaml .Values.mysqlPublicAddr | nindent 8 }} + {{- end }} + {{- if .Values.separatePostgresListener }} + postgres_listen_addr: 0.0.0.0:5432 + {{- if .Values.postgresPublicAddr }} + postgres_public_addr: {{- toYaml .Values.postgresPublicAddr | nindent 8 }} + {{- else }} + postgres_public_addr: {{ .Values.clusterName }}:5432 + {{- end }} + {{- end }} + {{- if .Values.separateMongoListener }} + mongo_listen_addr: 0.0.0.0:27017 + {{- if .Values.mongoPublicAddr }} + mongo_public_addr: {{- toYaml .Values.mongoPublicAddr | nindent 8 }} + {{- else }} + mongo_public_addr: {{ .Values.clusterName }}:27017 + {{- end }} + {{- end }} +{{- end }} +{{- if or .Values.highAvailability.certManager.enabled .Values.tls.existingSecretName }} + https_keypairs: + - key_file: /etc/teleport-tls/tls.key + cert_file: /etc/teleport-tls/tls.crt + https_keypairs_reload_interval: 12h +{{- else if .Values.acme }} + acme: + enabled: 
{{ .Values.acme }} + email: {{ required "acmeEmail is required in chart values" .Values.acmeEmail }} + {{- if .Values.acmeURI }} + uri: {{ .Values.acmeURI }} + {{- end }} +{{- end }} +{{- if and .Values.ingress.enabled (semverCompare ">= 13.2.0-0" (include "teleport-cluster.version" .)) }} + trust_x_forwarded_for: true +{{- end }} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/proxy/_config.gcp.tpl b/helm/old/teleport-cluster/templates/proxy/_config.gcp.tpl new file mode 100644 index 0000000..cf9c79d --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/_config.gcp.tpl @@ -0,0 +1,3 @@ +{{- define "teleport-cluster.proxy.config.gcp" -}} +{{ include "teleport-cluster.proxy.config.common" . }} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/proxy/_config.scratch.tpl b/helm/old/teleport-cluster/templates/proxy/_config.scratch.tpl new file mode 100644 index 0000000..0efddce --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/_config.scratch.tpl @@ -0,0 +1,12 @@ +{{- define "teleport-cluster.proxy.config.scratch" -}} +ssh_service: + enabled: false +auth_service: + enabled: false +proxy_service: + enabled: true +{{- end -}} + +{{- define "teleport-cluster.proxy.config.custom" -}} +{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-v12/" }} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/proxy/_config.standalone.tpl b/helm/old/teleport-cluster/templates/proxy/_config.standalone.tpl new file mode 100644 index 0000000..7355813 --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/_config.standalone.tpl @@ -0,0 +1,3 @@ +{{- define "teleport-cluster.proxy.config.standalone" -}} +{{ include "teleport-cluster.proxy.config.common" . 
}} +{{- end -}} diff --git a/helm/old/teleport-cluster/templates/proxy/certificate.yaml b/helm/old/teleport-cluster/templates/proxy/certificate.yaml new file mode 100644 index 0000000..d1a98ee --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/certificate.yaml @@ -0,0 +1,27 @@ +{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}} +{{- if $proxy.highAvailability.certManager.enabled }} + {{- $domain := (required "clusterName is required in chartValues when certManager is enabled" $proxy.clusterName) }} + {{- $domainWildcard := printf "*.%s" (required "clusterName is required in chartValues when certManager is enabled" $proxy.clusterName) }} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }} +spec: + secretName: teleport-tls + {{- if $proxy.highAvailability.certManager.addCommonName }} + commonName: {{ quote $domain }} + {{- end }} + dnsNames: + - {{ quote $domain }} + - {{ quote $domainWildcard }} + issuerRef: + name: {{ required "highAvailability.certManager.issuerName is required in chart values" $proxy.highAvailability.certManager.issuerName }} + kind: {{ required "highAvailability.certManager.issuerKind is required in chart values" $proxy.highAvailability.certManager.issuerKind }} + group: {{ required "highAvailability.certManager.issuerGroup is required in chart values" $proxy.highAvailability.certManager.issuerGroup }} + {{- with $proxy.annotations.certSecret }} + secretTemplate: + annotations: {{- toYaml . 
| nindent 6 }} + {{- end }} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/proxy/config.yaml b/helm/old/teleport-cluster/templates/proxy/config.yaml new file mode 100644 index 0000000..8cd7788 --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/config.yaml @@ -0,0 +1,16 @@ +{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}} +{{- $configTemplate := printf "teleport-cluster.proxy.config.%s" $proxy.chartMode -}} +{{- if (contains ":" $proxy.clusterName) -}} + {{- fail "clusterName must not contain a colon, you can override the cluster's public address with publicAddr" -}} +{{- end -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-proxy + namespace: {{ .Release.Namespace }} +{{- if $proxy.annotations.config }} + annotations: {{- toYaml $proxy.annotations.config | nindent 4 }} +{{- end }} +data: + teleport.yaml: |2 + {{- mustMergeOverwrite (include $configTemplate . | fromYaml) $proxy.teleportConfig | toYaml | nindent 4 -}} diff --git a/helm/old/teleport-cluster/templates/proxy/deployment.yaml b/helm/old/teleport-cluster/templates/proxy/deployment.yaml new file mode 100644 index 0000000..a77c339 --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/deployment.yaml @@ -0,0 +1,307 @@ +{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}} +{{- $replicable := or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName -}} +{{- $projectedServiceAccountToken := semverCompare ">=1.20.0-0" .Capabilities.KubeVersion.Version }} +# Deployment is {{ if not $replicable }}not {{end}}replicable +{{- if and $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }} +{{- fail "Cannot set both highAvailability.certManager.enabled and tls.existingSecretName, choose one or the other" }} +{{- end }} +{{- if and $proxy.acme $proxy.tls.existingSecretName }} +{{- fail "Cannot set both acme.enabled and tls.existingSecretName, choose one or the other" }} +{{- 
end }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-proxy + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }} +{{- if $proxy.annotations.deployment }} + annotations: {{- toYaml $proxy.annotations.deployment | nindent 4 }} +{{- end }} +spec: +{{- /* + If proxies cannot be replicated we use a single replica. + By default we want to upgrade all users to at least 2 replicas, if they had a higher replica count we take it. + If a user wants to force a single proxy, they can use the `proxy` specific override. + + $proxySpecificHA is a hack to avoid .Values.proxy.highAvailability to be nil, which would cause a fail when + accessing .Values.proxy.highAvailability.replicaCount. +*/}} +{{- if $replicable }} + {{- $proxySpecificHA := default (dict) .Values.proxy.highAvailability }} + {{- if $proxySpecificHA.replicaCount }} + replicas: {{ $proxySpecificHA.replicaCount }} + {{- else }} + replicas: {{ max .Values.highAvailability.replicaCount 2 }} + {{- end }} + {{- if $proxy.highAvailability.minReadySeconds }} + minReadySeconds: {{ $proxy.highAvailability.minReadySeconds }} + {{- end }} +{{- else }} + replicas: 1 +{{- end }} + selector: + matchLabels: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + # ConfigMap checksum, to recreate the pod on config changes. + checksum/config: {{ include (print $.Template.BasePath "/proxy/config.yaml") . | sha256sum }} +{{- if $proxy.annotations.pod }} + {{- toYaml $proxy.annotations.pod | nindent 8 }} +{{- end }} + labels: {{- include "teleport-cluster.proxy.labels" . 
| nindent 8 }} + spec: +{{- if $proxy.nodeSelector }} + nodeSelector: {{- toYaml $proxy.nodeSelector | nindent 8 }} +{{- end }} + affinity: +{{- if $proxy.affinity }} + {{- if $proxy.highAvailability.requireAntiAffinity }} + {{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }} + {{- end }} + {{- toYaml $proxy.affinity | nindent 8 }} +{{- else }} + podAntiAffinity: + {{- if $proxy.highAvailability.requireAntiAffinity }} + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - {{ .Release.Name }} + - key: app.kubernetes.io/component + operator: In + values: + - proxy + topologyKey: "kubernetes.io/hostname" + {{- else if gt (int $proxy.highAvailability.replicaCount) 1 }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - {{ .Release.Name }} + - key: app.kubernetes.io/component + operator: In + values: + - proxy + topologyKey: "kubernetes.io/hostname" + {{- end }} +{{- end }} +{{- if $proxy.tolerations }} + tolerations: {{- toYaml $proxy.tolerations | nindent 6 }} +{{- end }} +{{- if $proxy.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $proxy.imagePullSecrets | nindent 6 }} +{{- end }} + initContainers: + # wait-auth-update is responsible for holding off the proxy rollout until all auths are running the + # next major version in case of major upgrade. + - name: wait-auth-update + image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . }}' + command: + - teleport + - wait + - no-resolve + - '{{ include "teleport-cluster.auth.previousVersionServiceName" . 
}}.{{ .Release.Namespace }}.svc.cluster.local' +{{- if $proxy.securityContext }} + securityContext: {{- toYaml $proxy.securityContext | nindent 12 }} +{{- end }} +{{- if $proxy.initContainers }} + {{- range $initContainer := $proxy.initContainers }} + {{- if and (not $initContainer.resources) $proxy.resources }} + {{- $_ := set $initContainer "resources" $proxy.resources }} + {{- end }} + {{- list $initContainer | toYaml | nindent 8 }} + {{- /* Note: this will break if the user sets volumeMounts to its initContainer */}} + volumeMounts: + {{- if or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + {{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" + {{- if $proxy.extraVolumeMounts }} + {{- toYaml $proxy.extraVolumeMounts | nindent 10 }} + {{- end }} + {{- end }} +{{- end }} + containers: + - name: "teleport" + image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . 
}}' + imagePullPolicy: {{ $proxy.imagePullPolicy }} + {{- if or $proxy.extraEnv $proxy.tls.existingCASecretName }} + env: + {{- if (gt (len $proxy.extraEnv) 0) }} + {{- toYaml $proxy.extraEnv | nindent 8 }} + {{- end }} + {{- if $proxy.tls.existingCASecretName }} + - name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + {{- end }} + {{- end }} + args: + - "--diag-addr=0.0.0.0:3000" + {{- if $proxy.insecureSkipProxyTLSVerify }} + - "--insecure" + {{- end }} + {{- if $proxy.extraArgs }} + {{- toYaml $proxy.extraArgs | nindent 8 }} + {{- end }} + ports: + - name: tls + containerPort: 3080 + protocol: TCP + {{- if $proxy.enterprise }} + - name: proxypeering + containerPort: 3021 + protocol: TCP + {{- end }} + {{- if ne $proxy.proxyListenerMode "multiplex" }} + - name: sshproxy + containerPort: 3023 + protocol: TCP + - name: sshtun + containerPort: 3024 + protocol: TCP + - name: kube + containerPort: 3026 + protocol: TCP + - name: mysql + containerPort: 3036 + protocol: TCP + {{- if $proxy.separatePostgresListener }} + - name: postgres + containerPort: 5432 + protocol: TCP + {{- end }} + {{- if $proxy.separateMongoListener }} + - name: mongo + containerPort: 27017 + protocol: TCP + {{- end }} + {{- end }} + - name: diag + containerPort: 3000 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 # wait 5s for agent to start + periodSeconds: 5 # poll health every 5s + failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s) + timeoutSeconds: {{ $proxy.probeTimeoutSeconds }} + readinessProbe: + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 # wait 5s for agent to register + periodSeconds: 5 # poll health every 5s + failureThreshold: 12 # consider agent unhealthy after 60s (12 * 5s) + timeoutSeconds: {{ $proxy.probeTimeoutSeconds }} + lifecycle: + # waiting during preStop ensures no new request will hit the Terminating pod + # on clusters using kube-proxy (kube-proxy syncs the node iptables rules 
every 30s) + preStop: + exec: + command: + - teleport + - wait + - duration + - 30s +{{- if $proxy.postStart.command }} + postStart: + exec: + command: {{ toYaml $proxy.postStart.command | nindent 14 }} +{{- end }} +{{- if $proxy.resources }} + resources: + {{- toYaml $proxy.resources | nindent 10 }} +{{- end }} +{{- if $proxy.securityContext }} + securityContext: {{- toYaml $proxy.securityContext | nindent 10 }} +{{- end }} + volumeMounts: +{{- if or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true +{{- end }} +{{- if $proxy.tls.existingCASecretName }} + - mountPath: /etc/teleport-tls-ca + name: "teleport-tls-ca" + readOnly: true +{{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" +{{- if $projectedServiceAccountToken }} + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: proxy-serviceaccount-token + readOnly: true +{{- end }} +{{- if $proxy.extraVolumeMounts }} + {{- toYaml $proxy.extraVolumeMounts | nindent 8 }} +{{- end }} +{{- if $projectedServiceAccountToken }} + automountServiceAccountToken: false +{{- end }} + volumes: +{{- if $projectedServiceAccountToken }} + # This projected token volume mimics the `automountServiceAccountToken` + # behaviour but defaults to a 1h TTL instead of 1y. 
+ - name: proxy-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - path: "namespace" + fieldRef: + fieldPath: metadata.namespace +{{- end }} +{{- if $proxy.highAvailability.certManager.enabled }} + - name: teleport-tls + secret: + secretName: teleport-tls +{{- else if $proxy.tls.existingSecretName }} + - name: teleport-tls + secret: + secretName: {{ $proxy.tls.existingSecretName }} +{{- end }} +{{- if $proxy.tls.existingCASecretName }} + - name: teleport-tls-ca + secret: + secretName: {{ $proxy.tls.existingCASecretName }} +{{- end }} + - name: "config" + configMap: + name: {{ .Release.Name }}-proxy + - name: "data" + emptyDir: {} +{{- if $proxy.extraVolumes }} + {{- toYaml $proxy.extraVolumes | nindent 6 }} +{{- end }} +{{- if $proxy.priorityClassName }} + priorityClassName: {{ $proxy.priorityClassName }} +{{- end }} + serviceAccountName: {{ include "teleport-cluster.proxy.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ $proxy.terminationGracePeriodSeconds }} diff --git a/helm/old/teleport-cluster/templates/proxy/ingress.yaml b/helm/old/teleport-cluster/templates/proxy/ingress.yaml new file mode 100644 index 0000000..e0a2e38 --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/ingress.yaml @@ -0,0 +1,57 @@ +{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}} +{{- if .Values.ingress.enabled -}} + {{- if (not (eq .Values.proxyListenerMode "multiplex")) -}} + {{- fail "Use of an ingress requires TLS multiplexing to be enabled, so you must also set proxyListenerMode=multiplex - see https://goteleport.com/docs/architecture/tls-routing/" -}} + {{- end -}} + {{- $publicAddr := coalesce .Values.publicAddr (list .Values.clusterName) -}} + {{- /* Trim ports from all public addresses if present */ -}} + {{- range $publicAddr -}} + {{- $address := . 
-}} + {{- if (contains ":" $address) -}} + {{- $split := split ":" $address -}} + {{- $address = $split._0 -}} + {{- $publicAddr = append (mustWithout $publicAddr .) $address -}} + {{- end -}} + {{- $wildcard := printf "*.%s" $address -}} + {{- /* Add wildcard versions of all public addresses to ingress, unless 1) suppressed or 2) wildcard version already exists */ -}} + {{- if and (not $.Values.ingress.suppressAutomaticWildcards) (not (hasPrefix "*." $address)) (not (has $wildcard $publicAddr)) -}} + {{- $publicAddr = append $publicAddr (printf "*.%s" $address) -}} + {{- end -}} + {{- end -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Release.Name }}-proxy + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }} + {{- if $proxy.annotations.ingress }} + annotations: {{- toYaml $proxy.annotations.ingress | nindent 4 }} + {{- end }} +spec: + {{- with $proxy.ingress.spec }} + {{- toYaml . | nindent 2 }} + {{- end }} + tls: + - hosts: + {{- range $publicAddr }} + - {{ quote . }} + {{- end }} + {{- if $proxy.highAvailability.certManager.enabled }} + secretName: teleport-tls + {{- else if $proxy.tls.existingSecretName }} + secretName: {{ $proxy.tls.existingSecretName }} + {{- end }} + rules: + {{- range $publicAddr }} + - host: {{ quote . 
}} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ $.Release.Name }} + port: + number: 443 + {{- end }} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/proxy/pdb.yaml b/helm/old/teleport-cluster/templates/proxy/pdb.yaml new file mode 100644 index 0000000..f220031 --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/pdb.yaml @@ -0,0 +1,17 @@ +{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}} +{{- if $proxy.highAvailability.podDisruptionBudget.enabled }} +{{- if .Capabilities.APIVersions.Has "policy/v1" }} +apiVersion: policy/v1 +{{- else }} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: {{ .Release.Name }}-proxy + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }} +spec: + minAvailable: {{ $proxy.highAvailability.podDisruptionBudget.minAvailable }} + selector: + matchLabels: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/proxy/predeploy_config.yaml b/helm/old/teleport-cluster/templates/proxy/predeploy_config.yaml new file mode 100644 index 0000000..6e2d374 --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/predeploy_config.yaml @@ -0,0 +1,16 @@ +{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}} +{{- if $proxy.validateConfigOnDeploy }} +{{- $configTemplate := printf "teleport-cluster.proxy.config.%s" $proxy.chartMode -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-proxy-test + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "4" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +data: + teleport.yaml: |2 + {{- mustMergeOverwrite (include $configTemplate . 
| fromYaml) $proxy.teleportConfig | toYaml | nindent 4 -}} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/proxy/predeploy_job.yaml b/helm/old/teleport-cluster/templates/proxy/predeploy_job.yaml new file mode 100644 index 0000000..e0fb551 --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/predeploy_job.yaml @@ -0,0 +1,99 @@ +{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}} +{{- if $proxy.validateConfigOnDeploy }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-proxy-test + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + backoffLimit: 1 + template: + spec: +{{- if $proxy.affinity }} + affinity: {{- toYaml $proxy.affinity | nindent 8 }} +{{- end }} +{{- if $proxy.tolerations }} + tolerations: {{- toYaml $proxy.tolerations | nindent 6 }} +{{- end }} +{{- if $proxy.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $proxy.imagePullSecrets | nindent 6 }} +{{- end }} + restartPolicy: Never + containers: + - name: "teleport" + image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . 
}}' + imagePullPolicy: {{ $proxy.imagePullPolicy }} +{{- if $proxy.resources }} + resources: + {{- toYaml $proxy.resources | nindent 10 }} +{{- end }} +{{- if or $proxy.extraEnv $proxy.tls.existingCASecretName }} + env: + {{- if (gt (len $proxy.extraEnv) 0) }} + {{- toYaml $proxy.extraEnv | nindent 8 }} + {{- end }} + {{- if $proxy.tls.existingCASecretName }} + - name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + {{- end }} +{{- end }} + command: + - "teleport" + - "configure" + args: + - "--test" + - "/etc/teleport/teleport.yaml" +{{- if $proxy.securityContext }} + securityContext: {{- toYaml $proxy.securityContext | nindent 10 }} +{{- end }} + volumeMounts: +{{- if or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true +{{- end }} +{{- if $proxy.tls.existingCASecretName }} + - mountPath: /etc/teleport-tls-ca + name: "teleport-tls-ca" + readOnly: true +{{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" +{{- if $proxy.extraVolumeMounts }} + {{- toYaml $proxy.extraVolumeMounts | nindent 8 }} +{{- end }} + volumes: +{{- if $proxy.highAvailability.certManager.enabled }} + - name: teleport-tls + secret: + secretName: teleport-tls + # this avoids deadlock during initial setup + optional: true +{{- else if $proxy.tls.existingSecretName }} + - name: teleport-tls + secret: + secretName: {{ $proxy.tls.existingSecretName }} +{{- end }} +{{- if $proxy.tls.existingCASecretName }} + - name: teleport-tls-ca + secret: + secretName: {{ $proxy.tls.existingCASecretName }} +{{- end }} + - name: "config" + configMap: + name: {{ .Release.Name }}-proxy-test + - name: "data" + emptyDir: {} +{{- if $proxy.extraVolumes }} + {{- toYaml $proxy.extraVolumes | nindent 6 }} +{{- end }} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/proxy/service.yaml b/helm/old/teleport-cluster/templates/proxy/service.yaml 
new file mode 100644 index 0000000..b7e9c27 --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/service.yaml @@ -0,0 +1,70 @@ +{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}} +{{- $backendProtocol := ternary "ssl" "tcp" (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-ssl-cert") -}} +{{- /* Fail early if proxy service type is set to LoadBalancer when ingress.enabled=true */ -}} +{{- if and $proxy.ingress.enabled (eq $proxy.service.type "LoadBalancer") -}} + {{- fail "proxy.service.type must not be LoadBalancer when using an ingress - any load balancer should be provisioned by your ingress controller. Set proxy.service.type=ClusterIP instead" -}} +{{- end -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }} + {{- if (or ($proxy.annotations.service) (eq $proxy.chartMode "aws")) }} + annotations: + {{- if and (eq $proxy.chartMode "aws") (not $proxy.ingress.enabled) }} + {{- if not (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-backend-protocol")}} + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: {{ $backendProtocol }} + {{- end }} + {{- if not (or (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled") (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-attributes"))}} + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + {{- end }} + {{- if not (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-type")}} + service.beta.kubernetes.io/aws-load-balancer-type: nlb + {{- end }} + {{- end }} + {{- if $proxy.annotations.service }} + {{- toYaml $proxy.annotations.service | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ default "LoadBalancer" $proxy.service.type }} +{{- 
with $proxy.service.spec }} + {{- toYaml . | nindent 2 }} +{{- end }} + ports: + - name: tls + port: 443 + targetPort: 3080 + protocol: TCP +{{- if ne $proxy.proxyListenerMode "multiplex" }} + - name: sshproxy + port: 3023 + targetPort: 3023 + protocol: TCP + - name: k8s + port: 3026 + targetPort: 3026 + protocol: TCP + - name: sshtun + port: 3024 + targetPort: 3024 + protocol: TCP + - name: mysql + port: 3036 + targetPort: 3036 + protocol: TCP + {{- if $proxy.separatePostgresListener }} + - name: postgres + port: 5432 + targetPort: 5432 + protocol: TCP + {{- end }} + {{- if $proxy.separateMongoListener }} + - name: mongo + port: 27017 + targetPort: 27017 + protocol: TCP + {{- end }} +{{- end }} + selector: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 4 }} diff --git a/helm/old/teleport-cluster/templates/proxy/serviceaccount.yaml b/helm/old/teleport-cluster/templates/proxy/serviceaccount.yaml new file mode 100644 index 0000000..66a9c4b --- /dev/null +++ b/helm/old/teleport-cluster/templates/proxy/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}} +{{- if $proxy.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "teleport-cluster.proxy.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- if $proxy.annotations.serviceAccount }} + annotations: {{- toYaml $proxy.annotations.serviceAccount | nindent 4 }} +{{- end -}} +{{- end }} diff --git a/helm/old/teleport-cluster/templates/psp.yaml b/helm/old/teleport-cluster/templates/psp.yaml new file mode 100644 index 0000000..8abd2d7 --- /dev/null +++ b/helm/old/teleport-cluster/templates/psp.yaml @@ -0,0 +1,68 @@ +{{/* PSPs are deprecated in 1.22 and removed in 1.25. However Helm doesn't handle their removal properly in 1.25 + We must remove them before 1.25 to ensure the Helm state doesn't corrupt. As this is a breaking change, this + only applies to v12+ charts. 
v11 and below will only show a warning from the NOTES.txt. + Users must use PSAs instead (beta in 1.23, GA in 1.25). The "teleport-cluster" chart runs in "baseline" mode */}} +{{- if and .Values.podSecurityPolicy.enabled (semverCompare "<1.23.0-0" .Capabilities.KubeVersion.Version) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ .Release.Name }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + runAsUser: + rule: MustRunAsNonRoot + fsGroup: + rule: MustRunAs + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true + volumes: + - '*' + hostNetwork: false + hostIPC: false + hostPID: false +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }}-psp + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ .Release.Name }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-psp + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Release.Name }}-psp +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }} +{{- end -}} diff --git a/helm/old/teleport-cluster/tests/README.md b/helm/old/teleport-cluster/tests/README.md new file mode 100644 index 0000000..d81e659 --- /dev/null +++ b/helm/old/teleport-cluster/tests/README.md @@ -0,0 +1,23 @@ +## Unit tests for Helm charts + +Helm chart unit tests run here using the [helm-unittest](https://github.com/quintush/helm-unittest/) Helm plugin. 
+ +*Note: there are multiple forks for the helm-unittest plugin. +They are not compatible and don't provide the same featureset (e.g. including templates from sub-directories). +Our tests rely on features and bugfixes that are only available on the quintush fork +(which seems to be the most maintained at the time of writing)* + +If you get a snapshot error during your testing, you should verify that your changes intended to alter the output, then run +this command from the root of your Teleport checkout to update the snapshots: + +```bash +make -C build.assets test-helm-update-snapshots +``` + +After this, re-run the tests to make sure everything is fine: + +```bash +make -C build.assets test-helm +``` + +Commit the updated snapshots along with your changes. diff --git a/helm/old/teleport-cluster/tests/__snapshot__/auth_clusterrole_test.yaml.snap b/helm/old/teleport-cluster/tests/__snapshot__/auth_clusterrole_test.yaml.snap new file mode 100644 index 0000000..75650c0 --- /dev/null +++ b/helm/old/teleport-cluster/tests/__snapshot__/auth_clusterrole_test.yaml.snap @@ -0,0 +1,66 @@ +adds operator permissions to ClusterRole: + 1: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: RELEASE-NAME + rules: + - apiGroups: + - "" + resources: + - users + - groups + - serviceaccounts + verbs: + - impersonate + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - authorization.k8s.io + resources: + - selfsubjectaccessreviews + verbs: + - create + - apiGroups: + - resources.teleport.dev + resources: + - teleportroles + - teleportroles/status + - teleportusers + - teleportusers/status + - teleportgithubconnectors + - teleportgithubconnectors/status + - teleportoidcconnectors + - teleportoidcconnectors/status + - teleportsamlconnectors + - teleportsamlconnectors/status + - teleportloginrules + - teleportloginrules/status + - teleportprovisiontokens + - teleportprovisiontokens/status + - teleportoktaimportrules + - 
teleportoktaimportrules/status + verbs: + - get + - list + - patch + - update + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create diff --git a/helm/old/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap b/helm/old/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap new file mode 100644 index 0000000..ed8eb56 --- /dev/null +++ b/helm/old/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap @@ -0,0 +1,1674 @@ +adds a proxy token by default: + 1: | + | + kind: token + version: v2 + metadata: + name: RELEASE-NAME-proxy + expires: "2050-01-01T00:00:00Z" + spec: + roles: [Proxy] + join_method: kubernetes + kubernetes: + allow: + - service_account: "NAMESPACE:RELEASE-NAME-proxy" +matches snapshot for acme-off.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-cluster-name + cluster_name: test-cluster-name + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-cluster-name + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for acme-on.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-acme-cluster + cluster_name: test-acme-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-acme-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + 
ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for acme-uri-staging.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-acme-cluster + cluster_name: test-acme-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-acme-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-connector-name.yaml: + 1: | + |- + auth_service: + authentication: + connector_name: okta + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-disable-local.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: false + second_factor: "off" + type: github + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + 
proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-locking-mode.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + locking_mode: strict + second_factor: "on" + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-passwordless.yaml: + 1: | + |- + auth_service: + authentication: + connector_name: passwordless + local_auth: true + second_factor: webauthn + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-type-legacy.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: github + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + 
public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-type.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: github + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-webauthn-legacy.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + attestation_allowed_cas: + - /etc/ssl/certs/ca-certificates.crt + attestation_denied_cas: + - /etc/ssl/certs/ca-certificates.crt + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-webauthn.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + attestation_allowed_cas: + - 
/etc/ssl/certs/ca-certificates.crt + attestation_denied_cas: + - /etc/ssl/certs/ca-certificates.crt + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for aws-dynamodb-autoscaling.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-aws-cluster + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: true + billing_mode: provisioned + continuous_backups: false + read_max_capacity: 100 + read_min_capacity: 5 + read_target_value: 50 + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + write_max_capacity: 100 + write_min_capacity: 5 + write_target_value: 50 + version: v3 +matches snapshot for aws-ha-acme.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-aws-cluster + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: 
separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + labels: + env: aws + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for aws-ha-antiaffinity.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-aws-cluster + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + labels: + env: aws + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for aws-ha-log.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-aws-cluster + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + 
kube_cluster_name: test-aws-cluster + labels: + env: aws + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: DEBUG + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + - stdout:// + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for aws-ha.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-aws-cluster + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + labels: + env: aws + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for aws.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-aws-cluster + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + labels: + env: aws + 
listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for azure.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-azure-cluster + cluster_name: test-azure-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-azure-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - postgresql://teleport@mypostgresinstance.postgres.database.azure.com/teleport_audit?sslmode=verify-full#auth_mode=azure + - stdout:// + audit_sessions_uri: azblob://mystorageaccount.blob.core.windows.net + auth_mode: azure + conn_string: postgresql://teleport@mypostgresinstance.postgres.database.azure.com/teleport_backend?sslmode=verify-full&pool_max_conns=100 + type: postgresql + version: v3 +matches snapshot for azure.yaml without pool_max_conn: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-azure-cluster + cluster_name: test-azure-cluster + enabled: true + proxy_listener_mode: separate + 
kubernetes_service: + enabled: true + kube_cluster_name: test-azure-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - postgresql://teleport@mypostgresinstance.postgres.database.azure.com/teleport_audit?sslmode=verify-full#auth_mode=azure + - stdout:// + audit_sessions_uri: azblob://mystorageaccount.blob.core.windows.net + auth_mode: azure + conn_string: postgresql://teleport@mypostgresinstance.postgres.database.azure.com/teleport_backend?sslmode=verify-full + type: postgresql + version: v3 +matches snapshot for existing-tls-secret-with-ca.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-cluster-name + cluster_name: test-cluster-name + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-cluster-name + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for existing-tls-secret.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-cluster-name + cluster_name: test-cluster-name + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-cluster-name + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + 
ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for gcp-ha-acme.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-gcp-cluster + cluster_name: test-gcp-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-gcp-cluster + labels: + env: gcp + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + audit_sessions_uri: gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + collection_name: test-teleport-firestore-storage-collection + credentials_path: /etc/teleport-secrets/gcp-credentials.json + project_id: gcpproj-123456 + type: firestore + version: v3 +matches snapshot for gcp-ha-antiaffinity.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-gcp-cluster + cluster_name: test-gcp-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-gcp-cluster + labels: + env: gcp + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + 
extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + audit_sessions_uri: gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + collection_name: test-teleport-firestore-storage-collection + credentials_path: /etc/teleport-secrets/gcp-credentials.json + project_id: gcpproj-123456 + type: firestore + version: v3 +matches snapshot for gcp-ha-log.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-gcp-cluster + cluster_name: test-gcp-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-gcp-cluster + labels: + env: gcp + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: DEBUG + storage: + audit_events_uri: + - firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + - stdout:// + audit_sessions_uri: gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + collection_name: test-teleport-firestore-storage-collection + credentials_path: /etc/teleport-secrets/gcp-credentials.json + project_id: gcpproj-123456 + type: firestore + version: v3 +matches snapshot for gcp.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-gcp-cluster + 
cluster_name: test-gcp-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-gcp-cluster + labels: + env: gcp + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + audit_sessions_uri: gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + collection_name: test-teleport-firestore-storage-collection + credentials_path: /etc/teleport-secrets/gcp-credentials.json + project_id: gcpproj-123456 + type: firestore + version: v3 +matches snapshot for initcontainers.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for kube-cluster-name.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-aws-cluster + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + 
kube_cluster_name: test-kube-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for log-basic.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-log-cluster + cluster_name: test-log-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-log-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: json + output: stderr + severity: INFO + version: v3 +matches snapshot for log-extra.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-log-cluster + cluster_name: test-log-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-log-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - level + - timestamp + - component + - caller + output: json + output: /var/lib/teleport/test.log + severity: DEBUG + version: v3 +matches snapshot for log-legacy.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-log-cluster + cluster_name: test-log-cluster + enabled: true + 
proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-log-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: DEBUG + version: v3 +matches snapshot for priority-class-name.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for proxy-listener-mode-multiplex.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-proxy-listener-mode + cluster_name: test-proxy-listener-mode + enabled: true + proxy_listener_mode: multiplex + kubernetes_service: + enabled: true + kube_cluster_name: test-proxy-listener-mode + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for proxy-listener-mode-separate.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + 
type: local + webauthn: + rp_id: test-proxy-listener-mode + cluster_name: test-proxy-listener-mode + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-proxy-listener-mode + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for public-addresses.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for separate-mongo-listener.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for separate-postgres-listener.yaml: + 1: | + |- + 
auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for service.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for session-recording.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + session_recording: node-sync + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for 
standalone-customsize.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-standalone-cluster + cluster_name: test-standalone-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-standalone-cluster + labels: + env: standalone + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for standalone-existingpvc.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-standalone-cluster + cluster_name: test-standalone-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-standalone-cluster + labels: + env: standalone + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for tolerations.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-aws-cluster + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 
127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for version-override.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: test-cluster-name + cluster_name: test-cluster-name + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-cluster-name + labels: + env: test + version: 5.2.1 + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for volumes.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + auth_server: 127.0.0.1:3025 + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 diff --git a/helm/old/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap b/helm/old/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap new file mode 100644 
index 0000000..f3f40c9 --- /dev/null +++ b/helm/old/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap @@ -0,0 +1,518 @@ +should add an operator side-car when operator is enabled: + 1: | + image: public.ecr.aws/gravitational/teleport-operator:13.3.9 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: operator + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true +? should not add named PersistentVolumeClaim as volume when in scratch mode, persistence.existingClaimName + is set and persistence.enabled is false +: 1: | + affinity: + podAntiAffinity: null + automountServiceAccountToken: false + containers: + - args: + - --diag-addr=0.0.0.0:3000 + - --apply-on-startup=/etc/teleport/apply-on-startup.yaml + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - teleport + - wait + - duration + - 30s + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + - containerPort: 3025 + name: auth + protocol: TCP + - containerPort: 3026 + name: kube + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + 
serviceAccountName: RELEASE-NAME + terminationGracePeriodSeconds: 60 + volumes: + - name: auth-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.namespace + path: namespace + - configMap: + name: RELEASE-NAME-auth + name: config + - emptyDir: {} + name: data +should provision initContainer correctly when set in values: + 1: | + - args: + - echo test + image: alpine + name: teleport-init + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + - args: + - echo test2 + image: alpine + name: teleport-init2 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true +should set affinity when set in values: + 1: | + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: gravitational.io/dedicated + operator: In + values: + - teleport +should set imagePullSecrets when set in values: + 1: | + - name: myRegistryKeySecretName +should set nodeSelector when set in values: + 1: | + affinity: + podAntiAffinity: null + automountServiceAccountToken: false + containers: + - args: + - --diag-addr=0.0.0.0:3000 + - --apply-on-startup=/etc/teleport/apply-on-startup.yaml + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - teleport + - wait + - duration + - 
30s + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + - containerPort: 3025 + name: auth + protocol: TCP + - containerPort: 3026 + name: kube + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + nodeSelector: + environment: security + role: bastion + serviceAccountName: RELEASE-NAME + terminationGracePeriodSeconds: 60 + volumes: + - name: auth-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.namespace + path: namespace + - configMap: + name: RELEASE-NAME-auth + name: config + - name: data + persistentVolumeClaim: + claimName: RELEASE-NAME +should set required affinity when highAvailability.requireAntiAffinity is set: + 1: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - RELEASE-NAME + - key: app.kubernetes.io/component + operator: In + values: + - auth + topologyKey: kubernetes.io/hostname +should set resources when set in values: + 1: | + affinity: + podAntiAffinity: null + automountServiceAccountToken: false + containers: + - args: + - --diag-addr=0.0.0.0:3000 + - --apply-on-startup=/etc/teleport/apply-on-startup.yaml + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: 
+ - teleport + - wait + - duration + - 30s + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + - containerPort: 3025 + name: auth + protocol: TCP + - containerPort: 3026 + name: kube + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + serviceAccountName: RELEASE-NAME + terminationGracePeriodSeconds: 60 + volumes: + - name: auth-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.namespace + path: namespace + - configMap: + name: RELEASE-NAME-auth + name: config + - name: data + persistentVolumeClaim: + claimName: RELEASE-NAME +should set securityContext when set in values: + 1: | + affinity: + podAntiAffinity: null + automountServiceAccountToken: false + containers: + - args: + - --diag-addr=0.0.0.0:3000 + - --apply-on-startup=/etc/teleport/apply-on-startup.yaml + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - teleport + - wait + - duration + - 30s + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + - containerPort: 3025 + name: auth + 
protocol: TCP + - containerPort: 3026 + name: kube + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: false + runAsGroup: 99 + runAsNonRoot: true + runAsUser: 99 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + serviceAccountName: RELEASE-NAME + terminationGracePeriodSeconds: 60 + volumes: + - name: auth-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.namespace + path: namespace + - configMap: + name: RELEASE-NAME-auth + name: config + - name: data + persistentVolumeClaim: + claimName: RELEASE-NAME +should set tolerations when set in values: + 1: | + - effect: NoExecute + key: dedicated + operator: Equal + value: teleport + - effect: NoSchedule + key: dedicated + operator: Equal + value: teleport +should use OSS image and not mount license when enterprise is not set in values: + 1: | + affinity: + podAntiAffinity: null + automountServiceAccountToken: false + containers: + - args: + - --diag-addr=0.0.0.0:3000 + - --apply-on-startup=/etc/teleport/apply-on-startup.yaml + image: public.ecr.aws/gravitational/teleport-distroless:12.2.1 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - teleport + - wait + - duration + - 30s + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + - containerPort: 3025 + name: auth 
+ protocol: TCP + - containerPort: 3026 + name: kube + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + serviceAccountName: RELEASE-NAME + terminationGracePeriodSeconds: 60 + volumes: + - name: auth-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.namespace + path: namespace + - configMap: + name: RELEASE-NAME-auth + name: config + - name: data + persistentVolumeClaim: + claimName: RELEASE-NAME diff --git a/helm/old/teleport-cluster/tests/__snapshot__/ingress_test.yaml.snap b/helm/old/teleport-cluster/tests/__snapshot__/ingress_test.yaml.snap new file mode 100644 index 0000000..f8a7288 --- /dev/null +++ b/helm/old/teleport-cluster/tests/__snapshot__/ingress_test.yaml.snap @@ -0,0 +1,55 @@ +does not add additional wildcard publicAddrs when Ingress is enabled and a publicAddr already contains a wildcard: + 1: | + - hosts: + - helm-lint.example.com + - '*.helm-lint.example.com' + - helm-lint-second-domain.example.com + - '*.helm-lint-second-domain.example.com' +does not set a wildcard of clusterName as a hostname when Ingress is enabled and ingress.suppressAutomaticWildcards is true: + 1: | + - hosts: + - teleport.example.com +? 
does not set a wildcard of publicAddr as a hostname when Ingress is enabled, publicAddr + is set and ingress.suppressAutomaticWildcards is true +: 1: | + - hosts: + - helm-lint.example.com +does not set tls.secretName by default: + 1: | + - hosts: + - teleport.example.com + - '*.teleport.example.com' +exposes all publicAddrs and wildcard publicAddrs as hostnames when Ingress is enabled and multiple publicAddrs are set: + 1: | + - hosts: + - helm-lint.example.com + - helm-lint-second-domain.example.com + - '*.helm-lint.example.com' + - '*.helm-lint-second-domain.example.com' +sets the clusterName and wildcard of clusterName as hostnames when Ingress is enabled: + 1: | + - hosts: + - teleport.example.com + - '*.teleport.example.com' +sets the publicAddr and wildcard of publicAddr as hostnames when Ingress is enabled and publicAddr is set: + 1: | + - hosts: + - helm-lint.example.com + - '*.helm-lint.example.com' +sets tls.secretName the value of tls.existingSecretName when set: + 1: | + - hosts: + - teleport.example.com + - '*.teleport.example.com' + secretName: helm-lint-tls-secret +sets tls.secretName when cert-manager is enabled: + 1: | + - hosts: + - teleport.example.com + - '*.teleport.example.com' + secretName: teleport-tls +trims ports from publicAddr and uses it as the hostname when Ingress is enabled and publicAddr is set: + 1: | + - hosts: + - helm-lint.example.com + - '*.helm-lint.example.com' diff --git a/helm/old/teleport-cluster/tests/__snapshot__/predeploy_test.yaml.snap b/helm/old/teleport-cluster/tests/__snapshot__/predeploy_test.yaml.snap new file mode 100644 index 0000000..288859d --- /dev/null +++ b/helm/old/teleport-cluster/tests/__snapshot__/predeploy_test.yaml.snap @@ -0,0 +1,6 @@ +should set imagePullSecrets on auth predeploy job when set in values: + 1: | + - name: myRegistryKeySecretName +should set imagePullSecrets on proxy predeploy job when set in values: + 1: | + - name: myRegistryKeySecretName diff --git 
a/helm/old/teleport-cluster/tests/__snapshot__/proxy_certificate_test.yaml.snap b/helm/old/teleport-cluster/tests/__snapshot__/proxy_certificate_test.yaml.snap new file mode 100644 index 0000000..319cbd8 --- /dev/null +++ b/helm/old/teleport-cluster/tests/__snapshot__/proxy_certificate_test.yaml.snap @@ -0,0 +1,16 @@ +should request a certificate for cluster name when cert-manager is enabled (cert-manager.yaml): + 1: | + - test-cluster + - '*.test-cluster' + 2: | + group: custom.cert-manager.io + kind: CustomClusterIssuer + name: custom +should request a certificate for cluster name when cert-manager is enabled (cert-secret.yaml): + 1: | + - test-cluster + - '*.test-cluster' + 2: | + group: cert-manager.io + kind: Issuer + name: letsencrypt diff --git a/helm/old/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap b/helm/old/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap new file mode 100644 index 0000000..d2858df --- /dev/null +++ b/helm/old/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap @@ -0,0 +1,530 @@ +generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled is not set: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: helm-test.example.com:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled=true: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + public_addr: helm-test.example.com:443 
+ ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled is not set: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: helm-test.example.com:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +generates a config with a clusterName containing a regular string: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: helm-test.example.com:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +generates a config with proxy_service.trust_x_forwarded_for=true when version = 14.0.0-rc.1 and ingress.enabled=true: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + public_addr: helm-test.example.com:443 + trust_x_forwarded_for: true + ssh_service: + enabled: false + teleport: + auth_server: 
RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +generates a config with proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled=true: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + public_addr: helm-test.example.com:443 + trust_x_forwarded_for: true + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for acme-on.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + acme: + email: test@email.com + enabled: true + enabled: true + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: test-acme-cluster:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for acme-uri-staging.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + acme: + email: test@email.com + enabled: true + uri: https://acme-staging-v02.api.letsencrypt.org/directory + enabled: true + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: test-acme-cluster:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: 
RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for aws-ha-acme.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + https_keypairs: + - cert_file: /etc/teleport-tls/tls.crt + key_file: /etc/teleport-tls/tls.key + https_keypairs_reload_interval: 12h + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: test-aws-cluster:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for existing-tls-secret.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + https_keypairs: + - cert_file: /etc/teleport-tls/tls.crt + key_file: /etc/teleport-tls/tls.key + https_keypairs_reload_interval: 12h + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: test-cluster-name:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for log-basic.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: 
test-log-cluster:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: json + output: stderr + severity: INFO + version: v3 +matches snapshot for log-extra.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: test-log-cluster:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - level + - timestamp + - component + - caller + output: json + output: /var/lib/teleport/test.log + severity: DEBUG + version: v3 +matches snapshot for proxy-listener-mode-multiplex.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + public_addr: test-proxy-listener-mode:443 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for proxy-listener-mode-separate.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: test-proxy-listener-mode:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy 
+ log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for public-addresses.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + kube_listen_addr: 0.0.0.0:3026 + kube_public_addr: + - loadbalancer.example.com:3026 + listen_addr: 0.0.0.0:3023 + mongo_listen_addr: 0.0.0.0:27017 + mongo_public_addr: + - loadbalancer.example.com:27017 + mysql_listen_addr: 0.0.0.0:3036 + mysql_public_addr: + - loadbalancer.example.com:3036 + postgres_listen_addr: 0.0.0.0:5432 + postgres_public_addr: + - loadbalancer.example.com:5432 + public_addr: + - loadbalancer.example.com:443 + ssh_public_addr: + - loadbalancer.example.com:3023 + tunnel_listen_addr: 0.0.0.0:3024 + tunnel_public_addr: + - loadbalancer.example.com:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for separate-mongo-listener.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mongo_listen_addr: 0.0.0.0:27017 + mongo_public_addr: helm-lint:27017 + mysql_listen_addr: 0.0.0.0:3036 + public_addr: helm-lint:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for separate-postgres-listener.yaml: + 1: | + |- + auth_service: + enabled: false + proxy_service: + enabled: true + 
kube_listen_addr: 0.0.0.0:3026 + listen_addr: 0.0.0.0:3023 + mysql_listen_addr: 0.0.0.0:3036 + postgres_listen_addr: 0.0.0.0:5432 + postgres_public_addr: helm-lint:5432 + public_addr: helm-lint:443 + tunnel_listen_addr: 0.0.0.0:3024 + ssh_service: + enabled: false + teleport: + auth_server: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3025 + join_params: + method: kubernetes + token_name: RELEASE-NAME-proxy + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 diff --git a/helm/old/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap b/helm/old/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap new file mode 100644 index 0000000..73629a8 --- /dev/null +++ b/helm/old/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap @@ -0,0 +1,495 @@ +should provision initContainer correctly when set in values: + 1: | + - command: + - teleport + - wait + - no-resolve + - RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + name: wait-auth-update + - args: + - echo test + image: alpine + name: teleport-init + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - args: + - echo test2 + image: alpine + name: teleport-init2 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data +should set affinity when set in values: + 1: | + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: gravitational.io/dedicated + operator: In + values: + - teleport +should set imagePullSecrets when set in values: + 1: | + - name: 
myRegistryKeySecretName +should set nodeSelector when set in values: + 1: | + affinity: + podAntiAffinity: null + automountServiceAccountToken: false + containers: + - args: + - --diag-addr=0.0.0.0:3000 + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - teleport + - wait + - duration + - 30s + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3080 + name: tls + protocol: TCP + - containerPort: 3023 + name: sshproxy + protocol: TCP + - containerPort: 3024 + name: sshtun + protocol: TCP + - containerPort: 3026 + name: kube + protocol: TCP + - containerPort: 3036 + name: mysql + protocol: TCP + - containerPort: 3000 + name: diag + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: proxy-serviceaccount-token + readOnly: true + initContainers: + - command: + - teleport + - wait + - no-resolve + - RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + name: wait-auth-update + nodeSelector: + environment: security + role: bastion + serviceAccountName: RELEASE-NAME-proxy + terminationGracePeriodSeconds: 60 + volumes: + - name: proxy-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.namespace + path: namespace + - configMap: + name: RELEASE-NAME-proxy + name: config + - emptyDir: {} + name: data +should set required 
affinity when highAvailability.requireAntiAffinity is set: + 1: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - RELEASE-NAME + - key: app.kubernetes.io/component + operator: In + values: + - proxy + topologyKey: kubernetes.io/hostname +should set resources when set in values: + 1: | + affinity: + podAntiAffinity: null + automountServiceAccountToken: false + containers: + - args: + - --diag-addr=0.0.0.0:3000 + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - teleport + - wait + - duration + - 30s + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3080 + name: tls + protocol: TCP + - containerPort: 3023 + name: sshproxy + protocol: TCP + - containerPort: 3024 + name: sshtun + protocol: TCP + - containerPort: 3026 + name: kube + protocol: TCP + - containerPort: 3036 + name: mysql + protocol: TCP + - containerPort: 3000 + name: diag + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: proxy-serviceaccount-token + readOnly: true + initContainers: + - command: + - teleport + - wait + - no-resolve + - RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + name: wait-auth-update + serviceAccountName: RELEASE-NAME-proxy + terminationGracePeriodSeconds: 60 + volumes: + - name: 
proxy-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.namespace + path: namespace + - configMap: + name: RELEASE-NAME-proxy + name: config + - emptyDir: {} + name: data +should set securityContext for initContainers when set in values: + 1: | + affinity: + podAntiAffinity: null + automountServiceAccountToken: false + containers: + - args: + - --diag-addr=0.0.0.0:3000 + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - teleport + - wait + - duration + - 30s + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3080 + name: tls + protocol: TCP + - containerPort: 3023 + name: sshproxy + protocol: TCP + - containerPort: 3024 + name: sshtun + protocol: TCP + - containerPort: 3026 + name: kube + protocol: TCP + - containerPort: 3036 + name: mysql + protocol: TCP + - containerPort: 3000 + name: diag + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: false + runAsGroup: 99 + runAsNonRoot: true + runAsUser: 99 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: proxy-serviceaccount-token + readOnly: true + initContainers: + - command: + - teleport + - wait + - no-resolve + - RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + name: wait-auth-update + securityContext: + 
allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: false + runAsGroup: 99 + runAsNonRoot: true + runAsUser: 99 + serviceAccountName: RELEASE-NAME-proxy + terminationGracePeriodSeconds: 60 + volumes: + - name: proxy-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.namespace + path: namespace + - configMap: + name: RELEASE-NAME-proxy + name: config + - emptyDir: {} + name: data +should set securityContext when set in values: + 1: | + affinity: + podAntiAffinity: null + automountServiceAccountToken: false + containers: + - args: + - --diag-addr=0.0.0.0:3000 + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - teleport + - wait + - duration + - 30s + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3080 + name: tls + protocol: TCP + - containerPort: 3023 + name: sshproxy + protocol: TCP + - containerPort: 3024 + name: sshtun + protocol: TCP + - containerPort: 3026 + name: kube + protocol: TCP + - containerPort: 3036 + name: mysql + protocol: TCP + - containerPort: 3000 + name: diag + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: false + runAsGroup: 99 + runAsNonRoot: true + runAsUser: 99 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: proxy-serviceaccount-token + readOnly: true + initContainers: 
+ - command: + - teleport + - wait + - no-resolve + - RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local + image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + name: wait-auth-update + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: false + runAsGroup: 99 + runAsNonRoot: true + runAsUser: 99 + serviceAccountName: RELEASE-NAME-proxy + terminationGracePeriodSeconds: 60 + volumes: + - name: proxy-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.namespace + path: namespace + - configMap: + name: RELEASE-NAME-proxy + name: config + - emptyDir: {} + name: data +should set tolerations when set in values: + 1: | + - effect: NoExecute + key: dedicated + operator: Equal + value: teleport + - effect: NoSchedule + key: dedicated + operator: Equal + value: teleport diff --git a/helm/old/teleport-cluster/tests/__snapshot__/proxy_service_test.yaml.snap b/helm/old/teleport-cluster/tests/__snapshot__/proxy_service_test.yaml.snap new file mode 100644 index 0000000..a10b5e5 --- /dev/null +++ b/helm/old/teleport-cluster/tests/__snapshot__/proxy_service_test.yaml.snap @@ -0,0 +1,68 @@ +does not expose separate listener ports by default when ingress.enabled=true: + 1: | + - name: tls + port: 443 + protocol: TCP + targetPort: 3080 +does not expose separate listener ports when running in separate mode and ingress.enabled=true: + 1: | + - name: tls + port: 443 + protocol: TCP + targetPort: 3080 +exposes a single port when running in multiplex mode: + 1: | + - name: tls + port: 443 + protocol: TCP + targetPort: 3080 +exposes a single port when running in multiplex mode and ingress.enabled=true: + 1: | + - name: tls + port: 443 + protocol: TCP + targetPort: 3080 +exposes separate listener ports by default: + 1: | + - name: tls + port: 443 + protocol: TCP + 
targetPort: 3080 + - name: sshproxy + port: 3023 + protocol: TCP + targetPort: 3023 + - name: k8s + port: 3026 + protocol: TCP + targetPort: 3026 + - name: sshtun + port: 3024 + protocol: TCP + targetPort: 3024 + - name: mysql + port: 3036 + protocol: TCP + targetPort: 3036 +exposes separate listener ports when running in separate mode: + 1: | + - name: tls + port: 443 + protocol: TCP + targetPort: 3080 + - name: sshproxy + port: 3023 + protocol: TCP + targetPort: 3023 + - name: k8s + port: 3026 + protocol: TCP + targetPort: 3026 + - name: sshtun + port: 3024 + protocol: TCP + targetPort: 3024 + - name: mysql + port: 3036 + protocol: TCP + targetPort: 3036 diff --git a/helm/old/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap b/helm/old/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap new file mode 100644 index 0000000..d950054 --- /dev/null +++ b/helm/old/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap @@ -0,0 +1,62 @@ +creates a PodSecurityPolicy when enabled in values and supported: + 1: | + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default + name: RELEASE-NAME + spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - '*' + 2: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: RELEASE-NAME-psp + namespace: NAMESPACE + rules: + - apiGroups: + - policy + resourceNames: + - RELEASE-NAME + resources: + - podsecuritypolicies + verbs: + - use + 3: | + apiVersion: rbac.authorization.k8s.io/v1 + 
kind: RoleBinding + metadata: + name: RELEASE-NAME-psp + namespace: NAMESPACE + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: RELEASE-NAME-psp + subjects: + - kind: ServiceAccount + name: RELEASE-NAME diff --git a/helm/old/teleport-cluster/tests/auth_clusterrole_test.yaml b/helm/old/teleport-cluster/tests/auth_clusterrole_test.yaml new file mode 100644 index 0000000..6e26d74 --- /dev/null +++ b/helm/old/teleport-cluster/tests/auth_clusterrole_test.yaml @@ -0,0 +1,19 @@ +suite: Auth ClusterRole +templates: + - auth/clusterrole.yaml +tests: + - it: creates a ClusterRole + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ClusterRole + - it: adds operator permissions to ClusterRole + values: + - ../.lint/operator.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ClusterRole + - matchSnapshot: {} diff --git a/helm/old/teleport-cluster/tests/auth_clusterrolebinding_test.yaml b/helm/old/teleport-cluster/tests/auth_clusterrolebinding_test.yaml new file mode 100644 index 0000000..45117b1 --- /dev/null +++ b/helm/old/teleport-cluster/tests/auth_clusterrolebinding_test.yaml @@ -0,0 +1,20 @@ +suite: Auth ClusterRoleBinding +templates: + - auth/clusterrolebinding.yaml +tests: + - it: creates a ClusterRoleBinding + asserts: + - hasDocuments: + count: 2 + - isKind: + of: ClusterRoleBinding + - it: uses the provided serviceAccount name + values: + - ../.lint/service-account.yaml + asserts: + - contains: + path: subjects + any: true + content: + kind: ServiceAccount + name: "helm-lint" diff --git a/helm/old/teleport-cluster/tests/auth_config_test.yaml b/helm/old/teleport-cluster/tests/auth_config_test.yaml new file mode 100644 index 0000000..ea2ed14 --- /dev/null +++ b/helm/old/teleport-cluster/tests/auth_config_test.yaml @@ -0,0 +1,512 @@ +suite: ConfigMap +templates: + - auth/config.yaml +tests: + - it: matches snapshot for acme-off.yaml + values: + - ../.lint/acme-off.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: 
ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for acme-on.yaml + values: + - ../.lint/acme-on.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for acme-uri-staging.yaml + values: + - ../.lint/acme-on.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: wears annotations (annotations.yaml) + values: + - ../.lint/annotations.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - equal: + path: metadata.annotations.kubernetes\.io/config + value: test-annotation + - equal: + path: metadata.annotations.kubernetes\.io/config-different + value: 2 + + - it: matches snapshot for auth-connector-name.yaml + values: + - ../.lint/auth-connector-name.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for auth-disable-local.yaml + values: + - ../.lint/auth-disable-local.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for auth-locking-mode.yaml + values: + - ../.lint/auth-locking-mode.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for auth-passwordless.yaml + values: + - ../.lint/auth-passwordless.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for auth-type.yaml + values: + - ../.lint/auth-type.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for auth-type-legacy.yaml + values: + - ../.lint/auth-type-legacy.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + 
of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for auth-webauthn.yaml + values: + - ../.lint/auth-webauthn.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for auth-webauthn-legacy.yaml + values: + - ../.lint/auth-webauthn-legacy.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for aws.yaml + values: + - ../.lint/aws.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for aws-dynamodb-autoscaling.yaml + values: + - ../.lint/aws-dynamodb-autoscaling.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for aws-ha.yaml + values: + - ../.lint/aws-ha.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for aws-ha-acme.yaml + values: + - ../.lint/aws-ha-acme.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for aws-ha-antiaffinity.yaml + values: + - ../.lint/aws-ha-antiaffinity.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for aws-ha-log.yaml + values: + - ../.lint/aws-ha-log.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for existing-tls-secret.yaml + values: + - ../.lint/existing-tls-secret.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for 
existing-tls-secret-with-ca.yaml + values: + - ../.lint/existing-tls-secret-with-ca.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for gcp-ha-acme.yaml + values: + - ../.lint/gcp-ha-acme.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for gcp-ha-antiaffinity.yaml + values: + - ../.lint/gcp-ha-antiaffinity.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for gcp-ha-log.yaml + values: + - ../.lint/gcp-ha-log.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for gcp.yaml + values: + - ../.lint/gcp.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for initcontainers.yaml + values: + - ../.lint/initcontainers.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for kube-cluster-name.yaml + values: + - ../.lint/kube-cluster-name.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for log-basic.yaml + values: + - ../.lint/log-basic.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for log-extra.yaml + values: + - ../.lint/log-extra.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for log-legacy.yaml + values: + - ../.lint/log-legacy.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - 
matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for priority-class-name.yaml + values: + - ../.lint/priority-class-name.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for proxy-listener-mode-multiplex.yaml + values: + - ../.lint/proxy-listener-mode-multiplex.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for proxy-listener-mode-separate.yaml + values: + - ../.lint/proxy-listener-mode-separate.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for service.yaml + values: + - ../.lint/service.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for separate-mongo-listener.yaml + values: + - ../.lint/separate-mongo-listener.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for separate-postgres-listener.yaml + values: + - ../.lint/separate-postgres-listener.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for public-addresses.yaml + values: + - ../.lint/public-addresses.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for session-recording.yaml + values: + - ../.lint/session-recording.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for standalone-customsize.yaml + values: + - ../.lint/standalone-customsize.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + 
- matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for standalone-existingpvc.yaml + values: + - ../.lint/standalone-existingpvc.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for tolerations.yaml + values: + - ../.lint/tolerations.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for version-override.yaml + values: + - ../.lint/version-override.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for volumes.yaml + values: + - ../.lint/volumes.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: adds a proxy token by default + set: + clusterName: teleport.example.com + asserts: + - notEqual: + path: data.apply-on-startup\.yaml + value: null + - matchSnapshot: + path: data.apply-on-startup\.yaml + + - it: matches snapshot for azure.yaml + values: + - ../.lint/azure.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for azure.yaml without pool_max_conn + values: + - ../.lint/azure.yaml + set: + azure: + databasePoolMaxConnections: 0 + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: sets "provisioned" billing mode when autoscaling is enabled + values: + - ../.lint/aws-dynamodb-autoscaling.yaml + asserts: + - matchRegex: + path: data.teleport\.yaml + pattern: 'billing_mode: provisioned' diff --git a/helm/old/teleport-cluster/tests/auth_deployment_test.yaml b/helm/old/teleport-cluster/tests/auth_deployment_test.yaml new file mode 100644 index 0000000..cc8cb58 --- /dev/null +++ 
b/helm/old/teleport-cluster/tests/auth_deployment_test.yaml @@ -0,0 +1,826 @@ +suite: Auth Deployment +templates: + - auth/deployment.yaml + - auth/config.yaml +tests: + - it: sets Statefulset annotations when specified + template: auth/deployment.yaml + values: + - ../.lint/annotations.yaml + asserts: + - equal: + path: metadata.annotations.kubernetes\.io/deployment + value: test-annotation + - equal: + path: metadata.annotations.kubernetes\.io/deployment-different + value: 3 + + - it: sets Pod annotations when specified + template: auth/deployment.yaml + values: + - ../.lint/annotations.yaml + asserts: + - equal: + path: spec.template.metadata.annotations.kubernetes\.io/pod + value: test-annotation + - equal: + path: spec.template.metadata.annotations.kubernetes\.io/pod-different + value: 4 + + - it: should not have more than one replica in standalone mode + template: auth/deployment.yaml + set: + chartMode: standalone + clusterName: helm-lint.example.com + asserts: + - equal: + path: spec.replicas + value: 1 + + - it: should have multiple replicas when replicaCount is set + template: auth/deployment.yaml + set: + chartMode: scratch + clusterName: helm-lint.example.com + highAvailability: + replicaCount: 3 + asserts: + - equal: + path: spec.replicas + value: 3 + + - it: should set affinity when set in values + template: auth/deployment.yaml + set: + clusterName: helm-lint.example.com + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: gravitational.io/dedicated + operator: In + values: + - teleport + asserts: + - isNotNull: + path: spec.template.spec.affinity + - matchSnapshot: + path: spec.template.spec.affinity + + - it: should set nodeSelector when set in values + template: auth/deployment.yaml + set: + chartMode: scratch + clusterName: helm-lint.example.com + nodeSelector: + role: bastion + environment: security + asserts: + - isNotNull: + path: spec.template.spec.nodeSelector + - 
matchSnapshot: + path: spec.template.spec + + - it: should set required affinity when highAvailability.requireAntiAffinity is set + template: auth/deployment.yaml + values: + - ../.lint/aws-ha-antiaffinity.yaml + asserts: + - isNotNull: + path: spec.template.spec.affinity + - isNotNull: + path: spec.template.spec.affinity.podAntiAffinity + - isNotNull: + path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution + - matchSnapshot: + path: spec.template.spec.affinity + + - it: should set tolerations when set in values + template: auth/deployment.yaml + values: + - ../.lint/tolerations.yaml + asserts: + - isNotNull: + path: spec.template.spec.tolerations + - matchSnapshot: + path: spec.template.spec.tolerations + + - it: should set resources when set in values + template: auth/deployment.yaml + values: + - ../.lint/resources.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.containers[0].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.containers[0].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.containers[0].resources.requests.memory + value: 2Gi + - matchSnapshot: + path: spec.template.spec + + - it: should set securityContext when set in values + template: auth/deployment.yaml + values: + - ../.lint/security-context.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation + value: false + - equal: + path: spec.template.spec.containers[0].securityContext.privileged + value: false + - equal: + path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem + value: false + - equal: + path: spec.template.spec.containers[0].securityContext.runAsGroup + value: 99 + - equal: + path: spec.template.spec.containers[0].securityContext.runAsNonRoot + value: true + - equal: + path: 
spec.template.spec.containers[0].securityContext.runAsUser + value: 99 + - matchSnapshot: + path: spec.template.spec + + - it: should not set securityContext when is empty object (default value) + template: auth/deployment.yaml + values: + - ../.lint/security-context-empty.yaml + asserts: + - isNull: + path: spec.template.spec.containers[0].securityContext + + # we can't use the dynamic chart version or appVersion as a variable in the tests, + # so we override it manually and check that gets set instead + # this saves us having to update the test every time we cut a new release + - it: should use enterprise image and mount license when enterprise is set in values + template: auth/deployment.yaml + set: + clusterName: helm-lint.example.com + enterprise: true + teleportVersionOverride: 12.2.1 + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: public.ecr.aws/gravitational/teleport-ent-distroless:12.2.1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /var/lib/license + name: "license" + readOnly: true + - contains: + path: spec.template.spec.volumes + content: + name: license + secret: + secretName: license + + - it: should use OSS image and not mount license when enterprise is not set in values + template: auth/deployment.yaml + set: + clusterName: helm-lint + teleportVersionOverride: 12.2.1 + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: public.ecr.aws/gravitational/teleport-distroless:12.2.1 + - notContains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /var/lib/license + name: "license" + readOnly: true + - notContains: + path: spec.template.spec.volumes + content: + name: license + secret: + secretName: license + - matchSnapshot: + path: spec.template.spec + + - it: should mount GCP credentials in GCP mode + template: auth/deployment.yaml + values: + - ../.lint/gcp-ha.yaml + asserts: + - contains: + path: 
spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-secrets + name: "gcp-credentials" + readOnly: true + - contains: + path: spec.template.spec.volumes + content: + name: gcp-credentials + secret: + secretName: teleport-gcp-credentials + + - it: should not mount secret when credentialSecretName is blank in values + template: auth/deployment.yaml + values: + - ../.lint/gcp-ha-workload.yaml + asserts: + - notContains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-secrets + name: "gcp-credentials" + readOnly: true + - notContains: + path: spec.template.spec.volumes + content: + name: gcp-credentials + secret: + secretName: teleport-gcp-credentials + + - it: should mount GCP credentials for initContainer in GCP mode + template: auth/deployment.yaml + values: + - ../.lint/gcp-ha.yaml + - ../.lint/initcontainers.yaml + asserts: + - contains: + path: spec.template.spec.initContainers[0].volumeMounts + content: + mountPath: /etc/teleport-secrets + name: "gcp-credentials" + readOnly: true + + - it: should mount ConfigMap containing Teleport config + template: auth/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport + name: "config" + readOnly: true + - contains: + path: spec.template.spec.volumes + content: + name: config + configMap: + name: RELEASE-NAME-auth + + - it: should mount extraVolumes and extraVolumeMounts on container and initContainers + template: auth/deployment.yaml + values: + - ../.lint/volumes.yaml + - ../.lint/initcontainers.yaml + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /path/to/mount + name: my-mount + - contains: + path: spec.template.spec.initContainers[0].volumeMounts + content: + mountPath: /path/to/mount + name: my-mount + - contains: + path: 
spec.template.spec.initContainers[1].volumeMounts + content: + mountPath: /path/to/mount + name: my-mount + - contains: + path: spec.template.spec.volumes + content: + name: my-mount + secret: + secretName: mySecret + - it: should set imagePullPolicy when set in values + template: auth/deployment.yaml + set: + clusterName: helm-lint.example.com + imagePullPolicy: Always + asserts: + - equal: + path: spec.template.spec.containers[0].imagePullPolicy + value: Always + + - it: should set environment when extraEnv set in values + template: auth/deployment.yaml + values: + - ../.lint/extra-env.yaml + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: SOME_ENVIRONMENT_VARIABLE + value: "some-value" + + - it: should set imagePullSecrets when set in values + template: auth/deployment.yaml + values: + - ../.lint/imagepullsecrets.yaml + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: myRegistryKeySecretName + - matchSnapshot: + path: spec.template.spec.imagePullSecrets + + - it: should provision initContainer correctly when set in values + template: auth/deployment.yaml + values: + - ../.lint/initcontainers.yaml + - ../.lint/resources.yaml + - ../.lint/extra-env.yaml + asserts: + - contains: + path: spec.template.spec.initContainers[0].args + content: "echo test" + - equal: + path: spec.template.spec.initContainers[0].name + value: "teleport-init" + - equal: + path: spec.template.spec.initContainers[0].image + value: "alpine" + - equal: + path: spec.template.spec.initContainers[0].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.initContainers[0].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.initContainers[0].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.initContainers[0].resources.requests.memory + value: 2Gi + - contains: + path: spec.template.spec.initContainers[1].args + content: "echo test2" + - equal: + path: 
spec.template.spec.initContainers[1].name + value: "teleport-init2" + - equal: + path: spec.template.spec.initContainers[1].image + value: "alpine" + - equal: + path: spec.template.spec.initContainers[1].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.initContainers[1].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.initContainers[1].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.initContainers[1].resources.requests.memory + value: 2Gi + - matchSnapshot: + path: spec.template.spec.initContainers + + - it: should add insecureSkipProxyTLSVerify to args when set in values + template: auth/deployment.yaml + set: + clusterName: helm-lint.example.com + insecureSkipProxyTLSVerify: true + asserts: + - contains: + path: spec.template.spec.containers[0].args + content: "--insecure" + + - it: should expose diag port + template: auth/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: diag + containerPort: 3000 + protocol: TCP + + - it: should expose auth port + template: auth/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: auth + containerPort: 3025 + protocol: TCP + + - it: should expose kube port + template: auth/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: kube + containerPort: 3026 + protocol: TCP + + - it: should set postStart command if set in values + template: auth/deployment.yaml + set: + clusterName: helm-lint.example.com + postStart: + command: ["/bin/echo", "test"] + asserts: + - equal: + path: spec.template.spec.containers[0].lifecycle.postStart.exec.command + value: ["/bin/echo", "test"] + + - it: should add PersistentVolumeClaim as volume when in standalone mode and persistence.enabled 
is true + template: auth/deployment.yaml + set: + chartMode: standalone + clusterName: helm-lint.example.com + persistence: + enabled: true + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: data + persistentVolumeClaim: + claimName: RELEASE-NAME + + - it: should not add PersistentVolumeClaim as volume when in standalone mode and persistence.enabled is false + template: auth/deployment.yaml + set: + chartMode: standalone + clusterName: helm-lint.example.com + persistence: + enabled: false + asserts: + - notContains: + path: spec.template.spec.volumes + content: + name: data + persistentVolumeClaim: + claimName: RELEASE-NAME + + - it: should add PersistentVolumeClaim as volume when in scratch mode and persistence.enabled is true + template: auth/deployment.yaml + set: + chartMode: scratch + clusterName: helm-lint.example.com + persistence: + enabled: true + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: data + persistentVolumeClaim: + claimName: RELEASE-NAME + + - it: should not add PersistentVolumeClaim as volume when in scratch mode and persistence.enabled is false + template: auth/deployment.yaml + set: + chartMode: scratch + clusterName: helm-lint.example.com + persistence: + enabled: false + asserts: + - notContains: + path: spec.template.spec.volumes + content: + name: data + persistentVolumeClaim: + claimName: RELEASE-NAME + + - it: should add an operator side-car when operator is enabled + template: auth/deployment.yaml + values: + - ../.lint/operator.yaml + asserts: + - equal: + path: spec.template.spec.containers[1].name + value: operator + - matchSnapshot: + path: spec.template.spec.containers[1] + + - it: should add named PersistentVolumeClaim as volume when in standalone mode, persistence.existingClaimName is set and persistence.enabled is true + template: auth/deployment.yaml + values: + - ../.lint/standalone-existingpvc.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: 
+ name: data + persistentVolumeClaim: + claimName: teleport-storage + + - it: should not add named PersistentVolumeClaim as volume when in standalone mode, persistence.existingClaimName is set but persistence.enabled is false + template: auth/deployment.yaml + values: + - ../.lint/standalone-existingpvc.yaml + set: + persistence: + enabled: false + asserts: + - notContains: + path: spec.template.spec.volumes + content: + name: data + persistentVolumeClaim: + claimName: teleport-storage + + - it: should add named PersistentVolumeClaim as volume when in scratch mode and persistence.existingClaimName is set + template: auth/deployment.yaml + values: + - ../.lint/standalone-existingpvc.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: data + persistentVolumeClaim: + claimName: teleport-storage + + - it: should not add named PersistentVolumeClaim as volume when in scratch mode, persistence.existingClaimName is set and persistence.enabled is false + template: auth/deployment.yaml + values: + - ../.lint/standalone-existingpvc.yaml + set: + persistence: + enabled: false + asserts: + - notContains: + path: spec.template.spec.volumes + content: + name: data + persistentVolumeClaim: + claimName: teleport-storage + - matchSnapshot: + path: spec.template.spec + + - it: should add emptyDir for data in AWS mode + template: auth/deployment.yaml + values: + - ../.lint/aws-ha.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: data + emptyDir: {} + + - it: should add emptyDir for data in GCP mode + template: auth/deployment.yaml + values: + - ../.lint/gcp-ha.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: data + emptyDir: {} + + - it: should set priorityClassName when set in values + template: auth/deployment.yaml + values: + - ../.lint/priority-class-name.yaml + asserts: + - equal: + path: spec.template.spec.priorityClassName + value: system-cluster-critical + + - it: should set 
probeTimeoutSeconds when set in values + template: auth/deployment.yaml + values: + - ../.lint/probe-timeout-seconds.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].livenessProbe.timeoutSeconds + value: 5 + - equal: + path: spec.template.spec.containers[0].readinessProbe.timeoutSeconds + value: 5 + + - it: should mount tls.existingCASecretName and set environment when set in values + template: auth/deployment.yaml + values: + - ../.lint/existing-tls-secret-with-ca.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls-ca + secret: + secretName: helm-lint-existing-tls-secret-ca + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls-ca + name: teleport-tls-ca + readOnly: true + - contains: + path: spec.template.spec.containers[0].env + content: + name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + + - it: should mount tls.existingCASecretName and set extra environment when set in values + template: auth/deployment.yaml + values: + - ../.lint/existing-tls-secret-with-ca.yaml + - ../.lint/extra-env.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls-ca + secret: + secretName: helm-lint-existing-tls-secret-ca + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls-ca + name: teleport-tls-ca + readOnly: true + - contains: + path: spec.template.spec.containers[0].env + content: + name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + - contains: + path: spec.template.spec.containers[0].env + content: + name: SOME_ENVIRONMENT_VARIABLE + value: some-value + + - it: should set minReadySeconds when replicaCount > 1 + template: auth/deployment.yaml + set: + chartMode: scratch + highAvailability: + minReadySeconds: 60 + replicaCount: 3 + asserts: + - equal: + path: spec.minReadySeconds + value: 60 + + - it: should not set minReadySeconds when replicaCount 
= 1 + template: auth/deployment.yaml + set: + chartMode: scratch + highAvailability: + minReadySeconds: 60 + replicaCount: 1 + asserts: + - equal: + path: spec.minReadySeconds + value: null + + - it: should use Recreate strategy when replicaCount = 1 + template: auth/deployment.yaml + set: + chartMode: scratch + highAvailability: + replicaCount: 1 + asserts: + - equal: + path: spec.strategy.type + value: Recreate + + - it: should not set strategy when replicaCount > 1 + template: auth/deployment.yaml + set: + chartMode: scratch + highAvailability: + replicaCount: 2 + asserts: + - equal: + path: spec.strategy.type + value: RollingUpdate + + - it: should not perform surge rolling updates when replicaCount > 1 + template: auth/deployment.yaml + set: + chartMode: scratch + highAvailability: + replicaCount: 2 + asserts: + - equal: + path: spec.strategy.rollingUpdate.maxSurge + value: 0 + - equal: + path: spec.strategy.rollingUpdate.maxUnavailable + value: 1 + + - it: mounts regular tokens on older Kubernetes versions + template: auth/deployment.yaml + set: + clusterName: helm-lint + operator: + enabled: true + capabilities: + majorVersion: 1 + minorVersion: 18 + asserts: + - notEqual: + path: spec.template.spec.automountServiceAccountToken + value: false + - notContains: + path: spec.template.spec.volumes + content: + name: auth-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - path: "namespace" + fieldRef: + fieldPath: metadata.namespace + - notContains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + - notContains: + path: spec.template.spec.containers[1].volumeMounts + content: + mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + + - it: 
mounts tokens through projected volumes on newer Kubernetes versions + template: auth/deployment.yaml + set: + clusterName: helm-lint + operator: + enabled: true + capabilities: + majorVersion: 1 + minorVersion: 21 + asserts: + - equal: + path: spec.template.spec.automountServiceAccountToken + value: false + - contains: + path: spec.template.spec.volumes + content: + name: auth-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - path: "namespace" + fieldRef: + fieldPath: metadata.namespace + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + - contains: + path: spec.template.spec.containers[1].volumeMounts + content: + mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: auth-serviceaccount-token + readOnly: true + + - it: should add the azure workload identity label to auth pods in azure mode + template: auth/deployment.yaml + set: + chartMode: azure + clusterName: teleport.example.com + asserts: + - equal: + path: spec.template.metadata.labels.azure\.workload\.identity/use + value: "true" diff --git a/helm/old/teleport-cluster/tests/auth_pdb_test.yaml b/helm/old/teleport-cluster/tests/auth_pdb_test.yaml new file mode 100644 index 0000000..0ef9aad --- /dev/null +++ b/helm/old/teleport-cluster/tests/auth_pdb_test.yaml @@ -0,0 +1,23 @@ +suite: Auth PodDisruptionBudget +templates: + - auth/pdb.yaml +tests: + - it: should not create a PDB when disabled in values + set: + highAvailability: + podDisruptionBudget: + enabled: false + asserts: + - hasDocuments: + count: 0 + - it: should create a PDB when enabled in values (pdb.yaml) + values: + - ../.lint/pdb.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: PodDisruptionBudget + - equal: + path: spec.minAvailable + value: 
2 diff --git a/helm/old/teleport-cluster/tests/auth_pvc_test.yaml b/helm/old/teleport-cluster/tests/auth_pvc_test.yaml new file mode 100644 index 0000000..3fbd87c --- /dev/null +++ b/helm/old/teleport-cluster/tests/auth_pvc_test.yaml @@ -0,0 +1,87 @@ +suite: Auth PersistentVolumeClaim +templates: + - auth/pvc.yaml +tests: + - it: creates a PersistentVolumeClaim when chartMode=standalone with default size + set: + chartMode: standalone + asserts: + - hasDocuments: + count: 1 + - isKind: + of: PersistentVolumeClaim + - equal: + path: spec.resources.requests.storage + value: "10Gi" + + - it: creates a PersistentVolumeClaim when chartMode=scratch + set: + chartMode: scratch + asserts: + - hasDocuments: + count: 1 + - isKind: + of: PersistentVolumeClaim + + - it: uses a custom size when set + values: + - ../.lint/standalone-customsize.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: PersistentVolumeClaim + - equal: + path: spec.resources.requests.storage + value: 50Gi + + - it: uses a custom storage class when set + values: + - ../.lint/standalone-custom-storage-class.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: PersistentVolumeClaim + - equal: + path: spec.storageClassName + value: ebs-ssd + + - it: does not create a PersistentVolumeClaim when chartMode=standalone and existingClaimName is not blank + set: + chartMode: standalone + persistence: + existingClaimName: test-claim + asserts: + - hasDocuments: + count: 0 + + - it: does not create a PersistentVolumeClaim when chartMode=scratch and existingClaimName is not blank + set: + chartMode: scratch + persistence: + existingClaimName: test-claim + asserts: + - hasDocuments: + count: 0 + + - it: does not create a PersistentVolumeClaim when chartMode=aws + set: + chartMode: aws + asserts: + - hasDocuments: + count: 0 + + - it: does not create a PersistentVolumeClaim when chartMode=gcp + set: + chartMode: gcp + asserts: + - hasDocuments: + count: 0 + + - it: does not create a 
PersistentVolumeClaim when chartMode=azure + set: + chartMode: azure + asserts: + - hasDocuments: + count: 0 diff --git a/helm/old/teleport-cluster/tests/auth_serviceaccount_test.yaml b/helm/old/teleport-cluster/tests/auth_serviceaccount_test.yaml new file mode 100644 index 0000000..532407f --- /dev/null +++ b/helm/old/teleport-cluster/tests/auth_serviceaccount_test.yaml @@ -0,0 +1,32 @@ +suite: Auth ServiceAccount +templates: + - auth/serviceaccount.yaml +tests: + - it: sets ServiceAccount annotations when specified + values: + - ../.lint/annotations.yaml + asserts: + - equal: + path: metadata.annotations.kubernetes\.io/serviceaccount + value: test-annotation + - equal: + path: metadata.annotations.kubernetes\.io/serviceaccount-different + value: 6 + + - it: changes ServiceAccount name when specified + values: + - ../.lint/service-account.yaml + asserts: + - equal: + path: metadata.name + value: "helm-lint" + + - it: sets Azure client ID when set + set: + chartMode: azure + azure: + clientID: "1234" + asserts: + - equal: + path: metadata.annotations.azure\.workload\.identity/client-id + value: "1234" diff --git a/helm/old/teleport-cluster/tests/ingress_test.yaml b/helm/old/teleport-cluster/tests/ingress_test.yaml new file mode 100644 index 0000000..b750167 --- /dev/null +++ b/helm/old/teleport-cluster/tests/ingress_test.yaml @@ -0,0 +1,538 @@ +suite: Proxy Ingress +templates: + - proxy/ingress.yaml +tests: + - it: does not create an Ingress by default + set: + clusterName: teleport.example.com + asserts: + - hasDocuments: + count: 0 + + - it: creates an Ingress when ingress.enabled=true and proxyListenerMode=multiplex + values: + - ../.lint/ingress.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Ingress + + - it: fails to deploy an Ingress when ingress.enabled=true and proxyListenerMode is not set + values: + - ../.lint/ingress.yaml + set: + proxyListenerMode: "" + asserts: + - failedTemplate: + errorMessage: "Use of an ingress requires TLS 
multiplexing to be enabled, so you must also set proxyListenerMode=multiplex - see https://goteleport.com/docs/architecture/tls-routing/" + + - it: fails to deploy an Ingress when ingress.enabled=true and proxyListenerMode=separate + values: + - ../.lint/ingress.yaml + set: + proxyListenerMode: separate + asserts: + - failedTemplate: + errorMessage: "Use of an ingress requires TLS multiplexing to be enabled, so you must also set proxyListenerMode=multiplex - see https://goteleport.com/docs/architecture/tls-routing/" + + - it: wears annotations when set + values: + - ../.lint/ingress.yaml + set: + annotations: + ingress: + test-annotation: test-annotation-value + another-annotation: some-other-value + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Ingress + - equal: + path: metadata.annotations.test-annotation + value: test-annotation-value + - equal: + path: metadata.annotations.another-annotation + value: some-other-value + + - it: sets the clusterName and wildcard of clusterName as hostnames when Ingress is enabled + values: + - ../.lint/ingress.yaml + asserts: + - equal: + path: spec.tls[0].hosts[0] + value: "teleport.example.com" + - contains: + path: spec.tls + content: + hosts: + - "teleport.example.com" + - "*.teleport.example.com" + - equal: + path: spec.rules[0].host + value: "teleport.example.com" + - contains: + path: spec.rules + content: + host: "teleport.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - equal: + path: spec.rules[1].host + value: "*.teleport.example.com" + - contains: + path: spec.rules + content: + host: "*.teleport.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - matchSnapshot: + path: spec.tls + + - it: does not set a wildcard of clusterName as a hostname when Ingress is enabled and ingress.suppressAutomaticWildcards is true + values: + - ../.lint/ingress.yaml + 
set: + ingress: + suppressAutomaticWildcards: true + asserts: + - equal: + path: spec.tls[0].hosts[0] + value: "teleport.example.com" + - contains: + path: spec.tls + content: + hosts: + - "teleport.example.com" + - equal: + path: spec.rules[0].host + value: "teleport.example.com" + - contains: + path: spec.rules + content: + host: "teleport.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - notContains: + path: spec.rules + content: + host: "*.teleport.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - matchSnapshot: + path: spec.tls + + - it: sets the publicAddr and wildcard of publicAddr as hostnames when Ingress is enabled and publicAddr is set + values: + - ../.lint/ingress.yaml + set: + publicAddr: ["helm-lint.example.com"] + asserts: + - equal: + path: spec.tls[0].hosts[0] + value: "helm-lint.example.com" + - contains: + path: spec.tls + content: + hosts: + - "helm-lint.example.com" + - "*.helm-lint.example.com" + - equal: + path: spec.rules[0].host + value: helm-lint.example.com + - contains: + path: spec.rules + content: + host: "helm-lint.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - equal: + path: spec.rules[1].host + value: "*.helm-lint.example.com" + - contains: + path: spec.rules + content: + host: "*.helm-lint.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - matchSnapshot: + path: spec.tls + + - it: does not set a wildcard of publicAddr as a hostname when Ingress is enabled, publicAddr is set and ingress.suppressAutomaticWildcards is true + values: + - ../.lint/ingress.yaml + set: + publicAddr: ["helm-lint.example.com"] + ingress: + suppressAutomaticWildcards: true + asserts: + - equal: + path: spec.tls[0].hosts[0] + value: 
"helm-lint.example.com" + - contains: + path: spec.tls + content: + hosts: + - "helm-lint.example.com" + - equal: + path: spec.rules[0].host + value: helm-lint.example.com + - contains: + path: spec.rules + content: + host: "helm-lint.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - notContains: + path: spec.rules + content: + host: "*.helm-lint.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - matchSnapshot: + path: spec.tls + + - it: trims ports from publicAddr and uses it as the hostname when Ingress is enabled and publicAddr is set + values: + - ../.lint/ingress.yaml + set: + publicAddr: ["helm-lint.example.com:443"] + asserts: + - equal: + path: spec.tls[0].hosts[0] + value: "helm-lint.example.com" + - contains: + path: spec.tls + content: + hosts: + - "helm-lint.example.com" + - "*.helm-lint.example.com" + - equal: + path: spec.rules[0].host + value: "helm-lint.example.com" + - contains: + path: spec.rules + content: + host: helm-lint.example.com + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - equal: + path: spec.rules[1].host + value: "*.helm-lint.example.com" + - contains: + path: spec.rules + content: + host: "*.helm-lint.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - matchSnapshot: + path: spec.tls + + - it: exposes all publicAddrs and wildcard publicAddrs as hostnames when Ingress is enabled and multiple publicAddrs are set + values: + - ../.lint/ingress.yaml + set: + publicAddr: ["helm-lint.example.com", "helm-lint-second-domain.example.com"] + asserts: + - equal: + path: spec.tls[0].hosts[0] + value: "helm-lint.example.com" + - equal: + path: spec.tls[0].hosts[1] + value: "helm-lint-second-domain.example.com" + - contains: + path: spec.tls + 
content: + hosts: + - "helm-lint.example.com" + - "helm-lint-second-domain.example.com" + - "*.helm-lint.example.com" + - "*.helm-lint-second-domain.example.com" + - equal: + path: spec.rules[0].host + value: "helm-lint.example.com" + - equal: + path: spec.rules[1].host + value: "helm-lint-second-domain.example.com" + - equal: + path: spec.rules[2].host + value: "*.helm-lint.example.com" + - equal: + path: spec.rules[3].host + value: "*.helm-lint-second-domain.example.com" + - contains: + path: spec.rules + content: + host: "helm-lint.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - contains: + path: spec.rules + content: + host: "helm-lint-second-domain.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - contains: + path: spec.rules + content: + host: "*.helm-lint.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - contains: + path: spec.rules + content: + host: "*.helm-lint-second-domain.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - matchSnapshot: + path: spec.tls + + # this is a very contrived example which wouldn't even work in reality + # it's just to test the logic in the hostname generation code + - it: does not add additional wildcard publicAddrs when Ingress is enabled and a publicAddr already contains a wildcard + values: + - ../.lint/ingress.yaml + set: + publicAddr: ["helm-lint.example.com", "*.helm-lint.example.com", "helm-lint-second-domain.example.com:443"] + asserts: + - equal: + path: spec.tls[0].hosts[0] + value: "helm-lint.example.com" + - equal: + path: spec.tls[0].hosts[1] + value: "*.helm-lint.example.com" + - equal: + path: spec.tls[0].hosts[2] + value: "helm-lint-second-domain.example.com" + - equal: + path: 
spec.tls[0].hosts[3] + value: "*.helm-lint-second-domain.example.com" + - contains: + path: spec.tls + content: + hosts: + - "helm-lint.example.com" + - "*.helm-lint.example.com" + - "helm-lint-second-domain.example.com" + - "*.helm-lint-second-domain.example.com" + - equal: + path: spec.rules[0].host + value: "helm-lint.example.com" + - equal: + path: spec.rules[1].host + value: "*.helm-lint.example.com" + - equal: + path: spec.rules[2].host + value: "helm-lint-second-domain.example.com" + - equal: + path: spec.rules[3].host + value: "*.helm-lint-second-domain.example.com" + - contains: + path: spec.rules + content: + host: "helm-lint.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - contains: + path: spec.rules + content: + host: "*.helm-lint.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - contains: + path: spec.rules + content: + host: "helm-lint-second-domain.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - contains: + path: spec.rules + content: + host: "*.helm-lint-second-domain.example.com" + http: + paths: + - backend: + service: + name: RELEASE-NAME + port: + number: 443 + path: / + pathType: Prefix + - matchSnapshot: + path: spec.tls + + - it: sets spec when passed + values: + - ../.lint/ingress.yaml + set: + ingress: + spec: + ingressClassName: nginx + otherSpecStuff: lint + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Ingress + - equal: + path: spec.ingressClassName + value: nginx + - equal: + path: spec.otherSpecStuff + value: lint + + - it: does not set tls.secretName by default + values: + - ../.lint/ingress.yaml + asserts: + - isEmpty: + path: spec.tls[0].secretName + - matchSnapshot: + path: spec.tls + + - it: sets tls.secretName when cert-manager is enabled + values: + - ../.lint/ingress.yaml + 
set: + highAvailability: + certManager: + enabled: true + asserts: + - equal: + path: spec.tls[0].secretName + value: teleport-tls + - matchSnapshot: + path: spec.tls + + - it: sets tls.secretName to the value of tls.existingSecretName when set + values: + - ../.lint/ingress.yaml + set: + tls: + existingSecretName: helm-lint-tls-secret + asserts: + - equal: + path: spec.tls[0].secretName + value: helm-lint-tls-secret + - matchSnapshot: + path: spec.tls diff --git a/helm/old/teleport-cluster/tests/podmonitor_test.yaml b/helm/old/teleport-cluster/tests/podmonitor_test.yaml new file mode 100644 index 0000000..ccdf692 --- /dev/null +++ b/helm/old/teleport-cluster/tests/podmonitor_test.yaml @@ -0,0 +1,40 @@ +suite: PodMonitor +templates: + - podmonitor.yaml +tests: + - it: does not create a PodMonitor by default + set: + clusterName: test-kube-cluster-name + asserts: + - hasDocuments: + count: 0 + + - it: creates a PodMonitor when enabled + set: + clusterName: test-kube-cluster-name + podMonitor: + enabled: true + asserts: + - hasDocuments: + count: 1 + - isKind: + of: PodMonitor + + - it: configures scrape interval if provided + set: + clusterName: test-kube-cluster-name + podMonitor: + enabled: true + interval: 2m + asserts: + - equal: + path: spec.podMetricsEndpoints[0].interval + value: 2m + + - it: wears additional labels if provided + asserts: + - equal: + path: metadata.labels.prometheus + value: default + values: + - ../.lint/podmonitor.yaml \ No newline at end of file diff --git a/helm/old/teleport-cluster/tests/predeploy_test.yaml b/helm/old/teleport-cluster/tests/predeploy_test.yaml new file mode 100644 index 0000000..fb32cfa --- /dev/null +++ b/helm/old/teleport-cluster/tests/predeploy_test.yaml @@ -0,0 +1,111 @@ +suite: Pre-Deploy Config Test Hooks +templates: + - auth/predeploy_job.yaml + - auth/predeploy_config.yaml + - proxy/predeploy_job.yaml + - proxy/predeploy_config.yaml +tests: + - it: Deploys the auth-test config + template: auth/predeploy_config.yaml 
+ set: + clusterName: helm-lint + asserts: + - containsDocument: + kind: ConfigMap + apiVersion: v1 + name: RELEASE-NAME-auth-test + namespace: NAMESPACE + + - it: Deploys the proxy-test config + template: proxy/predeploy_config.yaml + set: + clusterName: helm-lint + asserts: + - containsDocument: + kind: ConfigMap + apiVersion: v1 + name: RELEASE-NAME-proxy-test + namespace: NAMESPACE + + - it: Deploys the auth-test job + template: auth/predeploy_job.yaml + set: + clusterName: helm-lint + asserts: + - containsDocument: + kind: Job + apiVersion: batch/v1 + name: RELEASE-NAME-auth-test + namespace: NAMESPACE + + - it: Is executed as a pre-install and pre-upgrade hook + set: + clusterName: helm-lint + asserts: + - equal: + path: metadata.annotations.helm\.sh/hook + value: pre-install,pre-upgrade + + - it: Does not render hooks when config validation is disabled + set: + clusterName: helm-lint + validateConfigOnDeploy: false + asserts: + - hasDocuments: + count: 0 + - it: should set resources on auth predeploy job when set in values + template: auth/predeploy_job.yaml + values: + - ../.lint/resources.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.containers[0].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.containers[0].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.containers[0].resources.requests.memory + value: 2Gi + - it: should set resources on proxy predeploy job when set in values + template: proxy/predeploy_job.yaml + values: + - ../.lint/resources.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.containers[0].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.containers[0].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.containers[0].resources.requests.memory + value: 2Gi + + - it: 
should set imagePullSecrets on proxy predeploy job when set in values + template: proxy/predeploy_job.yaml + values: + - ../.lint/imagepullsecrets.yaml + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: myRegistryKeySecretName + - matchSnapshot: + path: spec.template.spec.imagePullSecrets + + - it: should set imagePullSecrets on auth predeploy job when set in values + template: auth/predeploy_job.yaml + values: + - ../.lint/imagepullsecrets.yaml + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: myRegistryKeySecretName + - matchSnapshot: + path: spec.template.spec.imagePullSecrets diff --git a/helm/old/teleport-cluster/tests/proxy_certificate_test.yaml b/helm/old/teleport-cluster/tests/proxy_certificate_test.yaml new file mode 100644 index 0000000..d1d8f0c --- /dev/null +++ b/helm/old/teleport-cluster/tests/proxy_certificate_test.yaml @@ -0,0 +1,29 @@ +suite: Proxy Certificate +templates: + - proxy/certificate.yaml +tests: + - it: should request a certificate for cluster name when cert-manager is enabled (cert-manager.yaml) + values: + - ../.lint/cert-manager.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Certificate + - matchSnapshot: + path: spec.dnsNames + - matchSnapshot: + path: spec.issuerRef + + - it: should request a certificate for cluster name when cert-manager is enabled (cert-secret.yaml) + values: + - ../.lint/cert-secret.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Certificate + - matchSnapshot: + path: spec.dnsNames + - matchSnapshot: + path: spec.issuerRef diff --git a/helm/old/teleport-cluster/tests/proxy_config_test.yaml b/helm/old/teleport-cluster/tests/proxy_config_test.yaml new file mode 100644 index 0000000..cbacce9 --- /dev/null +++ b/helm/old/teleport-cluster/tests/proxy_config_test.yaml @@ -0,0 +1,235 @@ +suite: ConfigMap +templates: + - proxy/config.yaml +tests: + - it: matches snapshot for log-basic.yaml + values: + - ../.lint/log-basic.yaml 
+ asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for log-extra.yaml + values: + - ../.lint/log-extra.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for public-addresses.yaml + values: + - ../.lint/public-addresses.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: wears annotations (annotations.yaml) + values: + - ../.lint/annotations.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - equal: + path: metadata.annotations.kubernetes\.io/config + value: test-annotation + - equal: + path: metadata.annotations.kubernetes\.io/config-different + value: 2 + + - it: matches snapshot for proxy-listener-mode-multiplex.yaml + values: + - ../.lint/proxy-listener-mode-multiplex.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for proxy-listener-mode-separate.yaml + values: + - ../.lint/proxy-listener-mode-separate.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for separate-mongo-listener.yaml + values: + - ../.lint/separate-mongo-listener.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for separate-postgres-listener.yaml + values: + - ../.lint/separate-postgres-listener.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for aws-ha-acme.yaml + values: + - ../.lint/aws-ha-acme.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - 
it: matches snapshot for existing-tls-secret.yaml + values: + - ../.lint/existing-tls-secret.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for acme-on.yaml + values: + - ../.lint/acme-on.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: matches snapshot for acme-uri-staging.yaml + values: + - ../.lint/acme-uri-staging.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: generates a config with a clusterName containing a regular string + set: + clusterName: "helm-test.example.com" + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: fails when clusterName contains a regular string and a colon + set: + clusterName: "helm-test:cluster-1" + asserts: + - failedTemplate: + errorMessage: "clusterName must not contain a colon, you can override the cluster's public address with publicAddr" + + - it: fails when clusterName contains a port + set: + clusterName: "helm-test.example.com:443" + asserts: + - failedTemplate: + errorMessage: "clusterName must not contain a colon, you can override the cluster's public address with publicAddr" + + - it: generates a config with proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled=true + chart: + version: 13.2.0 + values: + - ../.lint/ingress.yaml + set: + clusterName: "helm-test.example.com" + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: generates a config with proxy_service.trust_x_forwarded_for=true when version = 14.0.0-rc.1 and ingress.enabled=true + chart: + version: "14.0.0-rc.1" + values: + - ../.lint/ingress.yaml + set: + clusterName: "helm-test.example.com" + asserts: + - hasDocuments: + count: 1 
+ - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled is not set + chart: + version: 13.2.0 + set: + clusterName: "helm-test.example.com" + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled=true + chart: + version: 13.1.5 + values: + - ../.lint/ingress.yaml + set: + clusterName: "helm-test.example.com" + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml + + - it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled is not set + chart: + version: 13.1.5 + set: + clusterName: "helm-test.example.com" + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml diff --git a/helm/old/teleport-cluster/tests/proxy_deployment_test.yaml b/helm/old/teleport-cluster/tests/proxy_deployment_test.yaml new file mode 100644 index 0000000..4c4ddf4 --- /dev/null +++ b/helm/old/teleport-cluster/tests/proxy_deployment_test.yaml @@ -0,0 +1,899 @@ +suite: Proxy Deployment +templates: + - proxy/deployment.yaml + - proxy/config.yaml +tests: + - it: sets Deployment annotations when specified + template: proxy/deployment.yaml + values: + - ../.lint/annotations.yaml + asserts: + - equal: + path: metadata.annotations.kubernetes\.io/deployment + value: test-annotation + - equal: + path: metadata.annotations.kubernetes\.io/deployment-different + value: 3 + + - it: sets Pod annotations when specified + template: proxy/deployment.yaml + values: + - ../.lint/annotations.yaml + asserts: + - equal: + path: spec.template.metadata.annotations.kubernetes\.io/pod + value: test-annotation + - equal: + path: 
spec.template.metadata.annotations.kubernetes\.io/pod-different + value: 4 + + - it: should not have more than one replica if no certificate is passed + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - equal: + path: spec.replicas + value: 1 + + - it: should have multiple replicas by default when a certificate is passed through a secret + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + tls: + existingSecretName: my-certs + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should have multiple replicas by default when certManager is configured + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + highAvailability: + certManager: + enabled: true + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should have multiple replicas when global replicaCount is set and a certificate is passed + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + highAvailability: + replicaCount: 3 + certManager: + enabled: true + asserts: + - equal: + path: spec.replicas + value: 3 + + - it: should have a single replica when proxy-specific replicaCount is set to 1 and a cert is passed + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + highAvailability: + certManager: + enabled: true + proxy: + highAvailability: + replicaCount: 1 + asserts: + - equal: + path: spec.replicas + value: 1 + + - it: should set affinity when set in values + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + highAvailability: + replicaCount: 3 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: gravitational.io/dedicated + operator: In + values: + - teleport + asserts: + - isNotNull: + path: spec.template.spec.affinity + - matchSnapshot: + path: spec.template.spec.affinity + + - it: should set required affinity when 
highAvailability.requireAntiAffinity is set + template: proxy/deployment.yaml + values: + - ../.lint/aws-ha-antiaffinity.yaml + asserts: + - isNotNull: + path: spec.template.spec.affinity + - isNotNull: + path: spec.template.spec.affinity.podAntiAffinity + - isNotNull: + path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution + - matchSnapshot: + path: spec.template.spec.affinity + + - it: should set tolerations when set in values + template: proxy/deployment.yaml + values: + - ../.lint/tolerations.yaml + asserts: + - isNotNull: + path: spec.template.spec.tolerations + - matchSnapshot: + path: spec.template.spec.tolerations + + - it: should set resources when set in values + template: proxy/deployment.yaml + values: + - ../.lint/resources.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.containers[0].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.containers[0].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.containers[0].resources.requests.memory + value: 2Gi + - matchSnapshot: + path: spec.template.spec + + - it: should set securityContext when set in values + template: proxy/deployment.yaml + values: + - ../.lint/security-context.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation + value: false + - equal: + path: spec.template.spec.containers[0].securityContext.privileged + value: false + - equal: + path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem + value: false + - equal: + path: spec.template.spec.containers[0].securityContext.runAsGroup + value: 99 + - equal: + path: spec.template.spec.containers[0].securityContext.runAsNonRoot + value: true + - equal: + path: spec.template.spec.containers[0].securityContext.runAsUser + value: 99 + - matchSnapshot: + path: spec.template.spec + + - it: should not 
set securityContext when is empty object (default value) + template: proxy/deployment.yaml + values: + - ../.lint/security-context-empty.yaml + asserts: + - isNull: + path: spec.template.spec.containers[0].securityContext + + - it: should set securityContext for initContainers when set in values + template: proxy/deployment.yaml + values: + - ../.lint/security-context.yaml + asserts: + - equal: + path: spec.template.spec.initContainers[0].securityContext.allowPrivilegeEscalation + value: false + - equal: + path: spec.template.spec.initContainers[0].securityContext.privileged + value: false + - equal: + path: spec.template.spec.initContainers[0].securityContext.readOnlyRootFilesystem + value: false + - equal: + path: spec.template.spec.initContainers[0].securityContext.runAsGroup + value: 99 + - equal: + path: spec.template.spec.initContainers[0].securityContext.runAsNonRoot + value: true + - equal: + path: spec.template.spec.initContainers[0].securityContext.runAsUser + value: 99 + - matchSnapshot: + path: spec.template.spec + + + - it: should not set securityContext for initContainers when is empty object (default value) + template: proxy/deployment.yaml + values: + - ../.lint/security-context-empty.yaml + asserts: + - isNull: + path: spec.template.spec.initContainers[0].securityContext + + # we can't use the dynamic chart version or appVersion as a variable in the tests, + # so we override it manually and check that gets set instead + # this saves us having to update the test every time we cut a new release + - it: should use enterprise image when enterprise is set in values + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + enterprise: true + teleportVersionOverride: 12.2.1 + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: public.ecr.aws/gravitational/teleport-ent-distroless:12.2.1 + + - it: should use OSS image when enterprise is not set in values + template: proxy/deployment.yaml + set: + clusterName: 
helm-lint + teleportVersionOverride: 12.2.1 + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: public.ecr.aws/gravitational/teleport-distroless:12.2.1 + + - it: should mount TLS certs when cert-manager is enabled + template: proxy/deployment.yaml + values: + - ../.lint/gcp-ha-acme.yaml + - ../.lint/initcontainers.yaml + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls + secret: + secretName: teleport-tls + - contains: + path: spec.template.spec.initContainers[1].volumeMounts + content: + mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + - contains: + path: spec.template.spec.initContainers[2].volumeMounts + content: + mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + + - it: should mount ConfigMap containing Teleport config + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport + name: "config" + readOnly: true + - contains: + path: spec.template.spec.volumes + content: + name: config + configMap: + name: RELEASE-NAME-proxy + + - it: should mount extraVolumes and extraVolumeMounts on container and initContainers + template: proxy/deployment.yaml + values: + - ../.lint/volumes.yaml + - ../.lint/initcontainers.yaml + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /path/to/mount + name: my-mount + - contains: + path: spec.template.spec.initContainers[1].volumeMounts + content: + mountPath: /path/to/mount + name: my-mount + - contains: + path: spec.template.spec.initContainers[2].volumeMounts + content: + mountPath: /path/to/mount + name: my-mount + - contains: + path: spec.template.spec.volumes + content: + name: my-mount 
+ secret: + secretName: mySecret + + - it: should set imagePullPolicy when set in values + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + imagePullPolicy: Always + asserts: + - equal: + path: spec.template.spec.containers[0].imagePullPolicy + value: Always + + - it: should set environment when extraEnv set in values + template: proxy/deployment.yaml + values: + - ../.lint/extra-env.yaml + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: SOME_ENVIRONMENT_VARIABLE + value: "some-value" + + - it: should set imagePullSecrets when set in values + template: proxy/deployment.yaml + values: + - ../.lint/imagepullsecrets.yaml + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: myRegistryKeySecretName + - matchSnapshot: + path: spec.template.spec.imagePullSecrets + + - it: should provision initContainer correctly when set in values + template: proxy/deployment.yaml + values: + - ../.lint/initcontainers.yaml + - ../.lint/resources.yaml + - ../.lint/extra-env.yaml + asserts: + - contains: + path: spec.template.spec.initContainers[1].args + content: "echo test" + - equal: + path: spec.template.spec.initContainers[1].name + value: "teleport-init" + - equal: + path: spec.template.spec.initContainers[1].image + value: "alpine" + - equal: + path: spec.template.spec.initContainers[1].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.initContainers[1].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.initContainers[1].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.initContainers[1].resources.requests.memory + value: 2Gi + - contains: + path: spec.template.spec.initContainers[2].args + content: "echo test2" + - equal: + path: spec.template.spec.initContainers[2].name + value: "teleport-init2" + - equal: + path: spec.template.spec.initContainers[2].image + value: "alpine" + - equal: + path: 
spec.template.spec.initContainers[2].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.initContainers[2].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.initContainers[2].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.initContainers[2].resources.requests.memory + value: 2Gi + - matchSnapshot: + path: spec.template.spec.initContainers + + - it: should add insecureSkipProxyTLSVerify to args when set in values + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + insecureSkipProxyTLSVerify: true + asserts: + - contains: + path: spec.template.spec.containers[0].args + content: "--insecure" + + - it: should expose diag port + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: diag + containerPort: 3000 + protocol: TCP + + - it: should expose tls port + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: tls + containerPort: 3080 + protocol: TCP + + - it: should expose tls port when proxyListenerMode is multiplex + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + proxyListenerMode: multiplex + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: tls + containerPort: 3080 + protocol: TCP + + - it: should not expose proxy peering port by default + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - notContains: + path: spec.template.spec.containers[0].ports + content: + name: proxypeering + containerPort: 3021 + protocol: TCP + + - it: should expose proxy peering port when enterprise is true + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + enterprise: true + asserts: + - contains: + path: 
spec.template.spec.containers[0].ports + content: + name: proxypeering + containerPort: 3021 + protocol: TCP + + - it: should expose sshproxy port by default + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: sshproxy + containerPort: 3023 + protocol: TCP + + - it: should not expose sshproxy port when proxyListenerMode is multiplex + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + proxyListenerMode: multiplex + asserts: + - notContains: + path: spec.template.spec.containers[0].ports + content: + name: sshproxy + containerPort: 3023 + protocol: TCP + + - it: should expose sshtun port by default + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: sshtun + containerPort: 3024 + protocol: TCP + + - it: should not expose sshtun port when proxyListenerMode is multiplex + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + proxyListenerMode: multiplex + asserts: + - notContains: + path: spec.template.spec.containers[0].ports + content: + name: sshtun + containerPort: 3024 + protocol: TCP + + - it: should expose k8s port by default + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: kube + containerPort: 3026 + protocol: TCP + + - it: should not expose k8s port when proxyListenerMode is multiplex + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + proxyListenerMode: multiplex + asserts: + - notContains: + path: spec.template.spec.containers[0].ports + content: + name: kube + containerPort: 3026 + protocol: TCP + + - it: should expose mysql port by default + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + 
- contains: + path: spec.template.spec.containers[0].ports + content: + name: mysql + containerPort: 3036 + protocol: TCP + + - it: should not expose mysql port when proxyListenerMode is multiplex + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + proxyListenerMode: multiplex + asserts: + - notContains: + path: spec.template.spec.containers[0].ports + content: + name: mysql + containerPort: 3036 + protocol: TCP + + - it: should expose postgres port when separate postgres listener is enabled + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + separatePostgresListener: true + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: postgres + containerPort: 5432 + protocol: TCP + + - it: should not expose postgres port when proxyListenerMode is multiplex and separate postgres listener is enabled + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + proxyListenerMode: multiplex + separatePostgresListener: true + asserts: + - notContains: + path: spec.template.spec.containers[0].ports + content: + name: postgres + containerPort: 5432 + protocol: TCP + + - it: should expose mongo port when separate mongo listener is enabled + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + separateMongoListener: true + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: mongo + containerPort: 27017 + protocol: TCP + + - it: should not expose mongo port when when proxyListenerMode is multiplex and separate mongo listener is enabled + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + proxyListenerMode: multiplex + separateMongoListener: true + asserts: + - notContains: + path: spec.template.spec.containers[0].ports + content: + name: mongo + containerPort: 27017 + protocol: TCP + + - it: should set postStart command if set in values + template: proxy/deployment.yaml + set: + 
clusterName: helm-lint.example.com + postStart: + command: ["/bin/echo", "test"] + asserts: + - equal: + path: spec.template.spec.containers[0].lifecycle.postStart.exec.command + value: ["/bin/echo", "test"] + + - it: should add and mount emptyDir for data + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /var/lib/teleport + name: data + - contains: + path: spec.template.spec.volumes + content: + name: data + emptyDir: {} + + - it: should set priorityClassName when set in values + template: proxy/deployment.yaml + values: + - ../.lint/priority-class-name.yaml + asserts: + - equal: + path: spec.template.spec.priorityClassName + value: system-cluster-critical + + - it: should set probeTimeoutSeconds when set in values + template: proxy/deployment.yaml + values: + - ../.lint/probe-timeout-seconds.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].livenessProbe.timeoutSeconds + value: 5 + - equal: + path: spec.template.spec.containers[0].readinessProbe.timeoutSeconds + value: 5 + + - it: should not mount TLS secrets when when highAvailability.certManager.enabled is false and tls.existingSecretName is not set + template: proxy/deployment.yaml + set: + clusterName: helm-lint-test-cluster + asserts: + - notContains: + path: spec.template.spec.volumes + content: + name: teleport-tls + secret: + secretName: teleport-tls + - notContains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls + name: teleport-tls + readOnly: true + + - it: should mount cert-manager TLS secret when highAvailability.certManager.enabled is true + template: proxy/deployment.yaml + values: + - ../.lint/cert-manager.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls + secret: + secretName: teleport-tls + - contains: + path: 
spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls + name: teleport-tls + readOnly: true + + - it: should mount tls.existingSecretName when set in values + template: proxy/deployment.yaml + values: + - ../.lint/existing-tls-secret.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls + secret: + secretName: helm-lint-existing-tls-secret + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls + name: teleport-tls + readOnly: true + + - it: should mount tls.existingCASecretName and set environment when set in values + template: proxy/deployment.yaml + values: + - ../.lint/existing-tls-secret-with-ca.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls-ca + secret: + secretName: helm-lint-existing-tls-secret-ca + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls-ca + name: teleport-tls-ca + readOnly: true + - contains: + path: spec.template.spec.containers[0].env + content: + name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + + - it: should mount tls.existingCASecretName and set extra environment when set in values + template: proxy/deployment.yaml + values: + - ../.lint/existing-tls-secret-with-ca.yaml + - ../.lint/extra-env.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls-ca + secret: + secretName: helm-lint-existing-tls-secret-ca + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls-ca + name: teleport-tls-ca + readOnly: true + - contains: + path: spec.template.spec.containers[0].env + content: + name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + - contains: + path: spec.template.spec.containers[0].env + content: + name: SOME_ENVIRONMENT_VARIABLE + value: some-value + + - it: should set minReadySeconds when 
replicaCount > 1 + template: proxy/deployment.yaml + set: + clusterName: helm-lint + highAvailability: + certManager: + enabled: true + replicaCount: 3 + minReadySeconds: 60 + asserts: + - equal: + path: spec.minReadySeconds + value: 60 + + - it: should not set minReadySeconds when replicaCount = 1 + template: proxy/deployment.yaml + set: + chartMode: scratch + highAvailability: + minReadySeconds: 60 + replicaCount: 1 + asserts: + - equal: + path: spec.minReadySeconds + value: null + + - it: should set nodeSelector when set in values + template: proxy/deployment.yaml + set: + chartMode: scratch + clusterName: helm-lint.example.com + nodeSelector: + role: bastion + environment: security + asserts: + - isNotNull: + path: spec.template.spec.nodeSelector + - matchSnapshot: + path: spec.template.spec + + - it: mounts regular tokens on older Kubernetes versions + template: proxy/deployment.yaml + set: + clusterName: helm-lint + capabilities: + majorVersion: 1 + minorVersion: 18 + asserts: + - notEqual: + path: spec.template.spec.automountServiceAccountToken + value: false + - notContains: + path: spec.template.spec.volumes + content: + name: proxy-serviceaccount-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - path: "namespace" + fieldRef: + fieldPath: metadata.namespace + - notContains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: proxy-serviceaccount-token + readOnly: true + + - it: mounts tokens through projected volumes on newer Kubernetes versions + template: proxy/deployment.yaml + set: + clusterName: helm-lint + capabilities: + majorVersion: 1 + minorVersion: 21 + asserts: + - equal: + path: spec.template.spec.automountServiceAccountToken + value: false + - contains: + path: spec.template.spec.volumes + content: + name: proxy-serviceaccount-token + 
projected: + sources: + - serviceAccountToken: + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - path: "namespace" + fieldRef: + fieldPath: metadata.namespace + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: proxy-serviceaccount-token + readOnly: true diff --git a/helm/old/teleport-cluster/tests/proxy_pdb_test.yaml b/helm/old/teleport-cluster/tests/proxy_pdb_test.yaml new file mode 100644 index 0000000..851a0a7 --- /dev/null +++ b/helm/old/teleport-cluster/tests/proxy_pdb_test.yaml @@ -0,0 +1,23 @@ +suite: Proxy PodDisruptionBudget +templates: + - proxy/pdb.yaml +tests: + - it: not should create a PDB when disabled in values + set: + highAvailability: + podDisruptionBudget: + enabled: false + asserts: + - hasDocuments: + count: 0 + - it: should create a PDB when enabled in values (pdb.yaml) + values: + - ../.lint/pdb.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: PodDisruptionBudget + - equal: + path: spec.minAvailable + value: 2 diff --git a/helm/old/teleport-cluster/tests/proxy_service_test.yaml b/helm/old/teleport-cluster/tests/proxy_service_test.yaml new file mode 100644 index 0000000..29ed547 --- /dev/null +++ b/helm/old/teleport-cluster/tests/proxy_service_test.yaml @@ -0,0 +1,381 @@ +suite: Proxy Service +templates: + - proxy/service.yaml +tests: + - it: uses a LoadBalancer by default + set: + clusterName: teleport.example.com + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Service + - equal: + path: spec.type + value: LoadBalancer + + - it: uses a ClusterIP when service.type=ClusterIP + set: + clusterName: teleport.example.com + service: + type: ClusterIP + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Service + - equal: + path: spec.type + value: ClusterIP + + - it: uses a ClusterIP when proxy.service.type=ClusterIP + set: + clusterName: 
teleport.example.com + service: + type: NodePort + proxy: + service: + type: ClusterIP + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Service + - equal: + path: spec.type + value: ClusterIP + + - it: fails to deploy when ingress.enabled=true and proxy.service.type is set to LoadBalancer (default) + set: + clusterName: teleport.example.com + ingress: + enabled: true + asserts: + - failedTemplate: + errorMessage: "proxy.service.type must not be LoadBalancer when using an ingress - any load balancer should be provisioned by your ingress controller. Set proxy.service.type=ClusterIP instead" + + - it: uses a ClusterIP when ingress.enabled=true and service.type=ClusterIP + set: + clusterName: teleport.example.com + ingress: + enabled: true + service: + type: ClusterIP + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Service + - equal: + path: spec.type + value: ClusterIP + + - it: uses a ClusterIP when ingress.enabled=true and proxy.service.type=ClusterIP + set: + clusterName: teleport.example.com + ingress: + enabled: true + proxy: + service: + type: ClusterIP + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Service + - equal: + path: spec.type + value: ClusterIP + + - it: uses a NodePort when ingress.enabled=true and proxy.service.type=NodePort + set: + clusterName: teleport.example.com + ingress: + enabled: true + proxy: + service: + type: NodePort + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Service + - equal: + path: spec.type + value: NodePort + + - it: uses a NodePort when ingress.enabled=true and service.type=NodePort + set: + clusterName: teleport.example.com + ingress: + enabled: true + service: + type: NodePort + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Service + - equal: + path: spec.type + value: NodePort + + - it: uses a NodePort when ingress.enabled=true and proxy.service.type is overridden + set: + clusterName: teleport.example.com + ingress: + enabled: true + proxy: + service: + type: NodePort + 
asserts: + - hasDocuments: + count: 1 + - isKind: + of: Service + - equal: + path: spec.type + value: NodePort + + - it: sets AWS annotations when chartMode=aws + set: + clusterName: teleport.example.com + chartMode: aws + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Service + - equal: + path: spec.type + value: LoadBalancer + - equal: + path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type + value: nlb + - equal: + path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol + value: tcp + - equal: + path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-cross-zone-load-balancing-enabled + value: "true" + + - it: sets service annotations when specified + values: + - ../.lint/annotations.yaml + asserts: + - equal: + path: metadata.annotations.kubernetes\.io/service + value: test-annotation + - equal: + path: metadata.annotations.kubernetes\.io/service-different + value: 5 + + - it: adds a separate Postgres listener port when separatePostgresListener is true + values: + - ../.lint/separate-postgres-listener.yaml + asserts: + - contains: + path: spec.ports + content: + name: postgres + port: 5432 + targetPort: 5432 + protocol: TCP + + - it: does not add a separate Postgres listener port when separatePostgresListener is true and ingress.enabled=true + values: + - ../.lint/separate-postgres-listener.yaml + set: + ingress: + enabled: true + proxyListenerMode: multiplex + service: + type: ClusterIP + asserts: + - notContains: + path: spec.ports + content: + name: postgres + port: 5432 + targetPort: 5432 + protocol: TCP + + - it: adds a separate Mongo listener port when separateMongoListener is true + values: + - ../.lint/separate-mongo-listener.yaml + asserts: + - contains: + path: spec.ports + content: + name: mongo + port: 27017 + targetPort: 27017 + protocol: TCP + + - it: does not add a separate Mongo listener port when separateMongoListener is true and ingress.enabled=true + 
values: + - ../.lint/separate-mongo-listener.yaml + set: + ingress: + enabled: true + proxyListenerMode: multiplex + service: + type: ClusterIP + asserts: + - notContains: + path: spec.ports + content: + name: mongo + port: 27017 + targetPort: 27017 + protocol: TCP + + - it: sets AWS backend protocol annotation to ssl when in AWS mode and ACM annotation is set + values: + - ../.lint/aws-ha.yaml + set: + annotations: + service: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:1234567890:certificate/a857a76c-51d0-4d3d-8000-465bb3e9829b + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: 443 + asserts: + - equal: + path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol + value: ssl + + - it: does not add AWS backend protocol annotation when in AWS mode, ACM annotation is set and ingress is enabled + values: + - ../.lint/aws-ha.yaml + set: + ingress: + enabled: true + service: + type: ClusterIP + annotations: + service: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:1234567890:certificate/a857a76c-51d0-4d3d-8000-465bb3e9829b + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: 443 + asserts: + - isNull: + path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol + + - it: sets AWS backend protocol annotation to tcp when in AWS mode and ACM annotation is not set + values: + - ../.lint/aws-ha.yaml + asserts: + - equal: + path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol + value: tcp + + - it: does not set AWS backend protocol annotation when in AWS mode, ACM annotation is not set and ingress is enabled + values: + - ../.lint/aws-ha.yaml + set: + ingress: + enabled: true + service: + type: ClusterIP + annotations: + service: + # required so at least one service annotation exists, to avoid non map type error + service.beta.kubernetes.io/random-annotation: helm-lint + asserts: + - isNull: + 
path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol + + - it: exposes separate listener ports by default + values: + - ../.lint/example-minimal-standalone.yaml + asserts: + - matchSnapshot: + path: spec.ports + + - it: does not expose separate listener ports by default when ingress.enabled=true + values: + - ../.lint/example-minimal-standalone.yaml + set: + ingress: + enabled: true + proxyListenerMode: multiplex + service: + type: ClusterIP + asserts: + - notContains: + path: spec.ports + content: + - name: sshproxy + port: 3023 + targetPort: 3023 + protocol: TCP + - name: k8s + port: 3026 + targetPort: 3026 + protocol: TCP + - name: sshtun + port: 3024 + targetPort: 3024 + protocol: TCP + - name: mysql + port: 3036 + targetPort: 3036 + protocol: TCP + - matchSnapshot: + path: spec.ports + + - it: exposes separate listener ports when running in separate mode + values: + - ../.lint/proxy-listener-mode-separate.yaml + asserts: + - matchSnapshot: + path: spec.ports + + - it: does not expose separate listener ports when running in separate mode and ingress.enabled=true + values: + - ../.lint/proxy-listener-mode-separate.yaml + set: + ingress: + enabled: true + proxyListenerMode: multiplex + service: + type: ClusterIP + asserts: + - notContains: + path: spec.ports + content: + - name: sshproxy + port: 3023 + targetPort: 3023 + protocol: TCP + - name: k8s + port: 3026 + targetPort: 3026 + protocol: TCP + - name: sshtun + port: 3024 + targetPort: 3024 + protocol: TCP + - name: mysql + port: 3036 + targetPort: 3036 + protocol: TCP + - matchSnapshot: + path: spec.ports + + - it: exposes a single port when running in multiplex mode + values: + - ../.lint/proxy-listener-mode-multiplex.yaml + asserts: + - matchSnapshot: + path: spec.ports + + - it: exposes a single port when running in multiplex mode and ingress.enabled=true + values: + - ../.lint/proxy-listener-mode-multiplex.yaml + set: + ingress: + enabled: true + service: + type: 
ClusterIP + asserts: + - matchSnapshot: + path: spec.ports diff --git a/helm/old/teleport-cluster/tests/proxy_serviceaccount_test.yaml b/helm/old/teleport-cluster/tests/proxy_serviceaccount_test.yaml new file mode 100644 index 0000000..14ec87f --- /dev/null +++ b/helm/old/teleport-cluster/tests/proxy_serviceaccount_test.yaml @@ -0,0 +1,22 @@ +suite: Proxy ServiceAccount +templates: + - proxy/serviceaccount.yaml +tests: + - it: sets ServiceAccount annotations when specified + values: + - ../.lint/annotations.yaml + asserts: + - equal: + path: metadata.annotations.kubernetes\.io/serviceaccount + value: test-annotation + - equal: + path: metadata.annotations.kubernetes\.io/serviceaccount-different + value: 6 + + - it: changes ServiceAccount name when specified and appends "-proxy" + values: + - ../.lint/service-account.yaml + asserts: + - equal: + path: metadata.name + value: "helm-lint-proxy" diff --git a/helm/old/teleport-cluster/tests/psp_test.yaml b/helm/old/teleport-cluster/tests/psp_test.yaml new file mode 100644 index 0000000..fa3b66e --- /dev/null +++ b/helm/old/teleport-cluster/tests/psp_test.yaml @@ -0,0 +1,35 @@ +suite: PodSecurityPolicy +templates: + - psp.yaml +tests: + - it: creates a PodSecurityPolicy when enabled in values and supported + capabilities: + majorVersion: 1 + minorVersion: 22 + set: + podSecurityPolicy: + enabled: true + asserts: + - hasDocuments: + count: 3 + - documentIndex: 0 + isKind: + of: PodSecurityPolicy + - documentIndex: 1 + isKind: + of: Role + - documentIndex: 2 + isKind: + of: RoleBinding + - matchSnapshot: {} + + - it: does not create a PodSecurityPolicy when enabled in values but not supported + set: + podSecurityPolicy: + enabled: true + capabilities: + majorVersion: 1 + minorVersion: 25 + asserts: + - hasDocuments: + count: 0 diff --git a/helm/old/teleport-cluster/values.schema.json b/helm/old/teleport-cluster/values.schema.json new file mode 100644 index 0000000..3169457 --- /dev/null +++ 
b/helm/old/teleport-cluster/values.schema.json @@ -0,0 +1,923 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "type": "object", + "required": [ + "clusterName", + "authentication", + "enterprise", + "operator", + "podSecurityPolicy", + "labels", + "chartMode", + "validateConfigOnDeploy", + "highAvailability", + "podMonitor", + "tls", + "image", + "enterpriseImage", + "log", + "affinity", + "nodeSelector", + "annotations", + "extraVolumes", + "extraVolumeMounts", + "imagePullPolicy", + "initContainers", + "resources", + "tolerations", + "probeTimeoutSeconds" + ], + "properties": { + "clusterName": { + "$id": "#/properties/clusterName", + "type": "string", + "default": "" + }, + "auth": { + "$id": "#/properties/auth", + "type": "object" + }, + "proxy": { + "$id": "#/properties/proxy", + "type": "object" + }, + "createProxyToken": { + "$id": "#/properties/createProxyToken", + "type": "boolean", + "default": true + }, + "podMonitor": { + "$id": "#/properties/podMonitor", + "type": "object", + "required": ["enabled"], + "properties": { + "enabled": { + "$id": "#/properties/podMonitor/enabled", + "type": "boolean", + "default": false + }, + "additionalLabels": { + "$id": "#/properties/podMonitor/additionalLabels", + "type": "object", + "default": {"prometheus": "default"}, + "additionalProperties": {"type": "string"} + }, + "interval": { + "$id": "#/properties/podMonitor/interval", + "type": "string", + "default": "30s" + } + } + }, + "authentication": { + "$id": "#/properties/authentication", + "type": "object", + "required": ["type", "localAuth"], + "properties": { + "type": { + "$id": "#/properties/authentication/properties/type", + "type": "string", + "default": "local" + }, + "connectorName": { + "$id": "#/properties/authentication/properties/connectorName", + "type": "string", + "default": "" + }, + "localAuth": { + "$id": "#/properties/authentication/properties/localAuth", + "type": "boolean", + "default": true + }, + "lockingMode": { + "$id": 
"#/properties/authentication/properties/lockingMode", + "type": "string", + "default": "" + }, + "secondFactor": { + "$id": "#/properties/authentication/properties/secondFactor", + "type": "string", + "enum": ["off", "on", "otp", "optional", "webauthn"], + "default": "otp" + }, + "webauthn": { + "$id": "#/properties/authentication/properties/webauthn", + "type": "object", + "required": [], + "properties": { + "attestationAllowedCas": { + "$id": "#/properties/authentication/properties/webauthn/properties/attestationAllowedCas", + "type": "array", + "default": [] + }, + "attestationDeniedCas": { + "$id": "#/properties/authentication/properties/webauthn/properties/attestationDeniedCas", + "type": "array", + "default": [] + } + } + } + } + }, + "authenticationType": { + "$id": "#/properties/authenticationType", + "type": "string" + }, + "authenticationSecondFactor": { + "$id": "#/properties/authenticationSecondFactor", + "type": "object", + "required": [], + "properties": { + "secondFactor": { + "$id": "#/properties/authenticationSecondFactor/properties/secondFactor", + "type": "string", + "enum": ["off", "on", "otp", "optional", "webauthn"], + "default": "otp" + }, + "webauthn": { + "$id": "#/properties/authenticationSecondFactor/properties/webauthn", + "type": "object", + "required": [], + "properties": { + "attestationAllowedCas": { + "$id": "#/properties/authenticationSecondFactor/properties/webauthn/properties/attestationAllowedCas", + "type": "array", + "default": [] + }, + "attestationDeniedCas": { + "$id": "#/properties/authenticationSecondFactor/properties/webauthn/properties/attestationDeniedCas", + "type": "array", + "default": [] + } + } + } + } + }, + "proxyListenerMode": { + "$id": "#/properties/proxyListenerMode", + "type": "string", + "default": "" + }, + "sessionRecording": { + "$id": "#/properties/sessionRecording", + "type": "string", + "default": "" + }, + "separatePostgresListener": { + "$id": "#/properties/separatePostgresListener", + "type": 
"boolean", + "default": false + }, + "separateMongoListener": { + "$id": "#/properties/separateMongoListener", + "type": "boolean", + "default": false + }, + "publicAddr": { + "$id": "#/properties/publicAddr", + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "kubePublicAddr": { + "$id": "#/properties/kubePublicAddr", + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "mongoPublicAddr": { + "$id": "#/properties/mongoPublicAddr", + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "mysqlPublicAddr": { + "$id": "#/properties/mysqlPublicAddr", + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "postgresPublicAddr": { + "$id": "#/properties/postgresPublicAddr", + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "sshPublicAddr": { + "$id": "#/properties/sshPublicAddr", + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "tunnelPublicAddr": { + "$id": "#/properties/tunnelPublicAddr", + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "teleportVersionOverride": { + "$id": "#/properties/teleportVersionOverride", + "type": "string", + "default": "" + }, + "acme": { + "$id": "#/properties/acme", + "type": "boolean", + "default": false + }, + "acmeEmail": { + "$id": "#/properties/acmeEmail", + "type": "string", + "default": "" + }, + "acmeURI": { + "$id": "#/properties/acmeURI", + "type": "string", + "default": "" + }, + "enterprise": { + "$id": "#/properties/enterprise", + "type": "boolean", + "default": false + }, + "installCRDs": { + "$id": "#/properties/installCRDs", + "type": "boolean" + }, + "operator": { + "$id": "#/properties/operator", + "type": "object", + "required": ["enabled"], + "properties": { + "enabled": { + "$id": "#/properties/operator/properties/enabled", + "type": "boolean", + "default": false + }, + "image": { + "$id": "#/properties/operator/properties/image", + 
"type": "string", + "default": "public.ecr.aws/gravitational/teleport-operator" + }, + "resources": { + "$id": "#/properties/operator/properties/resources", + "type": "object", + "default": {} + } + } + }, + "podSecurityPolicy": { + "$id": "#/properties/podSecurityPolicy", + "type": "object", + "required": [ + "enabled" + ], + "properties": { + "enabled": { + "$id": "#/properties/podSecurityPolicy/properties/enabled", + "type": "boolean", + "default": true + } + } + }, + "labels": { + "$id": "#/properties/labels", + "type": "object", + "default": {} + }, + "chartMode": { + "$id": "#/properties/chartMode", + "type": "string", + "enum": [ + "standalone", + "aws", + "azure", + "gcp", + "scratch" + ], + "default": "standalone" + }, + "validateConfigOnDeploy": { + "$id": "#/properties/validateConfigOnDeploy", + "type": "boolean", + "default": true + }, + "standalone": { + "$id": "#/properties/standalone", + "type": "object", + "required": [ + "volumeSize" + ], + "properties": { + "existingClaimName": { + "$id": "#/properties/standalone/properties/existingClaimName", + "type": "string", + "default": "" + }, + "volumeSize": { + "$id": "#/properties/standalone/properties/volumeSize", + "type": "string", + "default": "" + } + } + }, + "persistence": { + "$id": "#/properties/persistence", + "type": "object", + "required": [ + "enabled", + "volumeSize" + ], + "properties": { + "enabled": { + "$id": "#/properties/persistence/properties/enabled", + "type": "boolean", + "default": "true" + }, + "existingClaimName": { + "$id": "#/properties/persistence/properties/existingClaimName", + "type": "string", + "default": "" + }, + "storageClassName": { + "$id": "#/properties/persistence/properties/storageClassName", + "type": "string", + "default": "" + }, + "volumeSize": { + "$id": "#/properties/persistence/properties/volumeSize", + "type": "string", + "default": "" + } + } + }, + "aws": { + "$id": "#/properties/aws", + "type": "object", + "properties": { + "region": { + "$id": 
"#/properties/aws/properties/region", + "type": "string", + "default": "" + }, + "backendTable": { + "$id": "#/properties/aws/properties/backendTable", + "type": "string", + "default": "" + }, + "auditLogTable": { + "$id": "#/properties/aws/properties/auditLogTable", + "type": "string", + "default": "" + }, + "auditLogMirrorOnStdout": { + "$id": "#/properties/aws/properties/auditLogMirrorOnStdout", + "type": "boolean", + "default": "false" + }, + "sessionRecordingBucket": { + "$id": "#/properties/aws/properties/sessionRecordingBucket", + "type": "string", + "default": "" + }, + "backups": { + "$id": "#/properties/aws/properties/backups", + "type": "boolean", + "default": false + }, + "dynamoAutoScaling": { + "$id": "#/properties/aws/properties/dynamoAutoScaling", + "type": "boolean", + "default": false + } + }, + "if": { + "properties": { + "dynamoAutoScaling": { + "const": true + } + } + }, + "then": { + "properties": { + "readMinCapacity": { + "$id": "#/properties/aws/properties/readMinCapacity", + "type": "integer" + }, + "readMaxCapacity": { + "$id": "#/properties/aws/properties/readMaxCapacity", + "type": "integer" + }, + "readTargetValue": { + "$id": "#/properties/aws/properties/readTargetValue", + "type": "number" + }, + "writeMinCapacity": { + "$id": "#/properties/aws/properties/writeMinCapacity", + "type": "integer" + }, + "writeMaxCapacity": { + "$id": "#/properties/aws/properties/writeMaxCapacity", + "type": "integer" + }, + "writeTargetValue": { + "$id": "#/properties/aws/properties/writeTargetValue", + "type": "number" + } + } + }, + "else": { + "properties": { + "readMinCapacity": { + "$id": "#/properties/aws/properties/readMinCapacity", + "type": "null" + }, + "readMaxCapacity": { + "$id": "#/properties/aws/properties/readMaxCapacity", + "type": "null" + }, + "readTargetValue": { + "$id": "#/properties/aws/properties/readTargetValue", + "type": "null" + }, + "writeMinCapacity": { + "$id": "#/properties/aws/properties/writeMinCapacity", + "type": 
"null" + }, + "writeMaxCapacity": { + "$id": "#/properties/aws/properties/writeMaxCapacity", + "type": "null" + }, + "writeTargetValue": { + "$id": "#/properties/aws/properties/writeTargetValue", + "type": "null" + } + } + } + }, + "azure": { + "$id": "#/properties/azure", + "type": "object", + "properties": { + "databaseHost": { + "$id": "#/properties/azure/properties/databaseHost", + "type": "string", + "default": "" + }, + "databaseUser": { + "$id": "#/properties/azure/properties/databaseUser", + "type": "string", + "default": "" + }, + "backendDatabase": { + "$id": "#/properties/azure/properties/backendDatabase", + "type": "string", + "default": "teleport_backend" + }, + "auditLogDatabase": { + "$id": "#/properties/azure/properties/auditLogDatabase", + "type": "string", + "default": "teleport_audit" + }, + "auditLogMirrorOnStdout": { + "$id": "#/properties/azure/properties/auditLogMirrorOnStdout", + "type": "boolean", + "default": false + }, + "sessionRecordingStorageAccount": { + "$id": "#/properties/azure/properties/sessionRecordingStorageAccount", + "type": "string", + "default": "" + }, + "clientID": { + "$id": "#/properties/azure/properties/clientID", + "type": "string", + "default": "" + }, + "databasePoolMaxConnections": { + "$id": "#/properties/azure/properties/databasePoolMaxConnections", + "type": "integer", + "default": 0 + } + } + }, + "gcp": { + "$id": "#/properties/gcp", + "type": "object", + "properties": { + "projectId": { + "$id": "#/properties/gcp/properties/projectId", + "type": "string", + "default": "" + }, + "backendTable": { + "$id": "#/properties/gcp/properties/backendTable", + "type": "string", + "default": "" + }, + "auditLogTable": { + "$id": "#/properties/gcp/properties/auditLogTable", + "type": "string", + "default": "" + }, + "auditLogMirrorOnStdout": { + "$id": "#/properties/aws/properties/auditLogMirrorOnStdout", + "type": "boolean", + "default": "false" + }, + "sessionRecordingBucket": { + "$id": 
"#/properties/gcp/properties/sessionRecordingBucket", + "type": "string", + "default": "" + }, + "credentialSecretName": { + "$id": "#/properties/gcp/properties/credentialSecretName", + "type": "string", + "default": "teleport-gcp-credentials" + } + } + }, + "highAvailability": { + "$id": "#/properties/highAvailability", + "type": "object", + "required": [ + "replicaCount", + "requireAntiAffinity", + "certManager", + "minReadySeconds", + "podDisruptionBudget" + ], + "properties": { + "replicaCount": { + "$id": "#/properties/highAvailability/properties/replicaCount", + "type": "integer", + "default": 1 + }, + "requireAntiAffinity": { + "$id": "#/properties/highAvailability/properties/requireAntiAffinity", + "type": "boolean", + "default": false + }, + "certManager": { + "$id": "#/properties/highAvailability/properties/certManager", + "type": "object", + "required": [ + "enabled", + "issuerName", + "issuerKind", + "issuerGroup" + ], + "properties": { + "addCommonName": { + "$id": "#/properties/highAvailability/properties/certManager/properties/addCommonName", + "type": "boolean", + "default": "false" + }, + "enabled": { + "$id": "#/properties/highAvailability/properties/certManager/properties/enabled", + "type": "boolean", + "default": "false" + }, + "issuerName": { + "$id": "#/properties/highAvailability/properties/certManager/properties/issuerName", + "type": "string", + "default": "" + }, + "issuerKind": { + "$id": "#/properties/highAvailability/properties/certManager/properties/issuerKind", + "type": "string", + "default": "Issuer" + }, + "issuerGroup": { + "$id": "#/properties/highAvailability/properties/certManager/properties/issuerGroup", + "type": "string", + "default": "cert-manager.io" + } + } + }, + "minReadySeconds": { + "$id": "#/properties/highAvailability/properties/minReadySeconds", + "type": "integer", + "default": 15 + }, + "podDisruptionBudget": { + "$id": "#/properties/highAvailability/properties/podDisruptionBudget", + "type": "object", + 
"required": [ + "enabled", + "minAvailable" + ], + "properties": { + "enabled": { + "$id": "#/properties/highAvailability/properties/podDisruptionBudget/properties/enabled", + "type": "boolean", + "default": false + }, + "minAvailable": { + "$id": "#/properties/highAvailability/properties/podDisruptionBudget/properties/minAvailable", + "type": "integer", + "default": 1 + } + } + } + } + }, + "tls": { + "$id": "#/properties/tls", + "type": "object", + "required": [ + "existingSecretName", + "existingCASecretName" + ], + "properties": { + "existingSecretName": { + "$id": "#/properties/tls/properties/existingSecretName", + "type": "string", + "default": "" + }, + "existingCASecretName": { + "$id": "#/properties/tls/properties/existingCASecretName", + "type": "string", + "default": "" + } + } + }, + "image": { + "$id": "#/properties/image", + "type": "string", + "default": "public.ecr.aws/gravitational/teleport-distroless" + }, + "enterpriseImage": { + "$id": "#/properties/enterpriseImage", + "type": "string", + "default": "public.ecr.aws/gravitational/teleport-ent-distroless" + }, + "imagePullSecrets": { + "$id": "#/properties/imagePullSecrets", + "type": "array", + "default": [] + }, + "logLevel": { + "$id": "#/properties/logLevel", + "type": "string", + "enum": [ + "DEBUG", + "INFO", + "WARN", + "WARNING", + "ERROR" + ], + "default": "INFO" + }, + "log": { + "$id": "#/properties/log", + "type": "object", + "required": [ + "output", + "format", + "extraFields" + ], + "properties": { + "level": { + "$id": "#/properties/log/properties/level", + "type": "string", + "enum": ["DEBUG", "INFO", "WARN", "WARNING", "ERROR"], + "default": "INFO" + }, + "deployment": { + "$id": "#/properties/log/properties/output", + "type": "string", + "default": {} + }, + "pod": { + "$id": "#/properties/log/properties/format", + "type": "string", + "default": {} + }, + "service": { + "$id": "#/properties/log/properties/extraFields", + "type": "array", + "default": {} + } + } + }, + 
"affinity": { + "$id": "#/properties/affinity", + "type": "object", + "default": {} + }, + "nodeSelector": { + "$id": "#/properties/nodeSelector", + "type": "object", + "default": {} + }, + "annotations": { + "$id": "#/properties/annotations", + "type": "object", + "required": [ + "config", + "deployment", + "pod", + "service", + "serviceAccount", + "certSecret", + "ingress" + ], + "properties": { + "config": { + "$id": "#/properties/annotations/properties/config", + "type": "object", + "default": {} + }, + "deployment": { + "$id": "#/properties/annotations/properties/deployment", + "type": "object", + "default": {} + }, + "pod": { + "$id": "#/properties/annotations/properties/pod", + "type": "object", + "default": {} + }, + "service": { + "$id": "#/properties/annotations/properties/service", + "type": "object", + "default": {} + }, + "serviceAccount": { + "$id": "#/properties/annotations/properties/serviceAccount", + "type": "object", + "default": {} + }, + "certSecret": { + "$id": "#/properties/annotations/properties/certSecret", + "type": "object", + "default": {} + } + } + }, + "service": { + "$id": "#/properties/service", + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "$id": "#properties/service/type", + "type": "string", + "default": "LoadBalancer" + }, + "spec": { + "$id": "#/properties/service/spec", + "type": "object", + "default": {} + } + } + }, + "ingress": { + "enabled": { + "$id": "#/properties/ingress/enabled", + "type": "boolean", + "default": false + }, + "suppressAutomaticWildcards": { + "$id": "#/properties/ingress/suppressAutomaticWildcards", + "type": "boolean", + "default": false + }, + "spec": { + "$id": "#/properties/ingress/spec", + "type": "object", + "default": {} + } + }, + "serviceAccount": { + "$id": "#/properties/serviceAccount", + "type": "object", + "required": [], + "properties": { + "name": { + "$id": "#properties/service/name", + "type": "string", + "default": "" + }, + "create": { + "$id": 
"#properties/service/create", + "type": "boolean", + "default": true + } + } + }, + "rbac": { + "$id": "#/properties/rbac", + "type": "object", + "required": [], + "properties": { + "create": { + "$id": "#properties/rbac/create", + "type": "boolean", + "default": true + } + } + }, + "extraArgs": { + "$id": "#/properties/extraArgs", + "type": "array", + "default": [] + }, + "extraEnv": { + "$id": "#/properties/extraEnv", + "type": "array", + "default": [] + }, + "extraVolumes": { + "$id": "#/properties/extraVolumes", + "type": "array", + "default": [] + }, + "extraVolumeMounts": { + "$id": "#/properties/extraVolumeMounts", + "type": "array", + "default": [] + }, + "imagePullPolicy": { + "$id": "#/properties/imagePullPolicy", + "type": "string", + "enum": [ + "Never", + "IfNotPresent", + "Always" + ], + "default": "IfNotPresent" + }, + "initContainers": { + "$id": "#/properties/initContainers", + "type": "array", + "default": [] + }, + "postStart": { + "$id": "#/properties/postStart", + "type": "object", + "required": [ + "command" + ], + "properties": { + "command": { + "$id": "#properties/postStart/command", + "type": "array", + "default": [] + } + } + }, + "kubeClusterName": { + "$id": "#/properties/kubeClusterName", + "type": "string", + "default": "" + }, + "resources": { + "$id": "#/properties/resources", + "type": "object", + "default": {} + }, + "securityContext": { + "$id": "#/properties/securityContext", + "type": "object", + "default": {} + }, + "tolerations": { + "$id": "#/properties/tolerations", + "type": "array", + "default": [] + }, + "priorityClassName": { + "$id": "#/properties/priorityClassName", + "type": "string", + "default": "" + }, + "probeTimeoutSeconds": { + "$id": "#/properties/probeTimeoutSeconds", + "type": "integer", + "default": 1 + }, + "terminationGracePeriodSeconds": { + "$id": "#/properties/terminationGracePeriodSeconds", + "type": "integer", + "default": 60 + } + } +} diff --git a/helm/old/teleport-cluster/values.yaml 
b/helm/old/teleport-cluster/values.yaml new file mode 100644 index 0000000..54283ec --- /dev/null +++ b/helm/old/teleport-cluster/values.yaml @@ -0,0 +1,638 @@ +################################################## +# Values that must always be provided by the user. +################################################## + +# `clusterName` controls the name used to refer to the Teleport cluster, along with +# the externally-facing public address to use to access it. In most setups this must +# be a fully-qualified domain name (e.g. `teleport.example.com`) as this value is +# used as the cluster's public address by default. +# +# Note: When using a fully qualified domain name as your `clusterName`, you will also +# need to configure the DNS provider for this domain to point to the external +# load balancer address of your Teleport cluster. +# +# Warning: The clusterName cannot be changed during a Teleport cluster's lifespan. +# If you need to change it, you must redeploy a completely new cluster. +clusterName: "" + +# Name for this kubernetes cluster to be used by teleport users. +kubeClusterName: "" + +################################################## +# Values that you may need to change. +################################################## + +# Version of teleport image, if different from chart version in Chart.yaml. +# DANGER: `teleportVersionOverride` MUST NOT be used to control the Teleport version. +# This chart is designed to run a specific teleport version (see Chart.yaml). +# You will face compatibility issues trying to run a different Teleport version with it. +# +# If you want to run Teleport version X, you should use `helm --version X` instead. +teleportVersionOverride: "" + +# The `teleport-cluster` charts deploys two sets of pods: auth and proxy. +# `auth` contains values specific for the auth pods. You can use it to +# set specific values for auth pods, taking precedence over chart-scoped values. 
+# For example, to override the [`postStart`](#postStart) value only for auth pods: +# +# auth: +# postStart: ["curl", "http://hook"] +# imagePullPolicy: Always +auth: + # auth.teleportConfig contains YAML teleport configuration for auth pods + # The configuration will be merged with the chart-generated configuration + # and will take precedence in case of conflict. + # + # See the Teleport Configuration Reference for the list of supported fields: + # https://goteleport.com/docs/reference/config/ + # + # teleportConfig: + # teleport: + # cache: + # enabled: false + # auth_service: + # client_idle_timeout: 2h + # client_idle_timeout_message: "Connection closed after 2hours without activity" + teleportConfig: {} + +# proxy contains values specific for the proxy pods +# You can override chart-scoped values, for example +# proxy: +# postStart: ["curl", "http://hook"] +# imagePullPolicy: Always +proxy: + # proxy.teleportConfig contains YAML teleport configuration for proxy pods + # The configuration will be merged with the chart-generated configuration + # and will take precedence in case of conflict + # + # See the Teleport Configuration Reference for the list of supported fields: + # https://goteleport.com/docs/reference/config/ + # + # teleportConfig: + # teleport: + # cache: + # enabled: false + # proxy_service: + # https_keypairs: + # - key_file: /my-custom-mount/key.pem + # cert_file: /my-custom-mount/cert.pem + teleportConfig: {} + +authentication: + # Default authentication type. Possible values are 'local' and 'github' for OSS, plus 'oidc' and 'saml' for Enterprise. + type: local + + # Sets the authenticator connector for SSO or the default connector for "local" authentication. + # See SSO for Enterprise (https://goteleport.com/docs/enterprise/sso/). + # See Passwordless for local + # (http://goteleport.com/docs/access-controls/guides/passwordless/#optional-enable-passwordless-by-default). + # Defaults to "local". 
+ connectorName: "" + + # Enable/disable local authentication by setting `authentication.local_auth` in `teleport.yaml`. + # Disabling local auth is required for FedRAMP / FIPS; see https://gravitational.com/teleport/docs/enterprise/ssh-kubernetes-fedramp/. + localAuth: true + + # Controls the locking mode: in case of network split should Teleport guarantee availability or integrity ? + # Possible values are "best_effort" and "strict". When not defined, Teleport defaults to "best_effort". + # See https://goteleport.com/docs/access-controls/guides/locking/#next-steps-locking-modes. + lockingMode: "" + + # Second factor requirements for users of the Teleport cluster. + # Controls the `auth_config.authentication.second_factor` field in `teleport.yaml`. + # Possible values are 'off', 'on', 'otp', 'optional' and 'webauthn'. + # + # WARNING: + # If you set `publicAddr` for users to access the cluster under a domain different + # to clusterName you must manually set the webauthn Relying + # Party Identifier (RP ID) - https://www.w3.org/TR/webauthn-2/#relying-party-identifier + # If you don't, RP ID will default to `clusterName` and users will fail + # to register second factors. + # + # You can do this by setting the value + # `auth.teleportConfig.auth_service.authentication.webauthn.rp_id`. + # + # RP ID must be both a valid domain, and part of the full domain users are connecting to. + # For example, if users are accessing the cluster with the domain + # "teleport.example.com", RP ID can be "teleport.example.com" or "example.com". + # + # Changing the RP ID will invalidate all already registered webauthn second factors. + secondFactor: "on" + + # (Optional) When using webauthn this allows to restrict which vendor and key models can be used. + # webauthn: + # attestationAllowedCas: + # - /path/to/allowed_ca.pem + # - | + # -----BEGIN CERTIFICATE----- + # ... 
+ # -----END CERTIFICATE----- + # attestationDeniedCas: + # - /path/to/denied_ca.pem + # - | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + +# Deprecated way to set the authentication type, `authentication.type` should be preferred. +# authenticationType: local + +# Deprecated way to set the authentication second factor, `authentication.secondFactor` should be preferred. +# authenticationSecondFactor: +# secondFactor: "otp" + +# Teleport supports TLS routing. In this mode, all client connections are wrapped in TLS and multiplexed on one Teleport proxy port. +# Default mode will not utilize TLS routing and operate in backwards-compatibility mode. +# +# To use an ingress, set proxyListenerMode=multiplex, ingress.enabled=true and service.type=ClusterIP +# +# Possible values are 'separate' and 'multiplex' +proxyListenerMode: "separate" + +# Optional setting for configuring session recording. +# See `session_recording` under https://goteleport.com/docs/setup/reference/config/#teleportyaml +sessionRecording: "" + +# By default, Teleport will multiplex Postgres and MongoDB database connections on the same port as the proxy's web listener (443) +# Setting either of these values to true will separate the listeners out onto a separate port (5432 for Postgres, 27017 for MongoDB) +# This is useful when terminating TLS at a load balancer in front of Teleport (such as when using AWS ACM) +# These settings will not apply if proxyListenerMode is set to "multiplex". +separatePostgresListener: false +separateMongoListener: false + +# Do not set any of these values unless you explicitly need to. Teleport always uses the cluster name by default. 
+# +# WARNING: +# If you set `publicAddr` for users to access the cluster under a domain different +# to clusterName, you must manually set the webauthn Relying +# Party Identifier (RP ID) - https://www.w3.org/TR/webauthn-2/#relying-party-identifier +# If you don't, RP ID will default to `clusterName` and users will fail +# to register second factors. +# +# You can do this by setting the value +# `auth.teleportConfig.auth_service.authentication.webauthn.rp_id`. +# +# RP ID must be both a valid domain, and part of the full domain users are connecting to. +# For example, if users are accessing the cluster with the domain +# "teleport.example.com", RP ID can be "teleport.example.com" or "example.com". +# +# Changing the RP ID will invalidate all already registered webauthn second factors. +# +# Public cluster addresses, including port (e.g. teleport.example.com:443) +# Defaults to `clusterName` on port 443. +publicAddr: [] +# Public cluster kube addresses, including port. Defaults to `publicAddr` on port 3026. +# Only used when `proxyListenerMode` is not 'multiplex'. +kubePublicAddr: [] +# Public cluster mongo listener addresses, including port. Defaults to `publicAddr` on port 27017. +# Only used when `proxyListenerMode` is not 'multiplex' and `separateMongoListener` is true. +mongoPublicAddr: [] +# Public cluster MySQL addresses, including port. Defaults to `publicAddr` on port 3036. +# Only used when `proxyListenerMode` is not 'multiplex'. +mysqlPublicAddr: [] +# Public cluster postgres listener addresses, including port. Defaults to `publicAddr` on port 5432. +# Only used when `proxyListenerMode` is not 'multiplex' and `separatePostgresListener` is true. +postgresPublicAddr: [] +# Public cluster SSH addresses, including port. Defaults to `publicAddr` on port 3023. +# Only used when `proxyListenerMode` is not 'multiplex'. +sshPublicAddr: [] +# Public cluster tunnel SSH addresses, including port. Defaults to `publicAddr` on port 3024. 
+# Only used when `proxyListenerMode` is not 'multiplex'. +tunnelPublicAddr: [] + +# ACME is a protocol for getting Web X.509 certificates +# Note: ACME can only be used for single-instance clusters. It is not suitable for use in HA configurations. +# For HA configurations, see either the "highAvailability.certManager" or "tls" values. +# Setting acme to 'true' enables the ACME protocol and will attempt to get a free TLS certificate from Let's Encrypt. +# Setting acme to 'false' (the default) will cause Teleport to generate and use self-signed certificates for its web UI. +# This section is mutually exclusive with the "tls" value below. +acme: false +# acmeEmail is the email address to provide during certificate registration (this is a Let's Encrypt requirement) +acmeEmail: "" +# acmeURI is the ACME server to use for getting certificates. The default is to use Let's Encrypt's production server. +acmeURI: "" + +# Set enterprise to true to use enterprise image +# You will need to download your Enterprise license from the Teleport dashboard and create a secret to use this: +# kubectl -n ${TELEPORT_NAMESPACE?} create secret generic license --from-file=/path/to/downloaded/license.pem +enterprise: false + +# CRDs are installed by default when the operator is enabled. This manual override allows to disable CRD installation +# when deploying multiple releases in the same cluster. 
+# installCRDs: + +# Configuration of the optional Teleport operator +operator: + # Set enabled to true to add the Kubernetes Teleport Operator + enabled: false + # Kubernetes Teleport Operator image + image: public.ecr.aws/gravitational/teleport-operator + # Resources to request for the operator container + # https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # requests: + # cpu: "0.5" + # memory: "1Gi" + # limits: + # memory: "1Gi" + +# If true, create & use Pod Security Policy resources +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +# WARNING: the PSP won't be deployed for Kubernetes 1.23 and higher. +# Please read https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-kubernetes-1-25-psp/ +podSecurityPolicy: + enabled: true + +# Labels is a map of key-value pairs about this cluster +labels: {} + +# Mode to deploy the chart in. The default is "standalone". Options: +# - "standalone": will deploy a Teleport container running auth and proxy services with a PersistentVolumeClaim for storage. +# - "aws": will deploy Teleport using DynamoDB for backend/audit log storage and S3 for session recordings. (1) +# - "gcp": will deploy Teleport using Firestore for backend/audit log storage and Google Cloud storage for session recordings. (2) +# - "azure": will deploy Teleport using Azure Database for PostgreSQL for backend/audit and Azure Blob Storage for session recordings. (3) +# - "scratch": will deploy Teleport containers but will not provide default configuration file. You must pass your own configuration. (4) +# (1) To use "aws" mode, you must also configure the "aws" section below. +# (2) To use "gcp" mode, you must also configure the "gcp" section below. +# (3) To use "azure" mode, you must also configure the "azure" section below. +# (4) When set to "scratch", you must write the teleport configuration in auth.teleportConfig and proxy.teleportConfig. 
+# `scratch` usage is strongly discouraged, this is a last resort option and +# everything should be doable with `standalone` mode + overrides through +# `auth.teleportConfig` and `proxy.teleportConfig`. +chartMode: standalone + +# validateConfigOnDeploy enables a Kubernetes job before install and upgrade that will verify +# if the teleport.yaml configuration is valid and will block the deployment if it is not +validateConfigOnDeploy: true + +# Whether the chart should create a Teleport ProvisionToken for the proxies to join the Teleport cluster. +# Disabling this flag will cause the proxies not to be able to join the auth pods. In this case, the +# Helm chart user is responsible for configuring working join_params on the proxy. +createProxyToken: true + +# podMonitor controls the PodMonitor CR (from monitoring.coreos.com/v1) +# This CRD is managed by the prometheus-operator and allows workload to +# get monitored. To use this value, you need to run a `prometheus-operator` +# in the cluster for this value to take effect. +# See https://prometheus-operator.dev/docs/prologue/introduction/ +podMonitor: + # Whether the chart should deploy a PodMonitor. + # Disabled by default as it requires the PodMonitor CRD to be installed. + enabled: false + # additionalLabels to put on the PodMonitor. + # This is used to be selected by a specific prometheus instance. + # Defaults to {prometheus: default} which seems to be the common default prometheus selector + additionalLabels: + prometheus: default + # interval is the interval between two metrics scrapes. Defaults to 30s + interval: 30s + +###################################################################### +# Persistence settings (only used in "standalone" and "scratch" modes) +# NOTE: Changes in Kubernetes 1.23+ mean that persistent volumes will not automatically be provisioned in AWS EKS clusters +# without additional configuration. See https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html for more details. 
+# This driver addon must be configured to use persistent volumes in EKS clusters after Kubernetes 1.23. +###################################################################### +persistence: + # Enable persistence using a PersistentVolumeClaim + enabled: true + # Leave blank to automatically create a PersistentVolumeClaim for Teleport storage. + # If you would like to use a pre-existing PersistentVolumeClaim, put its name here. + existingClaimName: "" + # Size of persistent volume to request when created by Teleport. + # Ignored if existingClaimName is provided. + volumeSize: 10Gi + +################################################## +# AWS-specific settings (only used in "aws" mode) +################################################## +aws: + # The AWS region where the DynamoDB tables are located. + region: "" + # The DynamoDB table name to use for backend storage. Teleport will attempt to create this table automatically if it does not exist. + # The container will need an appropriately-provisioned IAM role with permissions to create DynamoDB tables. + backendTable: "" + # The DynamoDB table name to use for audit log storage. Teleport will attempt to create this table automatically if it does not exist. + # The container will need an appropriately-provisioned IAM role with permissions to create DynamoDB tables. + # This MUST NOT be the same table name as used for 'backendTable' as the schemas are different. + auditLogTable: "" + # Whether to mirror audit log entries to stdout in JSON format (useful for external log collectors) + auditLogMirrorOnStdout: false + # The S3 bucket name to use for recorded session storage. Teleport will attempt to create this bucket automatically if it does not exist. + # The container will need an appropriately-provisioned IAM role with permissions to create S3 buckets. + sessionRecordingBucket: "" + # Whether or not to turn on DynamoDB backups + backups: false + + # Whether Teleport should configure DynamoDB's autoscaling. 
+ # Requires additional statements in the IAM Teleport Policy to be allowed to configure the autoscaling. + # See https://goteleport.com/docs/setup/reference/backends/#dynamodb-autoscaling + dynamoAutoScaling: false + + # DynamoDB autoscaling settings. Required if `dynamoAutoScaling` is `true`. + # See https://goteleport.com/docs/setup/reference/backends/#dynamodb-autoscaling + readMinCapacity: null # Integer + readMaxCapacity: null # Integer + readTargetValue: null # Float + writeMinCapacity: null # Integer + writeMaxCapacity: null # Integer + writeTargetValue: null # Float + +################################################## +# GCP-specific settings (only used in "gcp" mode) +################################################## +gcp: + # The project name being used for the GCP account where Teleport is running. + # See https://support.google.com/googleapi/answer/7014113?hl=en + projectId: "" + # The Firestore collection to use for backend storage. Teleport will attempt to create this collection automatically if it does not exist. + # Either of the following must be true: + # - The container will need an appropriately-provisioned IAM role/service account with permissions to create Firestore collections + # - The service account credentials provided via 'credentialSecretName' will need permissions to create Firestore collections. + backendTable: "" + # The Firestore collection to use for audit log storage. Teleport will attempt to create this collection automatically if it does not exist. + # Either of the following must be true: + # - The container will need an appropriately-provisioned IAM role/service account with permissions to create Firestore collections + # - The service account credentials provided via 'credentialSecretName' will need permissions to create Firestore collections. + # This MUST NOT be the same collection name as used for 'backendTable' as the schemas are different. 
+ auditLogTable: "" + # Whether to mirror audit log entries to stdout in JSON format (useful for external log collectors) + auditLogMirrorOnStdout: false + # The Google storage bucket name to use for recorded session storage. This bucket must already exist in the Google account being used. + sessionRecordingBucket: "" + # The name of the Kubernetes secret used to store the Google credentials. + # You will need to create this secret manually. It must contain a JSON file from Google with the credentials that Teleport will use. + # You can override this to a blank value if the worker node running Teleport already has a service account which grants access. + credentialSecretName: teleport-gcp-credentials + +##################################################### +# Azure-specific settings (only used in "azure" mode) +##################################################### +azure: + # The fully qualified hostname of the Postgres database cluster hosted in Azure. + # It should follow the format ".postgres.database.azure.com". + databaseHost: "" + # The Postgres user Teleport must use to connect to the backend and audit + # databases. + databaseUser: "" + # The Postgres database to use for backend storage. + backendDatabase: "teleport_backend" + # The Postgres database to use for audit log storage. + # This MUST NOT be the same database as used for 'backendDatabase'. + auditLogDatabase: "teleport_audit" + # Whether to mirror audit log entries to stdout in JSON format (useful for external log collectors) + auditLogMirrorOnStdout: false + # The fully qualified domain name of the Azure Blob Storage account to use for + # recorded session storage. This account must already exist. + # It should follow the format ".blob.core.windows.net" + sessionRecordingStorageAccount: "" + # Azure client ID is used by the Kubernetes Service Account to know which + # Application it should impersonate. This can be unset only if the clientID is + # passed through other means (e.g. 
environment variable) + clientID: "" + # Controls the `pool_max_conns` setting passed to PostgreSQL. This is the + # max amount of connections Teleport can open to the database. This can affect + # performance on large clusters and depends on various factors like the + # database size, the number of CPU cores available for Teleport, GOMAXPROCS + # and the database latency. + # This only applies to the core backend connections, not the audit log ones. + # 0 means the parameter is not set and the client's default is used (recommended) + databasePoolMaxConnections: 0 + +# `highAvailability` contains settings controlling how Teleport pods are +# replicated and scheduled. This allows Teleport to run in a highly-available +# fashion: Teleport should sustain the crash/loss of a machine without interrupting +# the service. +# +# For auth pods: +# When using "standalone" or "scratch" mode, you must use highly-available storage +# (etcd, DynamoDB or Firestore) for multiple replicas to be supported. +# Manually configuring NFS-based storage or ReadWriteMany volume claims +# is NOT supported and will result in errors. Using Teleport's built-in +# ACME client (as opposed to using cert-manager or passing certs through a secret) +# is not supported with multiple replicas. +# For proxy pods: +# Proxy pods need to be provided a certificate to be replicated (either via +# `tls.existingSecretName` or via `highAvailability.certManager`). +# If proxy pods are replicable, they will default to 2 replicas, +# even if `highAvailability.replicaCount` is 1. To force a single proxy replica, +# set `proxy.highAvailability.replicaCount: 1`. +highAvailability: + # Controls the amount of pod replicas. The `highAvailability` comment describes + # the replication requirements. + # + # WARNING: You **must** meet the replication criteria, + # else the deployment will result in errors and inconsistent data. 
+ replicaCount: 1 + # Setting 'requireAntiAffinity' to true will use 'requiredDuringSchedulingIgnoredDuringExecution' to require that multiple Teleport pods must not be scheduled on the + # same physical host. This will result in Teleport pods failing to be scheduled in very small clusters or during node downtime, so should be used with caution. + # Setting 'requireAntiAffinity' to false (the default) uses 'preferredDuringSchedulingIgnoredDuringExecution' to make this a soft requirement. + # This setting only has any effect when replicaCount is greater than 1. + requireAntiAffinity: false + # If enabled will create a Pod Disruption Budget + # https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + podDisruptionBudget: + enabled: false + minAvailable: 1 + # Settings for cert-manager (can be used for provisioning TLS certs in HA mode) + # These settings are mutually exclusive with the "tls" value below. + certManager: + # If set to true, a common name matching the cluster name will be set in the certificate signing request. This is mandatory for some CAs. + addCommonName: false + # If set to true, use cert-manager to get certificates for Teleport to use for TLS termination + enabled: false + # Name of the Issuer/ClusterIssuer to use for certs + # NOTE: You will always need to create this yourself when certManager.enabled is true. + issuerName: "" + # Kind of Issuer that cert-manager should look for. + # This defaults to 'Issuer' to keep everything contained within the teleport namespace. + issuerKind: Issuer + # Group of Issuer that cert-manager should look for. + # This defaults to 'cert-manager.io' which is the default Issuer group. + issuerGroup: cert-manager.io + # Injects delay when performing pod rollouts to mitigate the loss of all agent tunnels at the same time + # See https://github.com/gravitational/teleport/issues/13129 + minReadySeconds: 15 + +# Settings for mounting your own TLS keypair to secure Teleport's web UI. 
+# These settings are mutually exclusive with the "highAvailability.certManager" and "acme" values above. +tls: + # Name of an existing secret to use which contains a TLS keypair. Will automatically set the https_keypairs section in teleport.yaml. + # Create the secret in the same namespace as Teleport using `kubectl create secret tls my-tls-secret --cert=/path/to/cert/file --key=/path/to/key/file` + # See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets for more information. + existingSecretName: "" + # (optional) Name of an existing secret to use which contains a CA or trust bundle in x509 PEM format. + # Useful for building trust when using intermediate certificate authorities. + # This will automatically set the SSL_CERT_FILE environment variable to trust the CA. + # Create the secret with `kubectl create secret generic --from-file=ca.pem=/path/to/root-ca.pem + # The filename inside the secret is important - it _must_ be ca.pem + existingCASecretName: "" + +################################################## +# Values that you shouldn't need to change. +################################################## + +# Container image for the cluster. +# Since version 13, hardened distroless images are used by default. +# You can use the deprecated debian-based images by setting the value to +# `public.ecr.aws/gravitational/teleport`. Those images will be +# removed with teleport 14. +image: public.ecr.aws/gravitational/teleport-distroless +# Enterprise version of the image +# Since version 13, hardened distroless images are used by default. +# You can use the deprecated debian-based images by setting the value to +# `public.ecr.aws/gravitational/teleport-ent`. Those images will be +# removed with teleport 14. 
+enterpriseImage: public.ecr.aws/gravitational/teleport-ent-distroless +# Optional array of imagePullSecrets, to use when pulling from a private registry +imagePullSecrets: [] +# Teleport logging configuration +log: + # Log level for the Teleport process. + # Available log levels are: DEBUG, INFO, WARNING, ERROR. + # The default is INFO, which is recommended in production. + # DEBUG is useful during first-time setup or to see more detailed logs for debugging. + level: INFO + # Log output + # Use a file path to log to disk: e.g. '/var/lib/teleport/teleport.log' + # Other supported values: 'stdout', 'stderr' and 'syslog' + output: stderr + # Log format configuration + # Possible output values are 'json' and 'text' (default). + format: text + # Possible extra_fields values include: timestamp, component, caller, and level. + # All extra fields are included by default. + extraFields: ["timestamp", "level", "component", "caller"] + +################################## +# Extra Kubernetes configuration # +################################## + +# nodeSelector to apply for pod assignment +# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector +nodeSelector: {} + +# Affinity for pod assignment +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# NOTE: If affinity is set here, highAvailability.requireAntiAffinity cannot also be used - you can only set one or the other. 
+affinity: {} + +# Kubernetes annotations to apply +# https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +annotations: + # Annotations for the ConfigMap + config: {} + # Annotations for the Deployment + deployment: {} + # Annotations for each Pod in the Deployment + pod: {} + # Annotations for the Service object + service: {} + # Annotations for the ServiceAccount object + serviceAccount: {} + # Annotations for the certificate secret generated by cert-manager v1.5+ when + # highAvailability.certManager.enabled is true + certSecret: {} + # Annotations for the Ingress object + ingress: {} + +# Kubernetes service account to create/use. +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and serviceAccount.create is true, the name is generated using the release name. + # If create is false, the name will be used to reference an existing service account. + name: "" + # To set annotations on the service account, use the annotations.serviceAccount value. + +# Set to true (default) to create Kubernetes ClusterRole and ClusterRoleBinding. +rbac: + # Specifies whether a ClusterRole and ClusterRoleBinding should be created. + # Set to false if your cluster level resources are managed separately. + create: true + +# Options for the Teleport proxy service +# This setting only applies to the proxy service. The teleport auth service is internal-only and always uses a ClusterIP. +# You can override the proxy's backend service to any service type (other than "LoadBalancer") here if really needed. +# To use an Ingress, set service.type=ClusterIP and ingress.enabled=true +service: + type: LoadBalancer + # Additional entries here will be added to the service spec. 
+ spec: {} + # loadBalancerIP: "1.2.3.4" + +# Options for ingress +# If you set ingress.enabled to true, service.type MUST also be set to something other than "LoadBalancer" to prevent +# additional unnecessary load balancers from being created. Ingress controllers should provision their own load balancer. +# Using an Ingress also requires that you use the `tsh` client to connect to Kubernetes clusters and databases behind Teleport. +# See https://goteleport.com/docs/architecture/tls-routing/#working-with-layer-7-load-balancers-or-reverse-proxies-preview for details. +ingress: + enabled: false + # Setting suppressAutomaticWildcards to true will not automatically add *. as a hostname served + # by the Ingress. This may be desirable if you don't use Teleport Application Access. + suppressAutomaticWildcards: false + # Additional entries here will be added to the ingress spec. + spec: {} + # ingressClassName: nginx + +# Extra arguments to pass to 'teleport start' for the main Teleport pod +extraArgs: [] + +# Extra environment to be configured on the Teleport pod +extraEnv: [] + +# Extra volumes to mount into the Teleport pods +# https://kubernetes.io/docs/concepts/storage/volumes/ +extraVolumes: [] +# - name: myvolume +# secret: +# secretName: testSecret + +# Extra volume mounts corresponding to the volumes mounted above +extraVolumeMounts: [] +# - name: myvolume +# mountPath: /path/on/host + +# Allow the imagePullPolicy to be overridden +imagePullPolicy: IfNotPresent + +# A list of initContainers to run before each Teleport pod starts +# https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +initContainers: [] +# - name: "teleport-init" +# image: "alpine" +# args: ["echo test"] + +# If set, will run the command as a postStart handler +# https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/ +postStart: + command: [] + +# Resources to request for the teleport container +# 
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +resources: {} +# requests: +# cpu: "1" +# memory: "2Gi" + +# Security context to add to the container +securityContext: {} + # runAsUser: 99 + +# Priority class name to add to the deployment +priorityClassName: "" + +# Tolerations for pod assignment +# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# Timeouts for the readiness and liveness probes +# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ +probeTimeoutSeconds: 1 + +# Kubernetes termination grace period +# https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution +# +# This should be greater than 30 seconds as pods are waiting 30 seconds in a preStop hook. +terminationGracePeriodSeconds: 60 diff --git a/helm/teleport-cluster/.lint/extra-containers.yaml b/helm/teleport-cluster/.lint/extra-containers.yaml new file mode 100644 index 0000000..14d04af --- /dev/null +++ b/helm/teleport-cluster/.lint/extra-containers.yaml @@ -0,0 +1,12 @@ +clusterName: helm-lint.example.com +extraContainers: + - name: nscenter + command: + - /bin/bash + - -c + - sleep infinity & wait + image: praqma/network-multitool + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsNonRoot: false diff --git a/helm/teleport-cluster/Chart.yaml b/helm/teleport-cluster/Chart.yaml index 73b2a2d..d8ce69d 100644 --- a/helm/teleport-cluster/Chart.yaml +++ b/helm/teleport-cluster/Chart.yaml @@ -1,13 +1,13 @@ apiVersion: v2 -appVersion: 13.3.9 +appVersion: 14.2.0 dependencies: - condition: installCRDs,operator.enabled name: teleport-operator repository: "" - version: 13.3.9 + version: 14.2.0 description: Teleport is an access platform for your infrastructure icon: https://goteleport.com/images/logos/logo-teleport-square.svg keywords: - Teleport name: teleport-cluster -version: 13.3.9 +version: 14.2.0 diff --git 
a/helm/teleport-cluster/charts/teleport-operator/Chart.yaml b/helm/teleport-cluster/charts/teleport-operator/Chart.yaml index 944d2ad..2d264d9 100644 --- a/helm/teleport-cluster/charts/teleport-operator/Chart.yaml +++ b/helm/teleport-cluster/charts/teleport-operator/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: 13.3.9 +appVersion: 14.2.0 description: Teleport Operator provides management of select Teleport resources. icon: https://goteleport.com/images/logos/logo-teleport-square.svg keywords: - Teleport name: teleport-operator -version: 13.3.9 +version: 14.2.0 diff --git a/helm/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_provisiontokens.yaml b/helm/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_provisiontokens.yaml index 81c7d92..af6aa9c 100644 --- a/helm/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_provisiontokens.yaml +++ b/helm/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_provisiontokens.yaml @@ -192,20 +192,38 @@ spec: must match one allow rule to use this token. items: properties: + ci_config_ref_uri: + type: string + ci_config_sha: + type: string + deployment_tier: + type: string environment: type: string + environment_protected: + type: boolean namespace_path: type: string pipeline_source: type: string project_path: type: string + project_visibility: + type: string ref: type: string + ref_protected: + type: boolean ref_type: type: string sub: type: string + user_email: + type: string + user_id: + type: string + user_login: + type: string type: object nullable: true type: array @@ -235,6 +253,19 @@ spec: type: object nullable: true type: array + static_jwks: + description: StaticJWKS is the configuration specific to the `static_jwks` + type. + nullable: true + properties: + jwks: + type: string + type: object + type: + description: 'Type controls which behavior should be used for + validating the Kubernetes Service Account token. 
Support values: + - `in_cluster` - `static_jwks` If unset, this defaults to `in_cluster`.' + type: string type: object roles: description: Roles is a list of roles associated with the token, that @@ -244,6 +275,32 @@ spec: type: string nullable: true type: array + spacelift: + description: Spacelift allows the configuration of options specific + to the "spacelift" join method. + nullable: true + properties: + allow: + description: Allow is a list of Rules, nodes using this token + must match one allow rule to use this token. + items: + properties: + caller_id: + type: string + caller_type: + type: string + scope: + type: string + space_id: + type: string + type: object + nullable: true + type: array + hostname: + description: Hostname is the hostname of the Spacelift tenant + that tokens will originate from. E.g `example.app.spacelift.io` + type: string + type: object suggested_agent_matcher_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true diff --git a/helm/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_roles.yaml b/helm/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_roles.yaml index b305702..f02463b 100644 --- a/helm/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_roles.yaml +++ b/helm/teleport-cluster/charts/teleport-operator/templates/resources.teleport.dev_roles.yaml @@ -236,6 +236,13 @@ spec: description: Namespace is the resource namespace. It supports wildcards. type: string + verbs: + description: Verbs are the allowed Kubernetes verbs for + the following resource. + items: + type: string + nullable: true + type: array type: object type: array kubernetes_users: @@ -690,6 +697,13 @@ spec: description: Namespace is the resource namespace. It supports wildcards. type: string + verbs: + description: Verbs are the allowed Kubernetes verbs for + the following resource. 
+ items: + type: string + nullable: true + type: array type: object type: array kubernetes_users: @@ -949,8 +963,7 @@ spec: mode: description: Mode is the type of extension to be used -- currently critical-option is not supported - format: int32 - type: integer + x-kubernetes-int-or-string: true name: description: Name specifies the key to be used in the cert extension. @@ -958,8 +971,7 @@ spec: type: description: Type represents the certificate type being extended, only ssh is supported at this time. - format: int32 - type: integer + x-kubernetes-int-or-string: true value: description: Value specifies the value to be used in the cert extension. @@ -981,6 +993,10 @@ spec: description: CreateDatabaseUser enabled automatic database user creation. type: boolean + create_db_user_mode: + description: CreateDatabaseUserMode allows users to be automatically + created on a database when not set to off. + x-kubernetes-int-or-string: true create_desktop_user: description: CreateDesktopUser allows users to be automatically created on a Windows desktop @@ -992,8 +1008,7 @@ spec: create_host_user_mode: description: CreateHostUserMode allows users to be automatically created on a host when not set to off - format: int32 - type: integer + x-kubernetes-int-or-string: true desktop_clipboard: description: DesktopClipboard indicates whether clipboard sharing is allowed between the user's workstation and the remote desktop. @@ -1099,13 +1114,12 @@ spec: type: string request_prompt: description: RequestPrompt is an optional message which tells - users what they aught to + users what they aught to request. type: string require_session_mfa: description: RequireMFAType is the type of MFA requirement enforced for this user. - format: int32 - type: integer + x-kubernetes-int-or-string: true ssh_file_copy: description: SSHFileCopy indicates whether remote file operations via SCP or SFTP are allowed over an SSH session. 
It defaults @@ -1419,6 +1433,13 @@ spec: description: Namespace is the resource namespace. It supports wildcards. type: string + verbs: + description: Verbs are the allowed Kubernetes verbs for + the following resource. + items: + type: string + nullable: true + type: array type: object type: array kubernetes_users: @@ -1873,6 +1894,13 @@ spec: description: Namespace is the resource namespace. It supports wildcards. type: string + verbs: + description: Verbs are the allowed Kubernetes verbs for + the following resource. + items: + type: string + nullable: true + type: array type: object type: array kubernetes_users: @@ -2132,8 +2160,7 @@ spec: mode: description: Mode is the type of extension to be used -- currently critical-option is not supported - format: int32 - type: integer + x-kubernetes-int-or-string: true name: description: Name specifies the key to be used in the cert extension. @@ -2141,8 +2168,7 @@ spec: type: description: Type represents the certificate type being extended, only ssh is supported at this time. - format: int32 - type: integer + x-kubernetes-int-or-string: true value: description: Value specifies the value to be used in the cert extension. @@ -2164,6 +2190,10 @@ spec: description: CreateDatabaseUser enabled automatic database user creation. type: boolean + create_db_user_mode: + description: CreateDatabaseUserMode allows users to be automatically + created on a database when not set to off. + x-kubernetes-int-or-string: true create_desktop_user: description: CreateDesktopUser allows users to be automatically created on a Windows desktop @@ -2175,8 +2205,7 @@ spec: create_host_user_mode: description: CreateHostUserMode allows users to be automatically created on a host when not set to off - format: int32 - type: integer + x-kubernetes-int-or-string: true desktop_clipboard: description: DesktopClipboard indicates whether clipboard sharing is allowed between the user's workstation and the remote desktop. 
@@ -2282,13 +2311,12 @@ spec: type: string request_prompt: description: RequestPrompt is an optional message which tells - users what they aught to + users what they aught to request. type: string require_session_mfa: description: RequireMFAType is the type of MFA requirement enforced for this user. - format: int32 - type: integer + x-kubernetes-int-or-string: true ssh_file_copy: description: SSHFileCopy indicates whether remote file operations via SCP or SFTP are allowed over an SSH session. It defaults diff --git a/helm/teleport-cluster/override-values.yaml b/helm/teleport-cluster/override-values.yaml index 4989c7f..67f5045 100644 --- a/helm/teleport-cluster/override-values.yaml +++ b/helm/teleport-cluster/override-values.yaml @@ -1,6 +1,5 @@ chartMode: standalone clusterName: teleport.kr.datasaker.io - #teleportVersionOverride: "13.3.8" auth: teleportConfig: diff --git a/helm/teleport-cluster/templates/auth/deployment.yaml b/helm/teleport-cluster/templates/auth/deployment.yaml index 8c71803..699d135 100644 --- a/helm/teleport-cluster/templates/auth/deployment.yaml +++ b/helm/teleport-cluster/templates/auth/deployment.yaml @@ -248,6 +248,13 @@ spec: port: 8081 initialDelaySeconds: 5 periodSeconds: 10 + ports: + - name: op-metrics + containerPort: 8080 + protocol: TCP + - name: op-health + containerPort: 8081 + protocol: TCP {{- if .Values.operator.resources }} resources: {{- toYaml .Values.operator.resources | nindent 10 }} {{- end }} @@ -263,6 +270,9 @@ spec: readOnly: true {{- end }} {{ end }} +{{- if $auth.extraContainers }} + {{- toYaml $auth.extraContainers | nindent 6 }} +{{- end }} {{- if $projectedServiceAccountToken }} automountServiceAccountToken: false {{- end }} diff --git a/helm/teleport-cluster/templates/proxy/_config.common.tpl b/helm/teleport-cluster/templates/proxy/_config.common.tpl index b6c5e41..32dd85c 100644 --- a/helm/teleport-cluster/templates/proxy/_config.common.tpl +++ b/helm/teleport-cluster/templates/proxy/_config.common.tpl @@ -70,7 
+70,10 @@ proxy_service: uri: {{ .Values.acmeURI }} {{- end }} {{- end }} -{{- if and .Values.ingress.enabled (semverCompare ">= 13.2.0-0" (include "teleport-cluster.version" .)) }} +{{- if .Values.proxyProtocol }} + proxy_protocol: {{ .Values.proxyProtocol | quote }} +{{- end }} +{{- if and .Values.ingress.enabled (semverCompare ">= 14.0.0-0" (include "teleport-cluster.version" .)) }} trust_x_forwarded_for: true {{- end }} {{- end -}} diff --git a/helm/teleport-cluster/templates/proxy/certificate.yaml b/helm/teleport-cluster/templates/proxy/certificate.yaml index d1a98ee..d2a4dbd 100644 --- a/helm/teleport-cluster/templates/proxy/certificate.yaml +++ b/helm/teleport-cluster/templates/proxy/certificate.yaml @@ -1,7 +1,22 @@ {{- $proxy := mustMergeOverwrite (mustDeepCopy .Values) .Values.proxy -}} -{{- if $proxy.highAvailability.certManager.enabled }} - {{- $domain := (required "clusterName is required in chartValues when certManager is enabled" $proxy.clusterName) }} - {{- $domainWildcard := printf "*.%s" (required "clusterName is required in chartValues when certManager is enabled" $proxy.clusterName) }} +{{- if $proxy.highAvailability.certManager.enabled -}} + {{- /* Append clusterName and wildcard version to list of dnsNames on certificate request (original functionality) */ -}} + {{- $domainList := list (required "clusterName is required in chartValues when certManager is enabled" $proxy.clusterName) -}} + {{- $domainList := append $domainList (printf "*.%s" (required "clusterName is required in chartValues when certManager is enabled" $proxy.clusterName)) -}} + {{- /* If the config option is enabled and at least one publicAddr is set, append all public addresses to the list of dnsNames */ -}} + {{- if and $proxy.highAvailability.certManager.addPublicAddrs (gt (len .Values.publicAddr) 0) -}} + {{- /* Trim ports from all public addresses if present */ -}} + {{- range .Values.publicAddr -}} + {{- $address := . 
-}} + {{- if (contains ":" $address) -}} + {{- $split := split ":" $address -}} + {{- $address = $split._0 -}} + {{- end -}} + {{- $domainList = append (mustWithout $domainList .) $address -}} + {{- end -}} + {{- end -}} + {{- /* Finally, remove any duplicate entries from the list of domains */ -}} + {{- $domainList := mustUniq $domainList -}} apiVersion: cert-manager.io/v1 kind: Certificate metadata: @@ -11,11 +26,12 @@ metadata: spec: secretName: teleport-tls {{- if $proxy.highAvailability.certManager.addCommonName }} - commonName: {{ quote $domain }} + commonName: {{ quote $proxy.clusterName }} {{- end }} dnsNames: - - {{ quote $domain }} - - {{ quote $domainWildcard }} + {{- range $domainList }} + - {{ quote . }} + {{- end }} issuerRef: name: {{ required "highAvailability.certManager.issuerName is required in chart values" $proxy.highAvailability.certManager.issuerName }} kind: {{ required "highAvailability.certManager.issuerKind is required in chart values" $proxy.highAvailability.certManager.issuerKind }} diff --git a/helm/teleport-cluster/templates/proxy/deployment.yaml b/helm/teleport-cluster/templates/proxy/deployment.yaml index a77c339..68cfbd5 100644 --- a/helm/teleport-cluster/templates/proxy/deployment.yaml +++ b/helm/teleport-cluster/templates/proxy/deployment.yaml @@ -255,6 +255,9 @@ spec: {{- if $proxy.extraVolumeMounts }} {{- toYaml $proxy.extraVolumeMounts | nindent 8 }} {{- end }} +{{- if $proxy.extraContainers }} + {{- toYaml $proxy.extraContainers | nindent 6 }} +{{- end }} {{- if $projectedServiceAccountToken }} automountServiceAccountToken: false {{- end }} diff --git a/helm/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap b/helm/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap index f3f40c9..14d5a57 100644 --- a/helm/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap +++ b/helm/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap @@ -1,6 +1,6 @@ should add an operator side-car 
when operator is enabled: 1: | - image: public.ecr.aws/gravitational/teleport-operator:13.3.9 + image: public.ecr.aws/gravitational/teleport-operator:14.2.0 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -9,6 +9,13 @@ should add an operator side-car when operator is enabled: initialDelaySeconds: 15 periodSeconds: 20 name: operator + ports: + - containerPort: 8080 + name: op-metrics + protocol: TCP + - containerPort: 8081 + name: op-health + protocol: TCP readinessProbe: httpGet: path: /readyz @@ -34,7 +41,7 @@ should add an operator side-car when operator is enabled: - args: - --diag-addr=0.0.0.0:3000 - --apply-on-startup=/etc/teleport/apply-on-startup.yaml - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -167,7 +174,7 @@ should set nodeSelector when set in values: - args: - --diag-addr=0.0.0.0:3000 - --apply-on-startup=/etc/teleport/apply-on-startup.yaml - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -264,7 +271,7 @@ should set resources when set in values: - args: - --diag-addr=0.0.0.0:3000 - --apply-on-startup=/etc/teleport/apply-on-startup.yaml - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -350,7 +357,7 @@ should set securityContext when set in values: - args: - --diag-addr=0.0.0.0:3000 - --apply-on-startup=/etc/teleport/apply-on-startup.yaml - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 imagePullPolicy: IfNotPresent lifecycle: preStop: diff --git a/helm/teleport-cluster/tests/__snapshot__/proxy_certificate_test.yaml.snap 
b/helm/teleport-cluster/tests/__snapshot__/proxy_certificate_test.yaml.snap index 319cbd8..ff19c7f 100644 --- a/helm/teleport-cluster/tests/__snapshot__/proxy_certificate_test.yaml.snap +++ b/helm/teleport-cluster/tests/__snapshot__/proxy_certificate_test.yaml.snap @@ -1,3 +1,55 @@ +? should not request a certificate for cluster name and publicAddrs when cert-manager + is enabled and proxy.highAvailability.certManager.addPublicAddrs is not set (cert-manager.yaml) +: 1: | + - test-cluster + - '*.test-cluster' + 2: | + group: custom.cert-manager.io + kind: CustomClusterIssuer + name: custom +? should not request a certificate for cluster name and publicAddrs when cert-manager + is enabled and proxy.highAvailability.certManager.addPublicAddrs is not set (cert-secret.yaml) +: 1: | + - test-cluster + - '*.test-cluster' + 2: | + group: cert-manager.io + kind: Issuer + name: letsencrypt +? should request a certificate for cluster name and publicAddrs when cert-manager + is enabled and proxy.highAvailability.certManager.addPublicAddrs is set (cert-manager.yaml) +: 1: | + - test-cluster + - '*.test-cluster' + - teleport.test.com + - teleport.shared-services.old-domain.com + 2: | + group: custom.cert-manager.io + kind: CustomClusterIssuer + name: custom +? should request a certificate for cluster name and publicAddrs when cert-manager + is enabled and proxy.highAvailability.certManager.addPublicAddrs is set (cert-secret.yaml) +: 1: | + - test-cluster + - '*.test-cluster' + - teleport.test.com + - teleport.shared-services.old-domain.com + 2: | + group: cert-manager.io + kind: Issuer + name: letsencrypt +? 
should request a certificate for cluster name and publicAddrs when cert-manager + is enabled and proxy.highAvailability.certManager.addPublicAddrs is set, removing + duplicates +: 1: | + - test-cluster + - '*.test-cluster' + - teleport.test.com + - teleport.shared-services.old-domain.com + 2: | + group: custom.cert-manager.io + kind: CustomClusterIssuer + name: custom should request a certificate for cluster name when cert-manager is enabled (cert-manager.yaml): 1: | - test-cluster diff --git a/helm/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap b/helm/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap index d2858df..490e0bf 100644 --- a/helm/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap +++ b/helm/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap @@ -1,4 +1,4 @@ -generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled is not set: +generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 14.0.0 and ingress.enabled is not set: 1: | |- auth_service: @@ -28,7 +28,7 @@ generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version output: stderr severity: INFO version: v3 -generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled=true: +generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 14.0.0 and ingress.enabled=true: 1: | |- auth_service: @@ -54,7 +54,7 @@ generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version output: stderr severity: INFO version: v3 -generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled is not set: +generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version >=14.0.0 and ingress.enabled is not set: 1: | |- auth_service: @@ -141,7 +141,7 @@ generates a config with proxy_service.trust_x_forwarded_for=true when 
version = output: stderr severity: INFO version: v3 -generates a config with proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled=true: +generates a config with proxy_service.trust_x_forwarded_for=true when version >=14.0.0 and ingress.enabled=true: 1: | |- auth_service: diff --git a/helm/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap b/helm/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap index 73629a8..e8362a0 100644 --- a/helm/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap +++ b/helm/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap @@ -4,8 +4,8 @@ should provision initContainer correctly when set in values: - teleport - wait - no-resolve - - RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + - RELEASE-NAME-auth-v13.NAMESPACE.svc.cluster.local + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 name: wait-auth-update - args: - echo test @@ -62,7 +62,7 @@ should set nodeSelector when set in values: containers: - args: - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -122,8 +122,8 @@ should set nodeSelector when set in values: - teleport - wait - no-resolve - - RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + - RELEASE-NAME-auth-v13.NAMESPACE.svc.cluster.local + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 name: wait-auth-update nodeSelector: environment: security @@ -174,7 +174,7 @@ should set resources when set in values: containers: - args: - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 imagePullPolicy: IfNotPresent lifecycle: 
preStop: @@ -241,8 +241,8 @@ should set resources when set in values: - teleport - wait - no-resolve - - RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + - RELEASE-NAME-auth-v13.NAMESPACE.svc.cluster.local + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 name: wait-auth-update serviceAccountName: RELEASE-NAME-proxy terminationGracePeriodSeconds: 60 @@ -275,7 +275,7 @@ should set securityContext for initContainers when set in values: containers: - args: - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -342,8 +342,8 @@ should set securityContext for initContainers when set in values: - teleport - wait - no-resolve - - RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + - RELEASE-NAME-auth-v13.NAMESPACE.svc.cluster.local + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 name: wait-auth-update securityContext: allowPrivilegeEscalation: false @@ -383,7 +383,7 @@ should set securityContext when set in values: containers: - args: - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -450,8 +450,8 @@ should set securityContext when set in values: - teleport - wait - no-resolve - - RELEASE-NAME-auth-v12.NAMESPACE.svc.cluster.local - image: public.ecr.aws/gravitational/teleport-distroless:13.3.9 + - RELEASE-NAME-auth-v13.NAMESPACE.svc.cluster.local + image: public.ecr.aws/gravitational/teleport-distroless:14.2.0 name: wait-auth-update securityContext: allowPrivilegeEscalation: false diff --git a/helm/teleport-cluster/tests/auth_deployment_test.yaml b/helm/teleport-cluster/tests/auth_deployment_test.yaml 
index cc8cb58..d838ca9 100644 --- a/helm/teleport-cluster/tests/auth_deployment_test.yaml +++ b/helm/teleport-cluster/tests/auth_deployment_test.yaml @@ -304,6 +304,7 @@ tests: name: my-mount secret: secretName: mySecret + - it: should set imagePullPolicy when set in values template: auth/deployment.yaml set: @@ -314,6 +315,36 @@ tests: path: spec.template.spec.containers[0].imagePullPolicy value: Always + - it: should have only one container when no `extraContainers` is set in values + template: auth/deployment.yaml + set: + extraContainers: [] + clusterName: helm-lint.example.com + asserts: + - isNotNull: + path: spec.template.spec.containers[0] + - isNull: + path: spec.template.spec.containers[1] + + - it: should add one more container when `extraContainers` is set in values + template: auth/deployment.yaml + values: + - ../.lint/extra-containers.yaml + asserts: + - equal: + path: spec.template.spec.containers[1] + value: + name: nscenter + command: + - /bin/bash + - -c + - sleep infinity & wait + image: praqma/network-multitool + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsNonRoot: false + - it: should set environment when extraEnv set in values template: auth/deployment.yaml values: diff --git a/helm/teleport-cluster/tests/proxy_certificate_test.yaml b/helm/teleport-cluster/tests/proxy_certificate_test.yaml index d1d8f0c..3d50476 100644 --- a/helm/teleport-cluster/tests/proxy_certificate_test.yaml +++ b/helm/teleport-cluster/tests/proxy_certificate_test.yaml @@ -14,6 +14,9 @@ tests: path: spec.dnsNames - matchSnapshot: path: spec.issuerRef + - equal: + path: spec.commonName + value: test-cluster - it: should request a certificate for cluster name when cert-manager is enabled (cert-secret.yaml) values: @@ -27,3 +30,165 @@ tests: path: spec.dnsNames - matchSnapshot: path: spec.issuerRef + + - it: should request a certificate for cluster name and publicAddrs when cert-manager is enabled and 
proxy.highAvailability.certManager.addPublicAddrs is set (cert-manager.yaml) + values: + - ../.lint/cert-manager.yaml + set: + publicAddr: ['teleport.test.com:443', 'teleport.shared-services.old-domain.com:443'] + highAvailability: + certManager: + addPublicAddrs: true + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Certificate + - matchSnapshot: + path: spec.dnsNames + - matchSnapshot: + path: spec.issuerRef + - equal: + path: spec.commonName + value: test-cluster + - equal: + path: spec.dnsNames[0] + value: "test-cluster" + - equal: + path: spec.dnsNames[1] + value: "*.test-cluster" + - equal: + path: spec.dnsNames[2] + value: "teleport.test.com" + - equal: + path: spec.dnsNames[3] + value: "teleport.shared-services.old-domain.com" + + - it: should not request a certificate for cluster name and publicAddrs when cert-manager is enabled and proxy.highAvailability.certManager.addPublicAddrs is not set (cert-manager.yaml) + values: + - ../.lint/cert-manager.yaml + set: + publicAddr: ['teleport.test.com:443', 'teleport.shared-services.old-domain.com:443'] + highAvailability: + certManager: + addPublicAddrs: false + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Certificate + - matchSnapshot: + path: spec.dnsNames + - matchSnapshot: + path: spec.issuerRef + - equal: + path: spec.commonName + value: test-cluster + - equal: + path: spec.dnsNames[0] + value: "test-cluster" + - equal: + path: spec.dnsNames[1] + value: "*.test-cluster" + - notEqual: + path: spec.dnsNames[2] + value: "teleport.test.com" + - notEqual: + path: spec.dnsNames[3] + value: "teleport.shared-services.old-domain.com" + + - it: should request a certificate for cluster name and publicAddrs when cert-manager is enabled and proxy.highAvailability.certManager.addPublicAddrs is set (cert-secret.yaml) + values: + - ../.lint/cert-secret.yaml + set: + publicAddr: ['teleport.test.com:443', 'teleport.shared-services.old-domain.com:443'] + highAvailability: + certManager: + addPublicAddrs: 
true + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Certificate + - matchSnapshot: + path: spec.dnsNames + - matchSnapshot: + path: spec.issuerRef + - equal: + path: spec.dnsNames[0] + value: "test-cluster" + - equal: + path: spec.dnsNames[1] + value: "*.test-cluster" + - equal: + path: spec.dnsNames[2] + value: "teleport.test.com" + - equal: + path: spec.dnsNames[3] + value: "teleport.shared-services.old-domain.com" + + - it: should not request a certificate for cluster name and publicAddrs when cert-manager is enabled and proxy.highAvailability.certManager.addPublicAddrs is not set (cert-secret.yaml) + values: + - ../.lint/cert-secret.yaml + set: + publicAddr: ['teleport.test.com:443', 'teleport.shared-services.old-domain.com:443'] + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Certificate + - matchSnapshot: + path: spec.dnsNames + - matchSnapshot: + path: spec.issuerRef + - notEqual: + path: spec.commonName + value: test-cluster + - equal: + path: spec.dnsNames[0] + value: "test-cluster" + - equal: + path: spec.dnsNames[1] + value: "*.test-cluster" + - notEqual: + path: spec.dnsNames[2] + value: "teleport.test.com" + - notEqual: + path: spec.dnsNames[3] + value: "teleport.shared-services.old-domain.com" + + - it: should request a certificate for cluster name and publicAddrs when cert-manager is enabled and proxy.highAvailability.certManager.addPublicAddrs is set, removing duplicates + values: + - ../.lint/cert-manager.yaml + set: + publicAddr: ['test-cluster:443', 'teleport.test.com:443', 'teleport.shared-services.old-domain.com:443', 'teleport.test.com:443'] + highAvailability: + certManager: + addPublicAddrs: true + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Certificate + - matchSnapshot: + path: spec.dnsNames + - matchSnapshot: + path: spec.issuerRef + - equal: + path: spec.dnsNames[0] + value: "test-cluster" + - equal: + path: spec.dnsNames[1] + value: "*.test-cluster" + - notEqual: + path: spec.dnsNames[2] + value: 
"test-cluster" + - equal: + path: spec.dnsNames[2] + value: "teleport.test.com" + - equal: + path: spec.dnsNames[3] + value: "teleport.shared-services.old-domain.com" + - notEqual: + path: spec.dnsNames[4] + value: "teleport.test.com" diff --git a/helm/teleport-cluster/tests/proxy_config_test.yaml b/helm/teleport-cluster/tests/proxy_config_test.yaml index cbacce9..02bc186 100644 --- a/helm/teleport-cluster/tests/proxy_config_test.yaml +++ b/helm/teleport-cluster/tests/proxy_config_test.yaml @@ -163,9 +163,9 @@ tests: - failedTemplate: errorMessage: "clusterName must not contain a colon, you can override the cluster's public address with publicAddr" - - it: generates a config with proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled=true + - it: generates a config with proxy_service.trust_x_forwarded_for=true when version >=14.0.0 and ingress.enabled=true chart: - version: 13.2.0 + version: 14.0.0 values: - ../.lint/ingress.yaml set: @@ -193,9 +193,9 @@ tests: - matchSnapshot: path: data.teleport\.yaml - - it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version >=13.2.0 and ingress.enabled is not set + - it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version >=14.0.0 and ingress.enabled is not set chart: - version: 13.2.0 + version: 14.0.0 set: clusterName: "helm-test.example.com" asserts: @@ -206,7 +206,7 @@ tests: - matchSnapshot: path: data.teleport\.yaml - - it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled=true + - it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 14.0.0 and ingress.enabled=true chart: version: 13.1.5 values: @@ -221,9 +221,9 @@ tests: - matchSnapshot: path: data.teleport\.yaml - - it: generates a config WITHOUT proxy_service.trust_x_forwarded_for=true when version < 13.2.0 and ingress.enabled is not set + - it: generates a config WITHOUT 
proxy_service.trust_x_forwarded_for=true when version < 14.0.0 and ingress.enabled is not set chart: - version: 13.1.5 + version: 14.0.0 set: clusterName: "helm-test.example.com" asserts: @@ -233,3 +233,26 @@ tests: of: ConfigMap - matchSnapshot: path: data.teleport\.yaml + - it: sets "proxy_protocol" to "on" + set: + proxyProtocol: "on" + clusterName: teleport.example.com + asserts: + - matchRegex: + path: data.teleport\.yaml + pattern: 'proxy_protocol: "on"' + - it: sets "proxy_protocol" to "off" + set: + proxyProtocol: "off" + clusterName: teleport.example.com + asserts: + - matchRegex: + path: data.teleport\.yaml + pattern: 'proxy_protocol: "off"' + - it: does not set "proxy_protocol" + set: + clusterName: teleport.example.com + asserts: + - notMatchRegex: + path: data.teleport\.yaml + pattern: 'proxy_protocol:' diff --git a/helm/teleport-cluster/tests/proxy_deployment_test.yaml b/helm/teleport-cluster/tests/proxy_deployment_test.yaml index 4c4ddf4..c4cbfcc 100644 --- a/helm/teleport-cluster/tests/proxy_deployment_test.yaml +++ b/helm/teleport-cluster/tests/proxy_deployment_test.yaml @@ -332,6 +332,36 @@ tests: path: spec.template.spec.containers[0].imagePullPolicy value: Always + - it: should have only one container when no `extraContainers` is set in values + template: proxy/deployment.yaml + set: + extraContainers: [] + clusterName: helm-lint.example.com + asserts: + - isNotNull: + path: spec.template.spec.containers[0] + - isNull: + path: spec.template.spec.containers[1] + + - it: should add one more container when `extraContainers` is set in values + template: proxy/deployment.yaml + values: + - ../.lint/extra-containers.yaml + asserts: + - equal: + path: spec.template.spec.containers[1] + value: + name: nscenter + command: + - /bin/bash + - -c + - sleep infinity & wait + image: praqma/network-multitool + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsNonRoot: false + - it: should set environment when extraEnv set in values 
template: proxy/deployment.yaml values: diff --git a/helm/teleport-cluster/values.schema.json b/helm/teleport-cluster/values.schema.json index 3169457..675f9b5 100644 --- a/helm/teleport-cluster/values.schema.json +++ b/helm/teleport-cluster/values.schema.json @@ -19,6 +19,7 @@ "affinity", "nodeSelector", "annotations", + "extraContainers", "extraVolumes", "extraVolumeMounts", "imagePullPolicy", @@ -33,6 +34,15 @@ "type": "string", "default": "" }, + "proxyProtocol": { + "$id": "#/properties/proxyProtocol", + "type": "string", + "default": "", + "enum": [ + "off", + "on" + ] + }, "auth": { "$id": "#/properties/auth", "type": "object" @@ -49,7 +59,9 @@ "podMonitor": { "$id": "#/properties/podMonitor", "type": "object", - "required": ["enabled"], + "required": [ + "enabled" + ], "properties": { "enabled": { "$id": "#/properties/podMonitor/enabled", @@ -59,8 +71,12 @@ "additionalLabels": { "$id": "#/properties/podMonitor/additionalLabels", "type": "object", - "default": {"prometheus": "default"}, - "additionalProperties": {"type": "string"} + "default": { + "prometheus": "default" + }, + "additionalProperties": { + "type": "string" + } }, "interval": { "$id": "#/properties/podMonitor/interval", @@ -72,7 +88,10 @@ "authentication": { "$id": "#/properties/authentication", "type": "object", - "required": ["type", "localAuth"], + "required": [ + "type", + "localAuth" + ], "properties": { "type": { "$id": "#/properties/authentication/properties/type", @@ -97,7 +116,13 @@ "secondFactor": { "$id": "#/properties/authentication/properties/secondFactor", "type": "string", - "enum": ["off", "on", "otp", "optional", "webauthn"], + "enum": [ + "off", + "on", + "otp", + "optional", + "webauthn" + ], "default": "otp" }, "webauthn": { @@ -131,7 +156,13 @@ "secondFactor": { "$id": "#/properties/authenticationSecondFactor/properties/secondFactor", "type": "string", - "enum": ["off", "on", "otp", "optional", "webauthn"], + "enum": [ + "off", + "on", + "otp", + "optional", + "webauthn" + 
], "default": "otp" }, "webauthn": { @@ -261,7 +292,9 @@ "operator": { "$id": "#/properties/operator", "type": "object", - "required": ["enabled"], + "required": [ + "enabled" + ], "properties": { "enabled": { "$id": "#/properties/operator/properties/enabled", @@ -587,6 +620,11 @@ "type": "boolean", "default": "false" }, + "addPublicAddrs": { + "$id": "#/properties/highAvailability/properties/certManager/properties/addPublicAddrs", + "type": "boolean", + "default": "false" + }, "enabled": { "$id": "#/properties/highAvailability/properties/certManager/properties/enabled", "type": "boolean", @@ -695,7 +733,13 @@ "level": { "$id": "#/properties/log/properties/level", "type": "string", - "enum": ["DEBUG", "INFO", "WARN", "WARNING", "ERROR"], + "enum": [ + "DEBUG", + "INFO", + "WARN", + "WARNING", + "ERROR" + ], "default": "INFO" }, "deployment": { @@ -845,6 +889,11 @@ "type": "array", "default": [] }, + "extraContainers": { + "$id": "#/properties/extraContainers", + "type": "array", + "default": [] + }, "extraVolumes": { "$id": "#/properties/extraVolumes", "type": "array", diff --git a/helm/teleport-cluster/values.yaml b/helm/teleport-cluster/values.yaml index 54283ec..1a11af3 100644 --- a/helm/teleport-cluster/values.yaml +++ b/helm/teleport-cluster/values.yaml @@ -30,6 +30,30 @@ kubeClusterName: "" # If you want to run Teleport version X, you should use `helm --version X` instead. teleportVersionOverride: "" +# The `proxyProtocol` value controls whether the Proxy pods will +# accept PROXY lines with the client's IP address when they are +# behind a L4 load balancer (e.g. AWS ELB, GCP L4 LB, etc) with PROXY protocol +# enabled. Since L4 LBs do not preserve the client's IP address, PROXY protocol is +# required to ensure that Teleport can properly audit the client's IP address. +# +# When Teleport pods are not behind a L4 LB with PROXY protocol enabled, this +# value should be set to "off" to prevent Teleport from accepting PROXY headers +# from untrusted sources. 
+# Possible values are "on" and "off". +# - "on" will enable the PROXY protocol for all connections and will require the +# L4 LB to send a PROXY header. +# - "off" will disable the PROXY protocol for all connections and denies all +# connections prefixed with a PROXY header. +# +# If proxyProtocol is unspecified, Teleport does not require PROXY header for the +# connection, but will accept it if present. This mode is considered insecure +# and should only be used for testing purposes. +# +# See https://goteleport.com/docs/ver/14.x/management/security/proxy-protocol/ +# for more information. +# +# proxyProtocol: on + # The `teleport-cluster` charts deploys two sets of pods: auth and proxy. # `auth` contains values specific for the auth pods. You can use it to # set specific values for auth pods, taking precedence over chart-scoped values. @@ -437,10 +461,13 @@ highAvailability: # Settings for cert-manager (can be used for provisioning TLS certs in HA mode) # These settings are mutually exclusive with the "tls" value below. certManager: - # If set to true, a common name matching the cluster name will be set in the certificate signing request. This is mandatory for some CAs. - addCommonName: false # If set to true, use cert-manager to get certificates for Teleport to use for TLS termination enabled: false + # If set to true, a common name matching the cluster name will be set in the certificate signing request. This is mandatory for some CAs. + addCommonName: false + # If set to true, any additional public addresses configured under the `publicAddr` chart value will be added to the certificate signing request. + # This setting is not enabled by default to preserve backward compatibility. + addPublicAddrs: false # Name of the Issuer/ClusterIssuer to use for certs # NOTE: You will always need to create this yourself when certManager.enabled is true. 
issuerName: "" @@ -582,6 +609,19 @@ extraArgs: [] # Extra environment to be configured on the Teleport pod extraEnv: [] +# Extra containers to be added to the Teleport pod +extraContainers: [] +# - name: nscenter +# command: +# - /bin/bash +# - -c +# - sleep infinity & wait +# image: praqma/network-multitool +# imagePullPolicy: IfNotPresent +# securityContext: +# privileged: true +# runAsNonRoot: false + # Extra volumes to mount into the Teleport pods # https://kubernetes.io/docs/concepts/storage/volumes/ extraVolumes: []