---
# Source: vault/templates/injector-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: release-name-vault-agent-injector
  namespace: dsk-middle
  labels:
    app.kubernetes.io/name: vault-agent-injector
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
---
# Source: vault/templates/server-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: release-name-vault
  namespace: dsk-middle
  labels:
    helm.sh/chart: vault-0.22.1
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
---
# Source: vault/templates/server-config-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: release-name-vault-config
  namespace: dsk-middle
  labels:
    helm.sh/chart: vault-0.22.1
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
data:
  extraconfig-from-values.hcl: |-
    disable_mlock = true
    ui = true
    listener "tcp" {
      tls_disable = 1
      address = "[::]:8200"
      cluster_address = "[::]:8201"
    }
    storage "file" {
      path = "/vault/data"
    }
    seal "awskms" {
      region = "ap-northeast-2"
      access_key = "AKIAXMVVF3TA3NTIIHN6"
      secret_key = "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"
      kms_key_id = "c5b3ae3a-e976-4773-abcb-18d68c26a67d"
    }
---
# Source: vault/templates/injector-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: release-name-vault-agent-injector-clusterrole
  labels:
    app.kubernetes.io/name: vault-agent-injector
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: ["admissionregistration.k8s.io"]
  resources: ["mutatingwebhookconfigurations"]
  verbs:
    - "get"
    - "list"
    - "watch"
    - "patch"
---
# Source: vault/templates/injector-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: release-name-vault-agent-injector-binding
  labels:
    app.kubernetes.io/name: vault-agent-injector
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: release-name-vault-agent-injector-clusterrole
subjects:
- kind: ServiceAccount
  name: release-name-vault-agent-injector
  namespace: dsk-middle
---
# Source: vault/templates/server-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: release-name-vault-server-binding
  labels:
    helm.sh/chart: vault-0.22.1
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: release-name-vault
  namespace: dsk-middle
---
# Source: vault/templates/injector-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: release-name-vault-agent-injector-svc
  namespace: dsk-middle
  labels:
    app.kubernetes.io/name: vault-agent-injector
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
spec:
  ports:
    - name: https
      port: 443
      targetPort: 8080
  selector:
    app.kubernetes.io/name: vault-agent-injector
    app.kubernetes.io/instance: release-name
    component: webhook
---
# Source: vault/templates/server-headless-service.yaml
# Service for Vault cluster
apiVersion: v1
kind: Service
metadata:
  name: release-name-vault-internal
  namespace: dsk-middle
  labels:
    helm.sh/chart: vault-0.22.1
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
    vault-internal: "true"
  annotations:
spec:
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: "http"
      port: 8200
      targetPort: 8200
    - name: https-internal
      port: 8201
      targetPort: 8201
  selector:
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: release-name
    component: server
---
# Source: vault/templates/server-service.yaml
# Service for Vault cluster
apiVersion: v1
kind: Service
metadata:
  name: release-name-vault
  namespace: dsk-middle
  labels:
    helm.sh/chart: vault-0.22.1
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
  annotations:
spec:
  # We want the servers to become available even if they're not ready
  # since this DNS is also used for join operations.
  publishNotReadyAddresses: true
  ports:
    - name: http
      port: 8200
      targetPort: 8200
    - name: https-internal
      port: 8201
      targetPort: 8201
  selector:
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: release-name
    component: server
---
# Source: vault/templates/ui-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: release-name-vault-ui
  namespace: dsk-middle
  labels:
    helm.sh/chart: vault-0.22.1
    app.kubernetes.io/name: vault-ui
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
spec:
  selector:
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: release-name
    component: server
  publishNotReadyAddresses: true
  ports:
    - name: http
      port: 8200
      targetPort: 8200
      nodePort: 32702
  type: NodePort
  externalTrafficPolicy: Cluster
---
# Source: vault/templates/injector-deployment.yaml
# Deployment for the injector
apiVersion: apps/v1
kind: Deployment
metadata:
  name: release-name-vault-agent-injector
  namespace: dsk-middle
  labels:
    app.kubernetes.io/name: vault-agent-injector
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
    component: webhook
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: vault-agent-injector
      app.kubernetes.io/instance: release-name
      component: webhook
  template:
    metadata:
      labels:
        app.kubernetes.io/name: vault-agent-injector
        app.kubernetes.io/instance: release-name
        component: webhook
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: datasaker/group
                    operator: In
                    values:
                      - data-kafka
      tolerations:
        - key: dev/data-kafka
          operator: Exists
      serviceAccountName: "release-name-vault-agent-injector"
      securityContext:
        runAsNonRoot: true
        runAsGroup: 1000
        runAsUser: 100
        fsGroup: 1000
      hostNetwork: false
      containers:
        - name: sidecar-injector
          image: "hashicorp/vault-k8s:1.0.1"
          imagePullPolicy: "IfNotPresent"
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
          env:
            - name: AGENT_INJECT_LISTEN
              value: :8080
            - name: AGENT_INJECT_LOG_LEVEL
              value: info
            - name: AGENT_INJECT_VAULT_ADDR
              value: http://release-name-vault.dsk-middle.svc:8200
            - name: AGENT_INJECT_VAULT_AUTH_PATH
              value: auth/kubernetes
            - name: AGENT_INJECT_VAULT_IMAGE
              value: "hashicorp/vault:1.12.0"
            - name: AGENT_INJECT_TLS_AUTO
              value: release-name-vault-agent-injector-cfg
            - name: AGENT_INJECT_TLS_AUTO_HOSTS
              value: release-name-vault-agent-injector-svc,release-name-vault-agent-injector-svc.dsk-middle,release-name-vault-agent-injector-svc.dsk-middle.svc
            - name: AGENT_INJECT_LOG_FORMAT
              value: standard
            - name: AGENT_INJECT_REVOKE_ON_SHUTDOWN
              value: "false"
            - name: AGENT_INJECT_CPU_REQUEST
              value: "250m"
            - name: AGENT_INJECT_CPU_LIMIT
              value: "500m"
            - name: AGENT_INJECT_MEM_REQUEST
              value: "64Mi"
            - name: AGENT_INJECT_MEM_LIMIT
              value: "128Mi"
            - name: AGENT_INJECT_DEFAULT_TEMPLATE
              value: "map"
            - name: AGENT_INJECT_TEMPLATE_CONFIG_EXIT_ON_RETRY_FAILURE
              value: "true"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
          args:
            - agent-inject
            - 2>&1
          livenessProbe:
            httpGet:
              path: /health/ready
              port: 8080
              scheme: HTTPS
            failureThreshold: 2
            initialDelaySeconds: 5
            periodSeconds: 2
            successThreshold: 1
            timeoutSeconds: 5
          readinessProbe:
            httpGet:
              path: /health/ready
              port: 8080
              scheme: HTTPS
            failureThreshold: 2
            initialDelaySeconds: 5
            periodSeconds: 2
            successThreshold: 1
            timeoutSeconds: 5
---
# Source: vault/templates/server-statefulset.yaml
# StatefulSet to run the actual vault server cluster.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: release-name-vault
  namespace: dsk-middle
  labels:
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
spec:
  serviceName: release-name-vault-internal
  podManagementPolicy: Parallel
  replicas: 1
  updateStrategy:
    type: OnDelete
  selector:
    matchLabels:
      app.kubernetes.io/name: vault
      app.kubernetes.io/instance: release-name
      component: server
  template:
    metadata:
      labels:
        helm.sh/chart: vault-0.22.1
        app.kubernetes.io/name: vault
        app.kubernetes.io/instance: release-name
        component: server
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: datasaker/group
                    operator: In
                    values:
                      - data-kafka
      tolerations:
        - key: dev/data-kafka
          operator: Exists
      terminationGracePeriodSeconds: 10
      serviceAccountName: release-name-vault
      securityContext:
        runAsNonRoot: true
        runAsGroup: 1000
        runAsUser: 100
        fsGroup: 1000
      hostNetwork: false
      volumes:
        - name: config
          configMap:
            name: release-name-vault-config
        - name: home
          emptyDir: {}
      containers:
        - name: vault
          image: hashicorp/vault:1.12.0
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/sh"
            - "-ec"
          args:
            - |
              cp /vault/config/extraconfig-from-values.hcl /tmp/storageconfig.hcl;
              [ -n "${HOST_IP}" ] && sed -Ei "s|HOST_IP|${HOST_IP?}|g" /tmp/storageconfig.hcl;
              [ -n "${POD_IP}" ] && sed -Ei "s|POD_IP|${POD_IP?}|g" /tmp/storageconfig.hcl;
              [ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /tmp/storageconfig.hcl;
              [ -n "${API_ADDR}" ] && sed -Ei "s|API_ADDR|${API_ADDR?}|g" /tmp/storageconfig.hcl;
              [ -n "${TRANSIT_ADDR}" ] && sed -Ei "s|TRANSIT_ADDR|${TRANSIT_ADDR?}|g" /tmp/storageconfig.hcl;
              [ -n "${RAFT_ADDR}" ] && sed -Ei "s|RAFT_ADDR|${RAFT_ADDR?}|g" /tmp/storageconfig.hcl;
              /usr/local/bin/docker-entrypoint.sh vault server -config=/tmp/storageconfig.hcl
          securityContext:
            allowPrivilegeEscalation: false
          env:
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: VAULT_K8S_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: VAULT_K8S_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: VAULT_ADDR
              value: "http://127.0.0.1:8200"
            - name: VAULT_API_ADDR
              value: "http://$(POD_IP):8200"
            - name: SKIP_CHOWN
              value: "true"
            - name: SKIP_SETCAP
              value: "true"
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: VAULT_CLUSTER_ADDR
              value: "https://$(HOSTNAME).release-name-vault-internal:8201"
            - name: HOME
              value: "/home/vault"
          volumeMounts:
            - name: data
              mountPath: /vault/data
            - name: config
              mountPath: /vault/config
            - name: home
              mountPath: /home/vault
          ports:
            - containerPort: 8200
              name: http
            - containerPort: 8201
              name: https-internal
            - containerPort: 8202
              name: http-rep
          readinessProbe:
            # Check status; unsealed vault servers return 0
            # The exit code reflects the seal status:
            #   0 - unsealed
            #   1 - error
            #   2 - sealed
            exec:
              command:
["/bin/sh", "-ec", "vault status -tls-skip-verify"] failureThreshold: 2 initialDelaySeconds: 5 periodSeconds: 5 successThreshold: 1 timeoutSeconds: 3 lifecycle: # Vault container doesn't receive SIGTERM from Kubernetes # and after the grace period ends, Kube sends SIGKILL. This # causes issues with graceful shutdowns such as deregistering itself # from Consul (zombie services). preStop: exec: command: [ "/bin/sh", "-c", # Adding a sleep here to give the pod eviction a # chance to propagate, so requests will not be made # to this pod while it's terminating "sleep 5 && kill -SIGTERM $(pidof vault)", ] volumeClaimTemplates: - metadata: name: data spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi storageClassName: nfs-provisioner-dev --- # Source: vault/templates/injector-mutating-webhook.yaml apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: name: release-name-vault-agent-injector-cfg labels: app.kubernetes.io/name: vault-agent-injector app.kubernetes.io/instance: release-name app.kubernetes.io/managed-by: Helm webhooks: - name: vault.hashicorp.com failurePolicy: Ignore matchPolicy: Exact sideEffects: None timeoutSeconds: 30 admissionReviewVersions: ["v1", "v1beta1"] clientConfig: service: name: release-name-vault-agent-injector-svc namespace: dsk-middle path: "/mutate" caBundle: "" rules: - operations: ["CREATE", "UPDATE"] apiGroups: [""] apiVersions: ["v1"] resources: ["pods"] objectSelector: matchExpressions: - key: app.kubernetes.io/name operator: NotIn values: - vault-agent-injector --- # Source: vault/templates/tests/server-test.yaml apiVersion: v1 kind: Pod metadata: name: "release-name-server-test" namespace: dsk-middle annotations: "helm.sh/hook": test spec: containers: - name: release-name-server-test image: hashicorp/vault:1.12.0 imagePullPolicy: IfNotPresent env: - name: VAULT_ADDR value: http://release-name-vault.dsk-middle.svc:8200 command: - /bin/sh - -c - | echo "Checking for sealed info in 'vault status' output" ATTEMPTS=10 n=0 until [ "$n" -ge $ATTEMPTS ] do echo "Attempt" $n... vault status -format yaml | grep -E '^sealed: (true|false)' && break n=$((n+1)) sleep 5 done if [ $n -ge $ATTEMPTS ]; then echo "timed out looking for sealed info in 'vault status' output" exit 1 fi exit 0 volumeMounts: volumes: restartPolicy: Never