Add kubespray 2.24

This commit is contained in:
변정훈
2024-02-16 17:08:09 +09:00
parent 1fa9b0df4b
commit f69d904725
1423 changed files with 89069 additions and 2 deletions

View File

@@ -0,0 +1,2 @@
---
gcp_pd_csi_controller_replicas: 1

View File

@@ -0,0 +1,47 @@
---
- name: GCP PD CSI Driver | Check if cloud-sa.json exists
  fail:
    msg: "Credentials file cloud-sa.json is mandatory"
  when: gcp_pd_csi_sa_cred_file is not defined or not gcp_pd_csi_sa_cred_file

- name: GCP PD CSI Driver | Copy GCP credentials file
  copy:
    src: "{{ gcp_pd_csi_sa_cred_file }}"
    dest: "{{ kube_config_dir }}/cloud-sa.json"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: GCP PD CSI Driver | Get base64 cloud-sa.json
  slurp:
    src: "{{ kube_config_dir }}/cloud-sa.json"
  register: gcp_cred_secret
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: GCP PD CSI Driver | Generate Manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: 0644
  with_items:
    - {name: gcp-pd-csi-cred-secret, file: gcp-pd-csi-cred-secret.yml}
    - {name: gcp-pd-csi-setup, file: gcp-pd-csi-setup.yml}
    - {name: gcp-pd-csi-controller, file: gcp-pd-csi-controller.yml}
    - {name: gcp-pd-csi-node, file: gcp-pd-csi-node.yml}
    - {name: gcp-pd-csi-sc-regional, file: gcp-pd-csi-sc-regional.yml}
    - {name: gcp-pd-csi-sc-zonal, file: gcp-pd-csi-sc-zonal.yml}
  register: gcp_pd_csi_manifests
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: GCP PD CSI Driver | Apply Manifests
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items:
    - "{{ gcp_pd_csi_manifests.results }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - not item is skipped
  loop_control:
    label: "{{ item.item.file }}"
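For reference, a minimal group_vars sketch showing how these tasks are typically driven. The gcp_pd_csi_enabled toggle is an assumption here (it is defined in kubespray's addon defaults, not in this diff); gcp_pd_csi_sa_cred_file and gcp_pd_csi_controller_replicas are the variables consumed by the tasks and defaults above, and the file path is only a placeholder.

# group_vars/k8s_cluster/addons.yml (illustrative sketch)
gcp_pd_csi_enabled: true                            # assumed addon toggle, defined outside this diff
gcp_pd_csi_sa_cred_file: /path/to/cloud-sa.json     # placeholder path on the Ansible control host
gcp_pd_csi_controller_replicas: 2                   # overrides the default of 1 from the role defaults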

View File

@@ -0,0 +1,165 @@
kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-gce-pd-controller
  namespace: kube-system
spec:
  replicas: {{ gcp_pd_csi_controller_replicas }}
  selector:
    matchLabels:
      app: gcp-compute-persistent-disk-csi-driver
  template:
    metadata:
      labels:
        app: gcp-compute-persistent-disk-csi-driver
    spec:
      # Host network must be used for interaction with Workload Identity in GKE
      # since it replaces GCE Metadata Server with GKE Metadata Server. Remove
      # this requirement when issue is resolved and before any exposure of
      # metrics ports
      hostNetwork: true
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: csi-gce-pd-controller-sa
      priorityClassName: csi-gce-pd-controller
      containers:
        - name: csi-provisioner
          image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }}
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--feature-gates=Topology=true"
            - "--http-endpoint=:22011"
            - "--leader-election-namespace=$(PDCSI_NAMESPACE)"
            - "--timeout=250s"
            - "--extra-create-metadata"
            # - "--run-controller-service=false"  # disable the controller service of the CSI driver
            # - "--run-node-service=false"  # disable the node service of the CSI driver
            - "--leader-election"
            - "--default-fstype=ext4"
            - "--controller-publish-readonly=true"
          env:
            - name: PDCSI_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - containerPort: 22011
              name: http-endpoint
              protocol: TCP
          livenessProbe:
            failureThreshold: 1
            httpGet:
              path: /healthz/leader-election
              port: http-endpoint
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 20
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-attacher
          image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }}
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--http-endpoint=:22012"
            - "--leader-election"
            - "--leader-election-namespace=$(PDCSI_NAMESPACE)"
            - "--timeout=250s"
          env:
            - name: PDCSI_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - containerPort: 22012
              name: http-endpoint
              protocol: TCP
          livenessProbe:
            failureThreshold: 1
            httpGet:
              path: /healthz/leader-election
              port: http-endpoint
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 20
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }}
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--http-endpoint=:22013"
            - "--leader-election"
            - "--leader-election-namespace=$(PDCSI_NAMESPACE)"
            - "--handle-volume-inuse-error=false"
          env:
            - name: PDCSI_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - containerPort: 22013
              name: http-endpoint
              protocol: TCP
          livenessProbe:
            failureThreshold: 1
            httpGet:
              path: /healthz/leader-election
              port: http-endpoint
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 20
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-snapshotter
          image: {{ csi_snapshotter_image_repo }}:{{ csi_snapshotter_image_tag }}
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--metrics-address=:22014"
            - "--leader-election"
            - "--leader-election-namespace=$(PDCSI_NAMESPACE)"
            - "--timeout=300s"
          env:
            - name: PDCSI_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: gce-pd-driver
          # Don't change base image without changing pdImagePlaceholder in
          # test/k8s-integration/main.go
          image: {{ gcp_pd_csi_plugin_image_repo }}:{{ gcp_pd_csi_plugin_image_tag }}
          args:
            - "--v=5"
            - "--endpoint=unix:/csi/csi.sock"
          env:
            - name: GOOGLE_APPLICATION_CREDENTIALS
              value: "/etc/cloud-sa/cloud-sa.json"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: cloud-sa-volume
              readOnly: true
              mountPath: "/etc/cloud-sa"
      volumes:
        - name: socket-dir
          emptyDir: {}
        - name: cloud-sa-volume
          secret:
            secretName: cloud-sa
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: pd.csi.storage.gke.io
spec:
  attachRequired: true
  podInfoOnMount: false

View File

@@ -0,0 +1,8 @@
---
kind: Secret
apiVersion: v1
metadata:
  name: cloud-sa
  namespace: kube-system
data:
  cloud-sa.json: {{ gcp_cred_secret.content }}
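Because slurp returns base64-encoded content, the template can place gcp_cred_secret.content directly into data without an extra b64encode filter. A rendered result would look roughly like the sketch below; the value is a placeholder that merely decodes to {"type": "service_account", ...}, not a real key.

---
kind: Secret
apiVersion: v1
metadata:
  name: cloud-sa
  namespace: kube-system
data:
  cloud-sa.json: eyJ0eXBlIjogInNlcnZpY2VfYWNjb3VudCIsIC4uLn0=  # placeholder, not real credentials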

View File

@@ -0,0 +1,112 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-gce-pd-node
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: gcp-compute-persistent-disk-csi-driver
  template:
    metadata:
      labels:
        app: gcp-compute-persistent-disk-csi-driver
    spec:
      # Host network must be used for interaction with Workload Identity in GKE
      # since it replaces GCE Metadata Server with GKE Metadata Server. Remove
      # this requirement when issue is resolved and before any exposure of
      # metrics ports.
      hostNetwork: true
      priorityClassName: csi-gce-pd-node
      serviceAccountName: csi-gce-pd-node-sa
      containers:
        - name: csi-driver-registrar
          image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }}
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/pd.csi.storage.gke.io/csi.sock"
          lifecycle:
            preStop:
              exec:
                command: ["/bin/sh", "-c", "rm -rf /registration/pd.csi.storage.gke.io /registration/pd.csi.storage.gke.io-reg.sock"]
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: gce-pd-driver
          securityContext:
            privileged: true
          # Don't change base image without changing pdImagePlaceholder in
          # test/k8s-integration/main.go
          image: {{ gcp_pd_csi_plugin_image_repo }}:{{ gcp_pd_csi_plugin_image_tag }}
          args:
            - "--v=5"
            - "--endpoint=unix:/csi/csi.sock"
            - "--run-controller-service=false"
          volumeMounts:
            - name: kubelet-dir
              mountPath: /var/lib/kubelet
              mountPropagation: "Bidirectional"
            - name: plugin-dir
              mountPath: /csi
            - name: device-dir
              mountPath: /dev
            # The following mounts are required to trigger host udevadm from
            # container
            - name: udev-rules-etc
              mountPath: /etc/udev
            - name: udev-rules-lib
              mountPath: /lib/udev
            - name: udev-socket
              mountPath: /run/udev
            - name: sys
              mountPath: /sys
      nodeSelector:
        kubernetes.io/os: linux
      volumes:
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: kubelet-dir
          hostPath:
            path: /var/lib/kubelet
            type: Directory
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins/pd.csi.storage.gke.io/
            type: DirectoryOrCreate
        - name: device-dir
          hostPath:
            path: /dev
            type: Directory
        # The following mounts are required to trigger host udevadm from
        # container
        - name: udev-rules-etc
          hostPath:
            path: /etc/udev
            type: Directory
        - name: udev-rules-lib
          hostPath:
            path: /lib/udev
            type: Directory
        - name: udev-socket
          hostPath:
            path: /run/udev
            type: Directory
        - name: sys
          hostPath:
            path: /sys
            type: Directory
      # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
      # See "special case". This will tolerate everything. Node component should
      # be scheduled on all nodes.
      tolerations:
        - operator: Exists

View File

@@ -0,0 +1,9 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-gce-pd-regional
provisioner: pd.csi.storage.gke.io
parameters:
  type: pd-balanced
  replication-type: regional-pd
volumeBindingMode: WaitForFirstConsumer

View File

@@ -0,0 +1,8 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-gce-pd-zonal
provisioner: pd.csi.storage.gke.io
parameters:
  type: pd-balanced
volumeBindingMode: WaitForFirstConsumer
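Both StorageClasses use volumeBindingMode: WaitForFirstConsumer, so a disk is provisioned only once a pod referencing the claim is scheduled and its zone (or, for regional-pd, zone pair) is known. A hypothetical PVC consuming the zonal class follows; the claim name, namespace, and size are illustrative and not part of this commit.

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-pd-claim        # illustrative name
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce          # a GCE PD attaches to a single node at a time
  storageClassName: csi-gce-pd-zonal
  resources:
    requests:
      storage: 10Gi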

View File

@@ -0,0 +1,241 @@
##### Node Service Account, Roles, RoleBindings
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-gce-pd-node-sa
  namespace: kube-system
---
##### Controller Service Account, Roles, Rolebindings
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-gce-pd-controller-sa
  namespace: kube-system
---
# xref: https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list"]
  # Access to volumeattachments is only needed when the CSI driver
  # has the PUBLISH_UNPUBLISH_VOLUME controller capability.
  # In that case, external-provisioner will watch volumeattachments
  # to determine when it is safe to delete a volume.
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-controller-provisioner-binding
subjects:
  - kind: ServiceAccount
    name: csi-gce-pd-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-gce-pd-provisioner-role
  apiGroup: rbac.authorization.k8s.io
---
# xref: https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-attacher-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-controller-attacher-binding
subjects:
  - kind: ServiceAccount
    name: csi-gce-pd-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-gce-pd-attacher-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: csi-gce-pd-controller
value: 900000000
globalDefault: false
description: "This priority class should be used for the GCE PD CSI driver controller deployment only."
---
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: csi-gce-pd-node
value: 900001000
globalDefault: false
description: "This priority class should be used for the GCE PD CSI driver node deployment only."
---
# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-resizer-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["update", "patch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  # If handle-volume-inuse-error=true, the pod specific rbac is needed
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-resizer-binding
subjects:
  - kind: ServiceAccount
    name: csi-gce-pd-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-gce-pd-resizer-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: csi-gce-pd-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: csi-gce-pd-node-deploy
subjects:
  - kind: ServiceAccount
    name: csi-gce-pd-controller-sa
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: csi-gce-pd-snapshotter-role
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  # Secrets resource omitted since GCE PD snapshots does not require them
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-controller-snapshotter-binding
subjects:
  - kind: ServiceAccount
    name: csi-gce-pd-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-gce-pd-snapshotter-role
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-leaderelection-role
  namespace: kube-system
  labels:
    k8s-app: gcp-compute-persistent-disk-csi-driver
rules:
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-gce-pd-controller-leaderelection-binding
  namespace: kube-system
  labels:
    k8s-app: gcp-compute-persistent-disk-csi-driver
subjects:
  - kind: ServiceAccount
    name: csi-gce-pd-controller-sa
    namespace: kube-system
roleRef:
  kind: Role
  name: csi-gce-pd-leaderelection-role
  apiGroup: rbac.authorization.k8s.io