havelight-ee
2023-05-30 14:44:26 +09:00
parent 9a3174deef
commit 4c32a7239d
2598 changed files with 164595 additions and 487 deletions


@@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
reviewers:
- alijahnas
- luckySB


@@ -0,0 +1,11 @@
---
aws_ebs_csi_enable_volume_scheduling: true
aws_ebs_csi_enable_volume_snapshot: false
aws_ebs_csi_enable_volume_resizing: false
aws_ebs_csi_controller_replicas: 1
aws_ebs_csi_plugin_image_tag: latest
# Add annotations to ebs_csi_controller. Useful if using kube2iam for role assumption
# aws_ebs_csi_annotations:
# - key: iam.amazonaws.com/role
# value: your-ebs-role-arn
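For reference, a minimal sketch of overriding these defaults from inventory group_vars; the file path in the comment is hypothetical, the variable names are the ones defined above:

# e.g. inventory/mycluster/group_vars/all/aws.yml (illustrative path)
aws_ebs_csi_enable_volume_snapshot: true
aws_ebs_csi_controller_replicas: 2
aws_ebs_csi_annotations:
  - key: iam.amazonaws.com/role
    value: your-ebs-role-arn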


@@ -0,0 +1,26 @@
---
- name: AWS CSI Driver | Generate Manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: aws-ebs-csi-driver, file: aws-ebs-csi-driver.yml}
- {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice-rbac.yml}
- {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice.yml}
- {name: aws-ebs-csi-nodeservice, file: aws-ebs-csi-nodeservice.yml}
register: aws_csi_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: AWS CSI Driver | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ aws_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"


@@ -0,0 +1,180 @@
# Controller Service
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-csi-controller-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# The permissions in this ClusterRole are tightly coupled with the version of csi-attacher used. More information about this can be found in kubernetes-csi/external-attacher.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-attacher-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-attacher-role
apiGroup: rbac.authorization.k8s.io
{% if aws_ebs_csi_enable_volume_snapshot %}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-snapshotter-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-snapshotter-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
{% endif %}
{% if aws_ebs_csi_enable_volume_resizing %}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-resizer-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-resizer-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-resizer-role
apiGroup: rbac.authorization.k8s.io
{% endif %}


@@ -0,0 +1,132 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: ebs-csi-controller
namespace: kube-system
spec:
replicas: {{ aws_ebs_csi_controller_replicas }}
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/name: aws-ebs-csi-driver
template:
metadata:
labels:
app: ebs-csi-controller
app.kubernetes.io/name: aws-ebs-csi-driver
{% if aws_ebs_csi_annotations is defined %}
annotations:
{% for annotation in aws_ebs_csi_annotations %}
{{ annotation.key }}: {{ annotation.value }}
{% endfor %}
{% endif %}
spec:
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ebs-csi-controller-sa
priorityClassName: system-cluster-critical
containers:
- name: ebs-plugin
image: {{ aws_ebs_csi_plugin_image_repo }}:{{ aws_ebs_csi_plugin_image_tag }}
args:
- --endpoint=$(CSI_ENDPOINT)
{% if aws_ebs_csi_extra_volume_tags is defined %}
- --extra-volume-tags={{ aws_ebs_csi_extra_volume_tags }}
{% endif %}
- --logtostderr
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-secret
key: key_id
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-secret
key: access_key
optional: true
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
ports:
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 5
- name: csi-provisioner
image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }}
args:
- --csi-address=$(ADDRESS)
- --v=5
{% if aws_ebs_csi_enable_volume_scheduling %}
- --feature-gates=Topology=true
{% endif %}
- --leader-election=true
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }}
args:
- --csi-address=$(ADDRESS)
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{% if aws_ebs_csi_enable_volume_snapshot %}
- name: csi-snapshotter
image: {{ csi_snapshotter_image_repo }}:{{ csi_snapshotter_image_tag }}
args:
- --csi-address=$(ADDRESS)
- --timeout=15s
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{% endif %}
{% if aws_ebs_csi_enable_volume_resizing %}
- name: csi-resizer
image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- --csi-address=$(ADDRESS)
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{% endif %}
- name: liveness-probe
image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }}
args:
- --csi-address=/csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
volumes:
- name: socket-dir
emptyDir: {}


@@ -0,0 +1,8 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: ebs.csi.aws.com
spec:
attachRequired: true
podInfoOnMount: false


@@ -0,0 +1,101 @@
---
# Node Service
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ebs-csi-node
namespace: kube-system
spec:
selector:
matchLabels:
app: ebs-csi-node
app.kubernetes.io/name: aws-ebs-csi-driver
template:
metadata:
labels:
app: ebs-csi-node
app.kubernetes.io/name: aws-ebs-csi-driver
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
priorityClassName: system-node-critical
containers:
- name: ebs-plugin
securityContext:
privileged: true
image: {{ aws_ebs_csi_plugin_image_repo }}:{{ aws_ebs_csi_plugin_image_tag }}
args:
- --endpoint=$(CSI_ENDPOINT)
{% if aws_ebs_csi_extra_volume_tags is defined %}
- --extra-volume-tags={{ aws_ebs_csi_extra_volume_tags }}
{% endif %}
- --logtostderr
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:/csi/csi.sock
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /csi
- name: device-dir
mountPath: /dev
ports:
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 5
- name: node-driver-registrar
image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }}
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=5
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock"]
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: liveness-probe
image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }}
args:
- --csi-address=/csi/csi.sock
volumeMounts:
- name: plugin-dir
mountPath: /csi
volumes:
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: device-dir
hostPath:
path: /dev
type: Directory


@@ -0,0 +1,6 @@
---
azure_csi_use_instance_metadata: true
azure_csi_controller_replicas: 2
azure_csi_plugin_image_tag: latest
azure_csi_controller_affinity: {}
azure_csi_node_affinity: {}
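Both affinity variables are plain dictionaries that the templates later in this commit render with to_nice_yaml into the controller Deployment and the node DaemonSet. A minimal sketch of an override, using standard Kubernetes nodeAffinity syntax (the match expression is only an example):

azure_csi_controller_affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: node-role.kubernetes.io/control-plane
              operator: Exists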


@@ -0,0 +1,54 @@
---
- name: Azure CSI Driver | check azure_csi_tenant_id value
fail:
msg: "azure_csi_tenant_id is missing"
when: azure_csi_tenant_id is not defined or not azure_csi_tenant_id
- name: Azure CSI Driver | check azure_csi_subscription_id value
fail:
msg: "azure_csi_subscription_id is missing"
when: azure_csi_subscription_id is not defined or not azure_csi_subscription_id
- name: Azure CSI Driver | check azure_csi_aad_client_id value
fail:
msg: "azure_csi_aad_client_id is missing"
when: azure_csi_aad_client_id is not defined or not azure_csi_aad_client_id
- name: Azure CSI Driver | check azure_csi_aad_client_secret value
fail:
msg: "azure_csi_aad_client_secret is missing"
when: azure_csi_aad_client_secret is not defined or not azure_csi_aad_client_secret
- name: Azure CSI Driver | check azure_csi_resource_group value
fail:
msg: "azure_csi_resource_group is missing"
when: azure_csi_resource_group is not defined or not azure_csi_resource_group
- name: Azure CSI Driver | check azure_csi_location value
fail:
msg: "azure_csi_location is missing"
when: azure_csi_location is not defined or not azure_csi_location
- name: Azure CSI Driver | check azure_csi_subnet_name value
fail:
msg: "azure_csi_subnet_name is missing"
when: azure_csi_subnet_name is not defined or not azure_csi_subnet_name
- name: Azure CSI Driver | check azure_csi_security_group_name value
fail:
msg: "azure_csi_security_group_name is missing"
when: azure_csi_security_group_name is not defined or not azure_csi_security_group_name
- name: Azure CSI Driver | check azure_csi_vnet_name value
fail:
msg: "azure_csi_vnet_name is missing"
when: azure_csi_vnet_name is not defined or not azure_csi_vnet_name
- name: Azure CSI Driver | check azure_csi_vnet_resource_group value
fail:
msg: "azure_csi_vnet_resource_group is missing"
when: azure_csi_vnet_resource_group is not defined or not azure_csi_vnet_resource_group
- name: "Azure CSI Driver | check azure_csi_use_instance_metadata is a bool"
assert:
that: azure_csi_use_instance_metadata | type_debug == 'bool'
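For these checks to pass, the Azure service principal and network details must be supplied, typically via group_vars. A sketch with placeholder values only; the variable names are the ones validated above:

azure_csi_tenant_id: "<tenant-uuid>"
azure_csi_subscription_id: "<subscription-uuid>"
azure_csi_aad_client_id: "<aad-app-client-id>"
azure_csi_aad_client_secret: "<aad-app-client-secret>"
azure_csi_resource_group: "<resource-group>"
azure_csi_location: "<azure-region>"
azure_csi_vnet_name: "<vnet-name>"
azure_csi_vnet_resource_group: "<vnet-resource-group>"
azure_csi_subnet_name: "<subnet-name>"
azure_csi_security_group_name: "<security-group-name>"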


@@ -0,0 +1,44 @@
---
- include_tasks: azure-credential-check.yml
- name: Azure CSI Driver | Write Azure CSI cloud-config
template:
src: "azure-csi-cloud-config.j2"
dest: "{{ kube_config_dir }}/azure_csi_cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Azure CSI Driver | Get base64 cloud-config
slurp:
src: "{{ kube_config_dir }}/azure_csi_cloud_config"
register: cloud_config_secret
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Azure CSI Driver | Generate Manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: azure-csi-azuredisk-driver, file: azure-csi-azuredisk-driver.yml}
- {name: azure-csi-cloud-config-secret, file: azure-csi-cloud-config-secret.yml}
- {name: azure-csi-azuredisk-controller, file: azure-csi-azuredisk-controller-rbac.yml}
- {name: azure-csi-azuredisk-controller, file: azure-csi-azuredisk-controller.yml}
- {name: azure-csi-azuredisk-node-rbac, file: azure-csi-azuredisk-node-rbac.yml}
- {name: azure-csi-azuredisk-node, file: azure-csi-azuredisk-node.yml}
register: azure_csi_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Azure CSI Driver | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ azure_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"


@@ -0,0 +1,230 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-azuredisk-controller-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-attacher-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-attacher-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-cluster-driver-registrar-role
rules:
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csidrivers"]
verbs: ["create", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-driver-registrar-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-cluster-driver-registrar-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-snapshotter-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-snapshotter-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-resizer-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-resizer-role
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-resizer-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-controller-secret-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-controller-secret-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-azuredisk-controller-secret-role
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,179 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-azuredisk-controller
namespace: kube-system
spec:
replicas: {{ azure_csi_controller_replicas }}
selector:
matchLabels:
app: csi-azuredisk-controller
template:
metadata:
labels:
app: csi-azuredisk-controller
spec:
hostNetwork: true
serviceAccountName: csi-azuredisk-controller-sa
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
effect: "NoSchedule"
{% if azure_csi_controller_affinity %}
affinity:
{{ azure_csi_controller_affinity | to_nice_yaml | indent(width=8) }}
{% endif %}
containers:
- name: csi-provisioner
image: {{ azure_csi_image_repo }}/csi-provisioner:{{ azure_csi_provisioner_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--feature-gates=Topology=true"
- "--csi-address=$(ADDRESS)"
- "--v=2"
- "--timeout=15s"
- "--leader-election"
- "--worker-threads=40"
- "--extra-create-metadata=true"
- "--strict-topology=true"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: csi-attacher
image: {{ azure_csi_image_repo }}/csi-attacher:{{ azure_csi_attacher_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "-v=2"
- "-csi-address=$(ADDRESS)"
- "-timeout=600s"
- "-leader-election"
- "-worker-threads=500"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: csi-snapshotter
image: {{ azure_csi_image_repo }}/csi-snapshotter:{{ azure_csi_snapshotter_image_tag }}
args:
- "-csi-address=$(ADDRESS)"
- "-leader-election"
- "-v=2"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: csi-resizer
image: {{ azure_csi_image_repo }}/csi-resizer:{{ azure_csi_resizer_image_tag }}
args:
- "-csi-address=$(ADDRESS)"
- "-v=2"
- "-leader-election"
- '-handle-volume-inuse-error=false'
- "-timeout=60s"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: {{ azure_csi_image_repo }}/livenessprobe:{{ azure_csi_livenessprobe_image_tag }}
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29602
- --v=2
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: azuredisk
image: {{ azure_csi_plugin_image_repo }}/azuredisk-csi:{{ azure_csi_plugin_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metrics-address=0.0.0.0:29604"
- "--disable-avset-nodes=true"
- "--drivername=disk.csi.azure.com"
- "--cloud-config-secret-name=cloud-config"
- "--cloud-config-secret-namespace=kube-system"
ports:
- containerPort: 29602
name: healthz
protocol: TCP
- containerPort: 29604
name: metrics
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
env:
- name: AZURE_CREDENTIAL_FILE
value: "/etc/kubernetes/azure.json"
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /etc/kubernetes/
name: azure-cred
readOnly: true
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- name: socket-dir
emptyDir: {}
- name: azure-cred
secret:
secretName: cloud-config


@@ -0,0 +1,10 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: disk.csi.azure.com
spec:
attachRequired: true
podInfoOnMount: true
volumeLifecycleModes: # added in Kubernetes 1.16
- Persistent


@@ -0,0 +1,30 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-azuredisk-node-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-node-secret-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-node-secret-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-node-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-azuredisk-node-secret-role
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,168 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-azuredisk-node
namespace: kube-system
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: csi-azuredisk-node
template:
metadata:
labels:
app: csi-azuredisk-node
spec:
hostNetwork: true
dnsPolicy: Default
serviceAccountName: csi-azuredisk-node-sa
nodeSelector:
kubernetes.io/os: linux
{% if azure_csi_node_affinity %}
affinity:
{{ azure_csi_node_affinity | to_nice_yaml | indent(width=8) }}
{% endif %}
priorityClassName: system-node-critical
tolerations:
- operator: Exists
containers:
- name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
image: {{ azure_csi_image_repo }}/livenessprobe:{{ azure_csi_livenessprobe_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29603
- --v=2
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: {{ azure_csi_image_repo }}/csi-node-driver-registrar:{{ azure_csi_node_registrar_image_tag }}
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=2
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: azuredisk
image: {{ azure_csi_plugin_image_repo }}/azuredisk-csi:{{ azure_csi_plugin_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(KUBE_NODE_NAME)"
- "--metrics-address=0.0.0.0:29605"
- "--enable-perf-optimization=true"
- "--drivername=disk.csi.azure.com"
- "--volume-attach-limit=-1"
- "--cloud-config-secret-name=cloud-config"
- "--cloud-config-secret-namespace=kube-system"
ports:
- containerPort: 29603
name: healthz
protocol: TCP
- containerPort: 29605
name: metrics
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
env:
- name: AZURE_CREDENTIAL_FILE
value: "/etc/kubernetes/azure.json"
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet/
mountPropagation: Bidirectional
name: mountpoint-dir
- mountPath: /etc/kubernetes/
name: azure-cred
- mountPath: /dev
name: device-dir
- mountPath: /sys/bus/scsi/devices
name: sys-devices-dir
- mountPath: /sys/class/scsi_host/
name: scsi-host-dir
resources:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/disk.csi.azure.com
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet/
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry/
type: DirectoryOrCreate
name: registration-dir
- secret:
defaultMode: 0644
secretName: cloud-config
name: azure-cred
- hostPath:
path: /dev
type: Directory
name: device-dir
- hostPath:
path: /sys/bus/scsi/devices
type: Directory
name: sys-devices-dir
- hostPath:
path: /sys/class/scsi_host/
type: Directory
name: scsi-host-dir


@@ -0,0 +1,7 @@
kind: Secret
apiVersion: v1
metadata:
name: cloud-config
namespace: kube-system
data:
azure.json: {{ cloud_config_secret.content }}


@@ -0,0 +1,14 @@
{
"cloud":"AzurePublicCloud",
"tenantId": "{{ azure_csi_tenant_id }}",
"subscriptionId": "{{ azure_csi_subscription_id }}",
"aadClientId": "{{ azure_csi_aad_client_id }}",
"aadClientSecret": "{{ azure_csi_aad_client_secret }}",
"location": "{{ azure_csi_location }}",
"resourceGroup": "{{ azure_csi_resource_group }}",
"vnetName": "{{ azure_csi_vnet_name }}",
"vnetResourceGroup": "{{ azure_csi_vnet_resource_group }}",
"subnetName": "{{ azure_csi_subnet_name }}",
"securityGroupName": "{{ azure_csi_security_group_name }}",
"useInstanceMetadata": {{ azure_csi_use_instance_metadata }},
}


@@ -0,0 +1,30 @@
---
# To access Cinder, the CSI controller needs credentials for the
# OpenStack APIs. By default these values are read from the
# environment.
cinder_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
cinder_username: "{{ lookup('env','OS_USERNAME') }}"
cinder_password: "{{ lookup('env','OS_PASSWORD') }}"
cinder_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID') }}"
cinder_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME') }}"
cinder_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET') }}"
cinder_region: "{{ lookup('env','OS_REGION_NAME') }}"
cinder_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}"
cinder_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}"
cinder_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
cinder_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
cinder_cacert: "{{ lookup('env','OS_CACERT') }}"
# For now, only Cinder v3 is supported by the Cinder CSI driver
cinder_blockstorage_version: "v3"
cinder_csi_controller_replicas: 1
# Optional. Set to true to rescan the block device and verify its size before
# expanding the filesystem.
# Not all hypervisors have a /sys/class/block/XXX/device/rescan location, so if
# you enable this option and your hypervisor doesn't support it, you'll get a
# warning log on each resize event. It is recommended to disable this option in
# that case.
# Defaults to false
# cinder_csi_rescan_on_resize: true
cinder_tolerations: []
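Because the defaults above use lookup('env', ...), the usual workflow is to source an OpenStack RC file before running the playbook; alternatively the cinder_* variables can be set directly in group_vars. A sketch using application credentials, with placeholder values only:

cinder_auth_url: "https://keystone.example.com:5000/v3"
cinder_application_credential_name: "kubespray-cinder-csi"
cinder_application_credential_id: "<credential-id>"
cinder_application_credential_secret: "<credential-secret>"
cinder_region: "RegionOne"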


@@ -0,0 +1,59 @@
---
- name: Cinder CSI Driver | check cinder_auth_url value
fail:
msg: "cinder_auth_url is missing"
when: cinder_auth_url is not defined or not cinder_auth_url
- name: Cinder CSI Driver | check cinder_username or cinder_application_credential_name value
fail:
msg: "you must either set cinder_username or cinder_application_credential_name"
when:
- cinder_username is not defined or not cinder_username
- cinder_application_credential_name is not defined or not cinder_application_credential_name
- name: Cinder CSI Driver | check cinder_application_credential_id value
fail:
msg: "cinder_application_credential_id is missing"
when:
- cinder_application_credential_name is defined
- cinder_application_credential_name|length > 0
- cinder_application_credential_id is not defined or not cinder_application_credential_id
- name: Cinder CSI Driver | check cinder_application_credential_secret value
fail:
msg: "cinder_application_credential_secret is missing"
when:
- cinder_application_credential_name is defined
- cinder_application_credential_name|length > 0
- cinder_application_credential_secret is not defined or not cinder_application_credential_secret
- name: Cinder CSI Driver | check cinder_password value
fail:
msg: "cinder_password is missing"
when:
- cinder_username is defined
- cinder_username|length > 0
- cinder_application_credential_name is not defined or not cinder_application_credential_name
- cinder_application_credential_secret is not defined or not cinder_application_credential_secret
- cinder_password is not defined or not cinder_password
- name: Cinder CSI Driver | check cinder_region value
fail:
msg: "cinder_region is missing"
when: cinder_region is not defined or not cinder_region
- name: Cinder CSI Driver | check cinder_tenant_id value
fail:
msg: "one of cinder_tenant_id or cinder_tenant_name must be specified"
when:
- cinder_tenant_id is not defined or not cinder_tenant_id
- cinder_tenant_name is not defined or not cinder_tenant_name
- cinder_application_credential_name is not defined or not cinder_application_credential_name
- name: Cinder CSI Driver | check cinder_domain_id value
fail:
msg: "one of cinder_domain_id or cinder_domain_name must be specified"
when:
- cinder_domain_id is not defined or not cinder_domain_id
- cinder_domain_name is not defined or not cinder_domain_name
- cinder_application_credential_name is not defined or not cinder_application_credential_name


@@ -0,0 +1,11 @@
---
# include to work around a mitogen issue
# https://github.com/dw/mitogen/issues/663
- name: Cinder CSI Driver | Write cacert file
copy:
src: "{{ cinder_cacert }}"
dest: "{{ kube_config_dir }}/cinder-cacert.pem"
group: "{{ kube_cert_group }}"
mode: 0640
delegate_to: "{{ delegate_host_to_write_cacert }}"


@@ -0,0 +1,56 @@
---
- include_tasks: cinder-credential-check.yml
- name: Cinder CSI Driver | Write cacert file
include_tasks: cinder-write-cacert.yml
run_once: true
loop: "{{ groups['k8s_cluster'] }}"
loop_control:
loop_var: delegate_host_to_write_cacert
when:
- inventory_hostname in groups['k8s_cluster']
- cinder_cacert is defined
- cinder_cacert | length > 0
- name: Cinder CSI Driver | Write Cinder cloud-config
template:
src: "cinder-csi-cloud-config.j2"
dest: "{{ kube_config_dir }}/cinder_cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Cinder CSI Driver | Get base64 cloud-config
slurp:
src: "{{ kube_config_dir }}/cinder_cloud_config"
register: cloud_config_secret
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Cinder CSI Driver | Generate Manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: cinder-csi-driver, file: cinder-csi-driver.yml}
- {name: cinder-csi-cloud-config-secret, file: cinder-csi-cloud-config-secret.yml}
- {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin-rbac.yml}
- {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin.yml}
- {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin-rbac.yml}
- {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin.yml}
- {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml}
register: cinder_csi_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Cinder CSI Driver | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ cinder_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"


@@ -0,0 +1,10 @@
# This YAML file contains secret objects,
# which are necessary to run the csi cinder plugin.
kind: Secret
apiVersion: v1
metadata:
name: cloud-config
namespace: kube-system
data:
cloud.conf: {{ cloud_config_secret.content }}


@@ -0,0 +1,44 @@
[Global]
auth-url="{{ cinder_auth_url }}"
{% if cinder_application_credential_id|length == 0 and cinder_application_credential_name|length == 0 %}
username="{{ cinder_username }}"
password="{{ cinder_password }}"
{% endif %}
{% if cinder_application_credential_id|length > 0 %}
application-credential-id={{ cinder_application_credential_id }}
{% endif %}
{% if cinder_application_credential_name|length > 0 %}
application-credential-name={{ cinder_application_credential_name }}
{% endif %}
{% if cinder_application_credential_secret|length > 0 %}
application-credential-secret={{ cinder_application_credential_secret }}
{% endif %}
region="{{ cinder_region }}"
{% if cinder_tenant_id|length > 0 %}
tenant-id="{{ cinder_tenant_id }}"
{% endif %}
{% if cinder_tenant_name|length > 0 %}
tenant-name="{{ cinder_tenant_name }}"
{% endif %}
{% if cinder_domain_name|length > 0 %}
domain-name="{{ cinder_domain_name }}"
{% elif cinder_domain_id|length > 0 %}
domain-id="{{ cinder_domain_id }}"
{% endif %}
{% if cinder_cacert|length > 0 %}
ca-file="{{ kube_config_dir }}/cinder-cacert.pem"
{% endif %}
[BlockStorage]
{% if cinder_blockstorage_version is defined %}
bs-version={{ cinder_blockstorage_version }}
{% endif %}
{% if cinder_csi_ignore_volume_az is defined %}
ignore-volume-az={{ cinder_csi_ignore_volume_az | bool }}
{% endif %}
{% if node_volume_attach_limit is defined and node_volume_attach_limit != "" %}
node-volume-attach-limit="{{ node_volume_attach_limit }}"
{% endif %}
{% if cinder_csi_rescan_on_resize is defined %}
rescan-on-resize={{ cinder_csi_rescan_on_resize | bool }}
{% endif %}


@@ -0,0 +1,179 @@
# This YAML file contains RBAC API objects,
# which are necessary to run the csi controller plugin
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-cinder-controller-sa
namespace: kube-system
---
# external attacher
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-attacher-role
apiGroup: rbac.authorization.k8s.io
---
# external Provisioner
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# external snapshotter
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
# External Resizer
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-role
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-resizer-role
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,156 @@
# This YAML file contains CSI Controller Plugin Sidecars
# external-attacher, external-provisioner, external-snapshotter
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-cinder-controllerplugin
namespace: kube-system
spec:
replicas: {{ cinder_csi_controller_replicas }}
selector:
matchLabels:
app: csi-cinder-controllerplugin
template:
metadata:
labels:
app: csi-cinder-controllerplugin
spec:
serviceAccountName: csi-cinder-controller-sa
containers:
- name: csi-attacher
image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %}
- --leader-election=true
{% endif %}
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-provisioner
image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
- "--default-fstype=ext4"
- "--extra-create-metadata"
{% if cinder_topology is defined and cinder_topology %}
- --feature-gates=Topology=true
{% endif %}
{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %}
- "--leader-election=true"
{% endif %}
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-snapshotter
image: {{ csi_snapshotter_image_repo }}:{{ csi_snapshotter_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
- "--extra-create-metadata"
{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %}
- --leader-election=true
{% endif %}
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- name: csi-resizer
image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
- "--handle-volume-inuse-error=false"
{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %}
- --leader-election=true
{% endif %}
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: liveness-probe
image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- name: cinder-csi-plugin
image: {{ cinder_csi_plugin_image_repo }}:{{ cinder_csi_plugin_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- /bin/cinder-csi-plugin
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
- "--cluster=$(CLUSTER_NAME)"
env:
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
- name: CLUSTER_NAME
value: kubernetes
ports:
- containerPort: 9808
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 10
periodSeconds: 60
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: secret-cinderplugin
mountPath: /etc/config
readOnly: true
- name: ca-certs
mountPath: /etc/ssl/certs
readOnly: true
{% if cinder_cacert is defined and cinder_cacert != "" %}
- name: cinder-cacert
mountPath: {{ kube_config_dir }}/cinder-cacert.pem
readOnly: true
{% endif %}
volumes:
- name: socket-dir
emptyDir: {}
- name: secret-cinderplugin
secret:
secretName: cloud-config
- name: ca-certs
hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
{% if cinder_cacert is defined and cinder_cacert != "" %}
- name: cinder-cacert
hostPath:
path: {{ kube_config_dir }}/cinder-cacert.pem
type: FileOrCreate
{% endif %}


@@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: cinder.csi.openstack.org
spec:
attachRequired: true
podInfoOnMount: true
volumeLifecycleModes:
- Persistent
- Ephemeral


@@ -0,0 +1,38 @@
# This YAML defines all API objects to create RBAC roles for the csi node plugin.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-cinder-node-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-node-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-nodeplugin-role
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,130 @@
# This YAML file contains driver-registrar & csi driver nodeplugin API objects,
# which are necessary to run the csi nodeplugin for cinder.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-cinder-nodeplugin
namespace: kube-system
spec:
selector:
matchLabels:
app: csi-cinder-nodeplugin
template:
metadata:
labels:
app: csi-cinder-nodeplugin
spec:
serviceAccountName: csi-cinder-node-sa
hostNetwork: true
containers:
- name: node-driver-registrar
image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/cinder.csi.openstack.org/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: liveness-probe
image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }}
args:
- "--csi-address=/csi/csi.sock"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: cinder-csi-plugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: {{ cinder_csi_plugin_image_repo }}:{{ cinder_csi_plugin_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- /bin/cinder-csi-plugin
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
env:
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
ports:
- containerPort: 9808
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 10
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: pods-probe-dir
mountPath: /dev
mountPropagation: "HostToContainer"
- name: secret-cinderplugin
mountPath: /etc/config
readOnly: true
- name: ca-certs
mountPath: /etc/ssl/certs
readOnly: true
{% if cinder_cacert is defined and cinder_cacert != "" %}
- name: cinder-cacert
mountPath: {{ kube_config_dir }}/cinder-cacert.pem
readOnly: true
{% endif %}
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/cinder.csi.openstack.org
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: pods-probe-dir
hostPath:
path: /dev
type: Directory
- name: secret-cinderplugin
secret:
secretName: cloud-config
- name: ca-certs
hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
{% if cinder_cacert is defined and cinder_cacert != "" %}
- name: cinder-cacert
hostPath:
path: {{ kube_config_dir }}/cinder-cacert.pem
type: FileOrCreate
{% endif %}
{% if cinder_tolerations %}
tolerations:
{{ cinder_tolerations | to_nice_yaml(indent=2) | indent(width=8) }}
{% endif %}
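cinder_tolerations is empty by default in the role defaults and is rendered into the block above with to_nice_yaml. A sketch of a possible value, for example to also schedule the node plugin on control plane nodes:

cinder_tolerations:
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule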


@@ -0,0 +1,14 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: cinder-csi-pdb
namespace: kube-system
spec:
{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %}
minAvailable: 1
{% else %}
minAvailable: 0
{% endif %}
selector:
matchLabels:
app: csi-cinder-controllerplugin


@@ -0,0 +1,26 @@
---
- name: CSI CRD | Generate Manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: volumesnapshotclasses, file: volumesnapshotclasses.yml}
- {name: volumesnapshotcontents, file: volumesnapshotcontents.yml}
- {name: volumesnapshots, file: volumesnapshots.yml}
register: csi_crd_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: CSI CRD | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
wait: true
with_items:
- "{{ csi_crd_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"


@@ -0,0 +1,116 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.0
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419"
creationTimestamp: null
name: volumesnapshotclasses.snapshot.storage.k8s.io
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshotClass
listKind: VolumeSnapshotClassList
plural: volumesnapshotclasses
singular: volumesnapshotclass
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .driver
name: Driver
type: string
- description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
jsonPath: .deletionPolicy
name: DeletionPolicy
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: VolumeSnapshotClass specifies parameters that an underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
deletionPolicy:
description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required.
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
parameters:
additionalProperties:
type: string
description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes.
type: object
required:
- deletionPolicy
- driver
type: object
served: true
storage: true
subresources: {}
- additionalPrinterColumns:
- jsonPath: .driver
name: Driver
type: string
- description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
jsonPath: .deletionPolicy
name: DeletionPolicy
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
# This indicates the v1beta1 version of the custom resource is deprecated.
# API requests to this version receive a warning in the server response.
deprecated: true
# This overrides the default warning returned to clients making v1beta1 API requests.
deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass"
schema:
openAPIV3Schema:
description: VolumeSnapshotClass specifies parameters that an underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
deletionPolicy:
description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required.
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
parameters:
additionalProperties:
type: string
description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes.
type: object
required:
- deletionPolicy
- driver
type: object
served: true
storage: false
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@@ -0,0 +1,305 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.0
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419"
creationTimestamp: null
name: volumesnapshotcontents.snapshot.storage.k8s.io
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshotContent
listKind: VolumeSnapshotContentList
plural: volumesnapshotcontents
singular: volumesnapshotcontent
scope: Cluster
versions:
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: Represents the complete size of the snapshot in bytes
jsonPath: .status.restoreSize
name: RestoreSize
type: integer
- description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted.
jsonPath: .spec.deletionPolicy
name: DeletionPolicy
type: string
- description: Name of the CSI driver used to create the physical snapshot on the underlying storage system.
jsonPath: .spec.driver
name: Driver
type: string
- description: Name of the VolumeSnapshotClass to which this snapshot belongs.
jsonPath: .spec.volumeSnapshotClassName
name: VolumeSnapshotClass
type: string
- description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
jsonPath: .spec.volumeSnapshotRef.name
name: VolumeSnapshot
type: string
- description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
jsonPath: .spec.volumeSnapshotRef.namespace
name: VolumeSnapshotNamespace
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required.
properties:
deletionPolicy:
description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required.
type: string
source:
description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required.
properties:
snapshotHandle:
description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable.
type: string
volumeHandle:
                    description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken. This field is immutable.
type: string
type: object
oneOf:
- required: ["snapshotHandle"]
- required: ["volumeHandle"]
volumeSnapshotClassName:
                description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with a different set of values, and as such, should not be referenced post-snapshot creation.
type: string
volumeSnapshotRef:
                description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
required:
- deletionPolicy
- driver
- source
- volumeSnapshotRef
type: object
status:
description: status represents the current information of a snapshot.
properties:
creationTime:
description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC.
format: int64
type: integer
error:
description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared.
properties:
message:
description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
format: int64
minimum: 0
type: integer
snapshotHandle:
description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress.
type: string
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: Represents the complete size of the snapshot in bytes
jsonPath: .status.restoreSize
name: RestoreSize
type: integer
- description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted.
jsonPath: .spec.deletionPolicy
name: DeletionPolicy
type: string
- description: Name of the CSI driver used to create the physical snapshot on the underlying storage system.
jsonPath: .spec.driver
name: Driver
type: string
- description: Name of the VolumeSnapshotClass to which this snapshot belongs.
jsonPath: .spec.volumeSnapshotClassName
name: VolumeSnapshotClass
type: string
- description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
jsonPath: .spec.volumeSnapshotRef.name
name: VolumeSnapshot
type: string
- description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
jsonPath: .spec.volumeSnapshotRef.namespace
name: VolumeSnapshotNamespace
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
# This indicates the v1beta1 version of the custom resource is deprecated.
# API requests to this version receive a warning in the server response.
deprecated: true
# This overrides the default warning returned to clients making v1beta1 API requests.
deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent"
schema:
openAPIV3Schema:
description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required.
properties:
deletionPolicy:
description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required.
type: string
source:
description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required.
properties:
snapshotHandle:
description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable.
type: string
volumeHandle:
                    description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken. This field is immutable.
type: string
type: object
volumeSnapshotClassName:
                description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with a different set of values, and as such, should not be referenced post-snapshot creation.
type: string
volumeSnapshotRef:
                description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
required:
- deletionPolicy
- driver
- source
- volumeSnapshotRef
type: object
status:
description: status represents the current information of a snapshot.
properties:
creationTime:
description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC.
format: int64
type: integer
error:
description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared.
properties:
message:
description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
format: int64
minimum: 0
type: integer
snapshotHandle:
description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress.
type: string
type: object
required:
- spec
type: object
served: true
storage: false
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@@ -0,0 +1,231 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.0
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419"
creationTimestamp: null
name: volumesnapshots.snapshot.storage.k8s.io
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshot
listKind: VolumeSnapshotList
plural: volumesnapshots
singular: volumesnapshot
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created.
jsonPath: .spec.source.persistentVolumeClaimName
name: SourcePVC
type: string
- description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot.
jsonPath: .spec.source.volumeSnapshotContentName
name: SourceSnapshotContent
type: string
- description: Represents the minimum size of volume required to rehydrate from this snapshot.
jsonPath: .status.restoreSize
name: RestoreSize
type: string
- description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
jsonPath: .spec.volumeSnapshotClassName
name: SnapshotClass
type: string
    - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object.
jsonPath: .status.boundVolumeSnapshotContentName
name: SnapshotContent
type: string
- description: Timestamp when the point-in-time snapshot was taken by the underlying storage system.
jsonPath: .status.creationTime
name: CreationTime
type: date
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.'
properties:
source:
description: source specifies where a snapshot will be created from. This field is immutable after creation. Required.
properties:
persistentVolumeClaimName:
                    description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exist and needs to be created. This field is immutable.
type: string
volumeSnapshotContentName:
description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable.
type: string
type: object
oneOf:
- required: ["persistentVolumeClaimName"]
- required: ["volumeSnapshotContentName"]
volumeSnapshotClassName:
                description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default VolumeSnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exists for a given CSI Driver and more than one has been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.'
type: string
required:
- source
type: object
status:
description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.
properties:
boundVolumeSnapshotContentName:
                description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.'
type: string
creationTime:
description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown.
format: date-time
type: string
error:
                description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper-level controllers (i.e., an application controller) to decide whether they should continue waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during snapshot creation. Upon success, this error field will be cleared.
properties:
message:
description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
type: string
description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created.
jsonPath: .spec.source.persistentVolumeClaimName
name: SourcePVC
type: string
- description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot.
jsonPath: .spec.source.volumeSnapshotContentName
name: SourceSnapshotContent
type: string
- description: Represents the minimum size of volume required to rehydrate from this snapshot.
jsonPath: .status.restoreSize
name: RestoreSize
type: string
- description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
jsonPath: .spec.volumeSnapshotClassName
name: SnapshotClass
type: string
    - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object.
jsonPath: .status.boundVolumeSnapshotContentName
name: SnapshotContent
type: string
- description: Timestamp when the point-in-time snapshot was taken by the underlying storage system.
jsonPath: .status.creationTime
name: CreationTime
type: date
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
# This indicates the v1beta1 version of the custom resource is deprecated.
# API requests to this version receive a warning in the server response.
deprecated: true
# This overrides the default warning returned to clients making v1beta1 API requests.
deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot"
schema:
openAPIV3Schema:
description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.'
properties:
source:
description: source specifies where a snapshot will be created from. This field is immutable after creation. Required.
properties:
persistentVolumeClaimName:
                    description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exist and needs to be created. This field is immutable.
type: string
volumeSnapshotContentName:
description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable.
type: string
type: object
volumeSnapshotClassName:
                description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default VolumeSnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exists for a given CSI Driver and more than one has been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.'
type: string
required:
- source
type: object
status:
description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.
properties:
boundVolumeSnapshotContentName:
                description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.'
type: string
creationTime:
description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown.
format: date-time
type: string
error:
                description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper-level controllers (i.e., an application controller) to decide whether they should continue waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during snapshot creation. Upon success, this error field will be cleared.
properties:
message:
description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
type: string
description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
required:
- spec
type: object
served: true
storage: false
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@@ -0,0 +1,2 @@
---
gcp_pd_csi_controller_replicas: 1
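# The deploy tasks fail unless a GCP service account key file is provided; set
# it in your inventory, for example (path is illustrative only):
# gcp_pd_csi_sa_cred_file: "/path/to/cloud-sa.json"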

View File

@@ -0,0 +1,45 @@
---
- name: GCP PD CSI Driver | Check if cloud-sa.json exists
fail:
msg: "Credentials file cloud-sa.json is mandatory"
when: gcp_pd_csi_sa_cred_file is not defined or not gcp_pd_csi_sa_cred_file
- name: GCP PD CSI Driver | Copy GCP credentials file
copy:
src: "{{ gcp_pd_csi_sa_cred_file }}"
dest: "{{ kube_config_dir }}/cloud-sa.json"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube_control_plane'][0]
- name: GCP PD CSI Driver | Get base64 cloud-sa.json
slurp:
src: "{{ kube_config_dir }}/cloud-sa.json"
register: gcp_cred_secret
when: inventory_hostname == groups['kube_control_plane'][0]
- name: GCP PD CSI Driver | Generate Manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: gcp-pd-csi-cred-secret, file: gcp-pd-csi-cred-secret.yml}
- {name: gcp-pd-csi-setup, file: gcp-pd-csi-setup.yml}
- {name: gcp-pd-csi-controller, file: gcp-pd-csi-controller.yml}
- {name: gcp-pd-csi-node, file: gcp-pd-csi-node.yml}
register: gcp_pd_csi_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
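# Manifests are templated only on the first control plane node; the apply task
# below loops over those results and the "not item is skipped" guard ignores
# entries whose templating step did not run.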
- name: GCP PD CSI Driver | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ gcp_pd_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -0,0 +1,75 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-gce-pd-controller
namespace: kube-system
spec:
serviceName: "csi-gce-pd"
replicas: {{ gcp_pd_csi_controller_replicas }}
selector:
matchLabels:
app: gcp-compute-persistent-disk-csi-driver
template:
metadata:
labels:
app: gcp-compute-persistent-disk-csi-driver
spec:
      # Host network must be used for interaction with Workload Identity in GKE
      # since it replaces the GCE Metadata Server with the GKE Metadata Server.
      # Remove this requirement when the issue is resolved and before any
      # exposure of metrics ports.
hostNetwork: true
serviceAccountName: csi-gce-pd-controller-sa
priorityClassName: csi-gce-pd-controller
containers:
- name: csi-provisioner
image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }}
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
- "--feature-gates=Topology=true"
- "--default-fstype=ext4"
# - "--run-controller-service=false" # disable the controller service of the CSI driver
# - "--run-node-service=false" # disable the node service of the CSI driver
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }}
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }}
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: gce-pd-driver
# Don't change base image without changing pdImagePlaceholder in
# test/k8s-integration/main.go
image: {{ gcp_pd_csi_plugin_image_repo }}:{{ gcp_pd_csi_plugin_image_tag }}
args:
- "--v=5"
- "--endpoint=unix:/csi/csi.sock"
env:
- name: GOOGLE_APPLICATION_CREDENTIALS
value: "/etc/cloud-sa/cloud-sa.json"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: cloud-sa-volume
readOnly: true
mountPath: "/etc/cloud-sa"
volumes:
- name: socket-dir
emptyDir: {}
- name: cloud-sa-volume
secret:
secretName: cloud-sa
volumeClaimTemplates: []

View File

@@ -0,0 +1,8 @@
---
kind: Secret
apiVersion: v1
metadata:
name: cloud-sa
namespace: kube-system
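# The slurp task in this role returns the file content already base64-encoded,
# which is why it can be used directly under the Secret's "data" field.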
data:
cloud-sa.json: {{ gcp_cred_secret.content }}

View File

@@ -0,0 +1,111 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-gce-pd-node
namespace: kube-system
spec:
selector:
matchLabels:
app: gcp-compute-persistent-disk-csi-driver
template:
metadata:
labels:
app: gcp-compute-persistent-disk-csi-driver
spec:
      # Host network must be used for interaction with Workload Identity in GKE
      # since it replaces the GCE Metadata Server with the GKE Metadata Server.
      # Remove this requirement when the issue is resolved and before any
      # exposure of metrics ports.
hostNetwork: true
priorityClassName: csi-gce-pd-node
serviceAccountName: csi-gce-pd-node-sa
containers:
- name: csi-driver-registrar
image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }}
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/pd.csi.storage.gke.io/csi.sock"
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/pd.csi.storage.gke.io /registration/pd.csi.storage.gke.io-reg.sock"]
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: gce-pd-driver
securityContext:
privileged: true
# Don't change base image without changing pdImagePlaceholder in
# test/k8s-integration/main.go
image: {{ gcp_pd_csi_plugin_image_repo }}:{{ gcp_pd_csi_plugin_image_tag }}
args:
- "--v=5"
- "--endpoint=unix:/csi/csi.sock"
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /csi
- name: device-dir
mountPath: /dev
# The following mounts are required to trigger host udevadm from
# container
- name: udev-rules-etc
mountPath: /etc/udev
- name: udev-rules-lib
mountPath: /lib/udev
- name: udev-socket
mountPath: /run/udev
- name: sys
mountPath: /sys
nodeSelector:
kubernetes.io/os: linux
volumes:
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/pd.csi.storage.gke.io/
type: DirectoryOrCreate
- name: device-dir
hostPath:
path: /dev
type: Directory
# The following mounts are required to trigger host udevadm from
# container
- name: udev-rules-etc
hostPath:
path: /etc/udev
type: Directory
- name: udev-rules-lib
hostPath:
path: /lib/udev
type: Directory
- name: udev-socket
hostPath:
path: /run/udev
type: Directory
- name: sys
hostPath:
path: /sys
type: Directory
# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
# See "special case". This will tolerate everything. Node component should
# be scheduled on all nodes.
tolerations:
- operator: Exists

View File

@@ -0,0 +1,200 @@
##### Node Service Account, Roles, RoleBindings
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-gce-pd-node-sa
namespace: kube-system
---
##### Controller Service Account, Roles, Rolebindings
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-gce-pd-controller-sa
namespace: kube-system
---
# xref: https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-controller-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-gce-pd-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# xref: https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-controller-attacher-binding
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-gce-pd-attacher-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: csi-gce-pd-controller
value: 900000000
globalDefault: false
description: "This priority class should be used for the GCE PD CSI driver controller deployment only."
---
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: csi-gce-pd-node
value: 900001000
globalDefault: false
description: "This priority class should be used for the GCE PD CSI driver node deployment only."
---
# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-resizer-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-resizer-binding
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-gce-pd-resizer-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: csi-gce-pd-node-psp
spec:
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
runAsUser:
rule: RunAsAny
fsGroup:
rule: RunAsAny
privileged: true
volumes:
- '*'
hostNetwork: true
allowedHostPaths:
- pathPrefix: "/var/lib/kubelet/plugins_registry/"
- pathPrefix: "/var/lib/kubelet"
- pathPrefix: "/var/lib/kubelet/plugins/pd.csi.storage.gke.io/"
- pathPrefix: "/dev"
- pathPrefix: "/etc/udev"
- pathPrefix: "/lib/udev"
- pathPrefix: "/run/udev"
- pathPrefix: "/sys"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-node-deploy
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- csi-gce-pd-node-psp
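  # Grants "use" on the PSP above; the binding below attaches this role to the
  # node ServiceAccount so the privileged DaemonSet pods can be admitted.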
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: csi-gce-pd-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: csi-gce-pd-node-deploy
subjects:
- kind: ServiceAccount
name: csi-gce-pd-node-sa
namespace: kube-system

View File

@@ -0,0 +1,16 @@
---
upcloud_csi_controller_replicas: 1
upcloud_csi_provisioner_image_tag: "v3.1.0"
upcloud_csi_attacher_image_tag: "v3.4.0"
upcloud_csi_resizer_image_tag: "v1.4.0"
upcloud_csi_plugin_image_tag: "v0.3.3"
upcloud_csi_node_image_tag: "v2.5.0"
upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME') }}"
upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD') }}"
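# Both credentials are read from the environment at deploy time, e.g. (illustrative):
#   export UPCLOUD_USERNAME=... && export UPCLOUD_PASSWORD=...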
upcloud_tolerations: []
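# Example toleration list (illustrative only; adjust keys and values to your taints):
# upcloud_tolerations:
#   - key: "dedicated"
#     operator: "Equal"
#     value: "storage"
#     effect: "NoSchedule"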
upcloud_csi_enable_volume_snapshot: false
upcloud_csi_snapshot_controller_replicas: 2
upcloud_csi_snapshotter_image_tag: "v4.2.1"
upcloud_csi_snapshot_controller_image_tag: "v4.2.1"
upcloud_csi_snapshot_validation_webhook_image_tag: "v4.2.1"
upcloud_cacert: "{{ lookup('env','OS_CACERT') }}"

View File

@@ -0,0 +1,40 @@
---
- name: UpCloud CSI Driver | Check if UPCLOUD_USERNAME exists
fail:
msg: "UpCloud username is missing. Env UPCLOUD_USERNAME is mandatory"
when: upcloud_username is not defined or not upcloud_username
- name: UpCloud CSI Driver | Check if UPCLOUD_PASSWORD exists
fail:
msg: "UpCloud password is missing. Env UPCLOUD_PASSWORD is mandatory"
when:
- upcloud_username is defined
- upcloud_username|length > 0
- upcloud_password is not defined or not upcloud_password
- name: UpCloud CSI Driver | Generate Manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: upcloud-csi-cred-secret, file: upcloud-csi-cred-secret.yml}
- {name: upcloud-csi-setup, file: upcloud-csi-setup.yml}
- {name: upcloud-csi-controller, file: upcloud-csi-controller.yml}
- {name: upcloud-csi-node, file: upcloud-csi-node.yml}
- {name: upcloud-csi-driver, file: upcloud-csi-driver.yml}
register: upcloud_csi_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: UpCloud CSI Driver | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ upcloud_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -0,0 +1,93 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-upcloud-controller
namespace: kube-system
spec:
serviceName: "csi-upcloud"
replicas: {{ upcloud_csi_controller_replicas }}
selector:
matchLabels:
app: csi-upcloud-controller
template:
metadata:
labels:
app: csi-upcloud-controller
role: csi-upcloud
spec:
priorityClassName: system-cluster-critical
serviceAccount: csi-upcloud-controller-sa
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:{{ upcloud_csi_provisioner_image_tag }}
args:
- "--csi-address=$(ADDRESS)"
- "--v=5"
- "--timeout=600s"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:{{ upcloud_csi_attacher_image_tag }}
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--timeout=120s"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:{{ upcloud_csi_resizer_image_tag }}
args:
- "--v=5"
- "--timeout=120s"
- "--csi-address=$(ADDRESS)"
- "--handle-volume-inuse-error=true"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-upcloud-plugin
image: ghcr.io/upcloudltd/upcloud-csi:{{ upcloud_csi_plugin_image_tag }}
args:
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodehost=$(NODE_ID)"
- "--username=$(UPCLOUD_USERNAME)"
- "--password=$(UPCLOUD_PASSWORD)"
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: UPCLOUD_USERNAME
valueFrom:
secretKeyRef:
name: upcloud
key: username
- name: UPCLOUD_PASSWORD
valueFrom:
secretKeyRef:
name: upcloud
key: password
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
imagePullSecrets:
- name: regcred
volumes:
- name: socket-dir
emptyDir: {}

View File

@@ -0,0 +1,9 @@
---
apiVersion: v1
kind: Secret
metadata:
name: upcloud
namespace: kube-system
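# stringData accepts plain text; the API server base64-encodes it into "data"
# when the Secret is stored, so the credentials need no manual encoding here.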
stringData:
username: {{ upcloud_username }}
password: {{ upcloud_password }}

View File

@@ -0,0 +1,8 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: storage.csi.upcloud.com
spec:
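  # attachRequired: Kubernetes waits for a VolumeAttachment (ControllerPublishVolume)
  #   before mounting a volume from this driver.
  # podInfoOnMount: pod metadata is passed to the driver on NodePublishVolume.
  # fsGroupPolicy File: volume ownership and permissions are adjusted to the
  #   pod's fsGroup on mount.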
attachRequired: true
podInfoOnMount: true
fsGroupPolicy: File

View File

@@ -0,0 +1,101 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-upcloud-node
namespace: kube-system
spec:
selector:
matchLabels:
app: csi-upcloud-node
template:
metadata:
labels:
app: csi-upcloud-node
role: csi-upcloud
spec:
priorityClassName: system-node-critical
serviceAccount: csi-upcloud-node-sa
hostNetwork: true
containers:
- name: csi-node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:{{ upcloud_csi_node_image_tag }}
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/storage.csi.upcloud.com/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /csi/
- name: registration-dir
mountPath: /registration/
- name: csi-upcloud-plugin
image: ghcr.io/upcloudltd/upcloud-csi:{{ upcloud_csi_plugin_image_tag }}
args:
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodehost=$(NODE_ID)"
- "--username=$(UPCLOUD_USERNAME)"
- "--password=$(UPCLOUD_PASSWORD)"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: UPCLOUD_USERNAME
valueFrom:
secretKeyRef:
name: upcloud
key: username
- name: UPCLOUD_PASSWORD
valueFrom:
secretKeyRef:
name: upcloud
key: password
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: "Always"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: /var/lib/kubelet
          # needed so that any mounts set up inside this container are
          # propagated back to the host machine.
mountPropagation: "Bidirectional"
- name: device-dir
mountPath: /dev
imagePullSecrets:
- name: regcred
volumes:
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/storage.csi.upcloud.com
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: device-dir
hostPath:
path: /dev
{% if upcloud_tolerations %}
tolerations:
{{ upcloud_tolerations | to_nice_yaml(indent=2) | indent(width=8) }}
{% endif %}

View File

@@ -0,0 +1,185 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: csi-upcloud-controller-sa
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-upcloud-node-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-upcloud-node-driver-registrar-role
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-upcloud-node-driver-registrar-binding
subjects:
- kind: ServiceAccount
name: csi-upcloud-node-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-upcloud-node-driver-registrar-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-upcloud-provisioner-role
rules:
- apiGroups: [ "" ]
resources: [ "secrets" ]
verbs: [ "get", "list" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "csinodes" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "list", "watch", "create", "update", "patch" ]
- apiGroups: [ "" ]
resources: [ "nodes" ]
verbs: [ "get", "list", "watch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-upcloud-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-upcloud-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-upcloud-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# Attacher must be able to work with PVs, nodes and VolumeAttachments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-upcloud-attacher-role
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "update", "patch" ]
- apiGroups: [ "" ]
resources: [ "nodes" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "csinodes" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "volumeattachments" ]
verbs: [ "get", "list", "watch", "update", "patch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "volumeattachments/status" ]
verbs: [ "get", "list", "watch", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-upcloud-attacher-binding
subjects:
- kind: ServiceAccount
name: csi-upcloud-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-upcloud-attacher-role
apiGroup: rbac.authorization.k8s.io
---
# Provisioner must be able to work with endpoints and leases in the current namespace
# if (and only if) leader election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: kube-system
name: csi-upcloud-provisioner-cfg-role
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role-cfg-binding
namespace: kube-system
subjects:
- kind: ServiceAccount
name: csi-upcloud-controller-sa
namespace: kube-system
roleRef:
kind: Role
name: csi-upcloud-provisioner-cfg-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-upcloud-resizer-role
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "update", "patch" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims/status" ]
verbs: [ "update", "patch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "list", "watch", "create", "update", "patch" ]
- apiGroups: [ "" ]
resources: [ "pods" ]
verbs: [ "watch", "list" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-upcloud-resizer-binding
subjects:
- kind: ServiceAccount
name: csi-upcloud-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-upcloud-resizer-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,37 @@
---
external_vsphere_vcenter_port: "443"
external_vsphere_insecure: "true"
external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
external_vsphere_version: "7.0u1"
vsphere_syncer_image_tag: "v2.5.1"
vsphere_csi_attacher_image_tag: "v3.4.0"
vsphere_csi_controller: "v2.5.1"
vsphere_csi_liveness_probe_image_tag: "v2.6.0"
vsphere_csi_provisioner_image_tag: "v3.1.0"
vsphere_csi_snapshotter_image_tag: "v5.0.1"
vsphere_csi_node_driver_registrar_image_tag: "v2.5.0"
vsphere_csi_driver_image_tag: "v2.5.1"
vsphere_csi_resizer_tag: "v1.4.0"
# Set to kube-system for backward compatibility; should be changed to vmware-system-csi in the long run
vsphere_csi_namespace: "kube-system"
vsphere_csi_controller_replicas: 1
csi_endpoint: '{% if external_vsphere_version >= "7.0u1" %}/csi{% else %}/var/lib/csi/sockets/pluginproxy{% endif %}'
vsphere_csi_aggressive_node_drain: False
vsphere_csi_aggressive_node_unreachable_timeout: 300
vsphere_csi_aggressive_node_not_ready_timeout: 300
vsphere_csi_node_affinity: {}
# If this is true, debug information will be displayed, but it may contain
# some private data, so it is recommended to set it to false in production
# environments.
unsafe_show_logs: false
# https://github.com/kubernetes-sigs/vsphere-csi-driver/blob/master/docs/book/features/volume_snapshot.md#how-to-enable-volume-snapshot--restore-feature-in-vsphere-csi-
# According to the link above, the block-volume-snapshot feature can be controlled with the parameter below
vsphere_csi_block_volume_snapshot: false
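# Illustrative example only (not a shipped default): vsphere_csi_node_affinity is
# rendered under the node DaemonSet "affinity:" key, so it takes a standard
# Kubernetes affinity block; "zone-a" below is a placeholder value:
# vsphere_csi_node_affinity:
#   nodeAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#       nodeSelectorTerms:
#         - matchExpressions:
#             - key: topology.kubernetes.io/zone
#               operator: In
#               values:
#                 - zone-a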

View File

@@ -0,0 +1,54 @@
---
- include_tasks: vsphere-credentials-check.yml
- name: vSphere CSI Driver | Generate CSI cloud-config
template:
src: "{{ item }}.j2"
dest: "{{ kube_config_dir }}/{{ item }}"
mode: 0640
with_items:
- vsphere-csi-cloud-config
when: inventory_hostname == groups['kube_control_plane'][0]
- name: vSphere CSI Driver | Generate Manifests
template:
src: "{{ item }}.j2"
dest: "{{ kube_config_dir }}/{{ item }}"
mode: 0644
with_items:
- vsphere-csi-namespace.yml
- vsphere-csi-driver.yml
- vsphere-csi-controller-rbac.yml
- vsphere-csi-node-rbac.yml
- vsphere-csi-controller-config.yml
- vsphere-csi-controller-deployment.yml
- vsphere-csi-controller-service.yml
- vsphere-csi-node.yml
register: vsphere_csi_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: vSphere CSI Driver | Generate a CSI secret manifest
command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml"
register: vsphere_csi_secret_manifest
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: "{{ not (unsafe_show_logs|bool) }}"
- name: vSphere CSI Driver | Apply a CSI secret manifest
command:
cmd: "{{ kubectl }} apply -f -"
stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: "{{ not (unsafe_show_logs|bool) }}"
- name: vSphere CSI Driver | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item }}"
state: "latest"
with_items:
- "{{ vsphere_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item }}"

View File

@@ -0,0 +1,38 @@
---
- name: External vSphere Cloud Provider | check external_vsphere_vcenter_ip value
fail:
msg: "external_vsphere_vcenter_ip is missing"
when: external_vsphere_vcenter_ip is not defined or not external_vsphere_vcenter_ip
- name: External vSphere Cloud Provider | check external_vsphere_vcenter_port value
fail:
msg: "external_vsphere_vcenter_port is missing"
when: external_vsphere_vcenter_port is not defined or not external_vsphere_vcenter_port
- name: External vSphere Cloud Provider | check external_vsphere_insecure value
fail:
msg: "external_vsphere_insecure is missing"
when: external_vsphere_insecure is not defined or not external_vsphere_insecure
- name: External vSphere Cloud Provider | check external_vsphere_user value
fail:
msg: "external_vsphere_user is missing"
when: external_vsphere_user is not defined or not external_vsphere_user
- name: External vSphere Cloud Provider | check external_vsphere_password value
fail:
msg: "external_vsphere_password is missing"
when:
- external_vsphere_password is not defined or not external_vsphere_password
- name: External vSphere Cloud Provider | check external_vsphere_datacenter value
fail:
msg: "external_vsphere_datacenter is missing"
when:
- external_vsphere_datacenter is not defined or not external_vsphere_datacenter
- name: External vSphere Cloud Provider | check external_vsphere_kubernetes_cluster_id value
fail:
msg: "external_vsphere_kubernetes_cluster_id is missing"
when:
- external_vsphere_kubernetes_cluster_id is not defined or not external_vsphere_kubernetes_cluster_id
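# For reference, an illustrative inventory snippet that satisfies the checks above
# (all values are placeholders; variables with defaults in defaults/main.yml, such
# as the vCenter port, insecure flag and cluster id, can be left at their defaults):
# external_vsphere_vcenter_ip: "vcenter.example.com"
# external_vsphere_user: "administrator@vsphere.local"
# external_vsphere_password: "ChangeMe"
# external_vsphere_datacenter: "Datacenter-1"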

View File

@@ -0,0 +1,9 @@
[Global]
cluster-id = "{{ external_vsphere_kubernetes_cluster_id }}"
[VirtualCenter "{{ external_vsphere_vcenter_ip }}"]
insecure-flag = "{{ external_vsphere_insecure }}"
user = "{{ external_vsphere_user }}"
password = "{{ external_vsphere_password }}"
port = "{{ external_vsphere_vcenter_port }}"
datacenters = "{{ external_vsphere_datacenter }}"

View File

@@ -0,0 +1,24 @@
apiVersion: v1
data:
"csi-migration": "false"
{% if external_vsphere_version >= "7.0" %}
"csi-auth-check": "true"
{% else %}
"csi-auth-check": "false"
{% endif %}
"online-volume-extend": "true"
"trigger-csi-fullsync": "false"
"async-query-volume": "true"
"improved-csi-idempotency": "true"
"improved-volume-topology": "true"
"block-volume-snapshot": "{{ vsphere_csi_block_volume_snapshot }}"
"csi-windows-support": "false"
{% if vsphere_csi_controller is version('v2.5.0', '>=') %}
"use-csinode-id": "true"
"pv-to-backingdiskobjectid-mapping": "false"
"cnsmgr-suspend-create-volume": "false"
{% endif %}
kind: ConfigMap
metadata:
name: internal-feature-states.csi.vsphere.vmware.com
namespace: "{{ vsphere_csi_namespace }}"

View File

@@ -0,0 +1,220 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: vsphere-csi-controller
namespace: "{{ vsphere_csi_namespace }}"
spec:
replicas: {{ vsphere_csi_controller_replicas }}
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 0
selector:
matchLabels:
app: vsphere-csi-controller
template:
metadata:
labels:
app: vsphere-csi-controller
role: vsphere-csi
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- vsphere-csi-controller
topologyKey: "kubernetes.io/hostname"
serviceAccountName: vsphere-csi-controller
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- operator: "Exists"
key: node-role.kubernetes.io/master
effect: NoSchedule
- operator: "Exists"
key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{% if vsphere_csi_aggressive_node_drain %}
# Set the tolerations below if you need aggressive pod eviction when a
# node becomes not-ready or unreachable. The default is 300 seconds if not specified.
- key: node.kubernetes.io/not-ready
operator: Exists
effect: NoExecute
tolerationSeconds: {{ vsphere_csi_aggressive_node_not_ready_timeout }}
- key: node.kubernetes.io/unreachable
operator: Exists
effect: NoExecute
tolerationSeconds: {{ vsphere_csi_aggressive_node_unreachable_timeout }}
{% endif %}
dnsPolicy: "Default"
containers:
- name: csi-attacher
image: {{ kube_image_repo }}/sig-storage/csi-attacher:{{ vsphere_csi_attacher_image_tag }}
args:
- "--v=4"
- "--timeout=300s"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--kube-api-qps=100"
- "--kube-api-burst=100"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
{% if external_vsphere_version >= "7.0" %}
- name: csi-resizer
image: {{ kube_image_repo }}/sig-storage/csi-resizer:{{ vsphere_csi_resizer_tag }}
args:
- "--v=4"
- "--timeout=300s"
- "--csi-address=$(ADDRESS)"
- "--handle-volume-inuse-error=false"
- "--kube-api-qps=100"
- "--kube-api-burst=100"
- "--leader-election"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
{% endif %}
- name: vsphere-csi-controller
image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/driver:{{ vsphere_csi_controller }}
args:
- "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
- "--fss-namespace={{ vsphere_csi_namespace }}"
- "--supervisor-fss-namespace={{ vsphere_csi_namespace }}"
- "--use-gocsi=false"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: CSI_ENDPOINT
value: unix://{{ csi_endpoint }}/csi.sock
- name: X_CSI_MODE
value: "controller"
- name: X_CSI_SPEC_DISABLE_LEN_CHECK
value: "true"
- name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT
value: 3m
- name: VSPHERE_CSI_CONFIG
value: "/etc/cloud/csi-vsphere.conf"
- name: LOGGER_LEVEL
value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION
{% if external_vsphere_version >= "7.0u1" %}
- name: INCLUSTER_CLIENT_QPS
value: "100"
- name: INCLUSTER_CLIENT_BURST
value: "100"
{% endif %}
volumeMounts:
- mountPath: /etc/cloud
name: vsphere-config-volume
readOnly: true
- mountPath: {{ csi_endpoint }}
name: socket-dir
ports:
- name: healthz
containerPort: 9808
protocol: TCP
- name: prometheus
containerPort: 2112
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 3
- name: liveness-probe
image: {{ kube_image_repo }}/sig-storage/livenessprobe:{{ vsphere_csi_liveness_probe_image_tag }}
args:
- "--v=4"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: {{ csi_endpoint }}/csi.sock
volumeMounts:
- name: socket-dir
mountPath: {{ csi_endpoint }}
- name: vsphere-syncer
image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/syncer:{{ vsphere_syncer_image_tag }}
args:
- "--leader-election"
- "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
- "--fss-namespace={{ vsphere_csi_namespace }}"
- "--supervisor-fss-namespace={{ vsphere_csi_namespace }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
ports:
- containerPort: 2113
name: prometheus
protocol: TCP
env:
- name: FULL_SYNC_INTERVAL_MINUTES
value: "30"
- name: VSPHERE_CSI_CONFIG
value: "/etc/cloud/csi-vsphere.conf"
- name: LOGGER_LEVEL
value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION
{% if external_vsphere_version >= "7.0u1" %}
- name: INCLUSTER_CLIENT_QPS
value: "100"
- name: INCLUSTER_CLIENT_BURST
value: "100"
{% endif %}
volumeMounts:
- mountPath: /etc/cloud
name: vsphere-config-volume
readOnly: true
- name: csi-provisioner
image: {{ kube_image_repo }}/sig-storage/csi-provisioner:{{ vsphere_csi_provisioner_image_tag }}
args:
- "--v=4"
- "--timeout=300s"
- "--csi-address=$(ADDRESS)"
{% if vsphere_csi_controller is version('v2.2.0', '>=') %}
- "--kube-api-qps=100"
- "--kube-api-burst=100"
{% endif %}
- "--leader-election"
- "--default-fstype=ext4"
# needed only for topology aware setup
#- "--feature-gates=Topology=true"
#- "--strict-topology"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
{% if vsphere_csi_controller is version('v2.5.0', '>=') %}
- name: csi-snapshotter
image: {{ kube_image_repo }}/sig-storage/csi-snapshotter:{{ vsphere_csi_snapshotter_image_tag }}
args:
- "--v=4"
- "--kube-api-qps=100"
- "--kube-api-burst=100"
- "--timeout=300s"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
{% endif %}
volumes:
- name: vsphere-config-volume
secret:
secretName: vsphere-config-secret
- name: socket-dir
emptyDir: {}

View File

@@ -0,0 +1,86 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: vsphere-csi-controller
namespace: "{{ vsphere_csi_namespace }}"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-controller-role
rules:
- apiGroups: [""]
resources: ["nodes", "pods", "configmaps"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
{% if external_vsphere_version >= "7.0" %}
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
{% if external_vsphere_version >= "7.0u1" %}
verbs: ["patch"]
{% else %}
verbs: ["update", "patch"]
{% endif %}
{% endif %}
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
{% if vsphere_csi_controller is version('v2.0.0', '>=') %}
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{% endif %}
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses","csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["cns.vmware.com"]
resources: ["triggercsifullsyncs"]
verbs: ["create", "get", "update", "watch", "list"]
- apiGroups: ["cns.vmware.com"]
resources: ["cnsvspherevolumemigrations"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "create", "update"]
- apiGroups: ["cns.vmware.com"]
resources: ["cnsvolumeoperationrequests"]
verbs: ["create", "get", "list", "update", "delete"]
- apiGroups: [ "cns.vmware.com" ]
resources: [ "csinodetopologies" ]
verbs: ["get", "update", "watch", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: [ "snapshot.storage.k8s.io" ]
resources: [ "volumesnapshots" ]
verbs: [ "get", "list" ]
- apiGroups: [ "snapshot.storage.k8s.io" ]
resources: [ "volumesnapshotclasses" ]
verbs: [ "watch", "get", "list" ]
- apiGroups: [ "snapshot.storage.k8s.io" ]
resources: [ "volumesnapshotcontents" ]
verbs: [ "create", "get", "list", "watch", "update", "delete", "patch" ]
- apiGroups: [ "snapshot.storage.k8s.io" ]
resources: [ "volumesnapshotcontents/status" ]
verbs: [ "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-controller-binding
subjects:
- kind: ServiceAccount
name: vsphere-csi-controller
namespace: "{{ vsphere_csi_namespace }}"
roleRef:
kind: ClusterRole
name: vsphere-csi-controller-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: vsphere-csi-controller
namespace: "{{ vsphere_csi_namespace }}"
labels:
app: vsphere-csi-controller
spec:
ports:
- name: ctlr
port: 2112
targetPort: 2112
protocol: TCP
- name: syncer
port: 2113
targetPort: 2113
protocol: TCP
selector:
app: vsphere-csi-controller

View File

@@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: csi.vsphere.vmware.com
spec:
attachRequired: true
podInfoOnMount: false

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: "{{ vsphere_csi_namespace }}"

View File

@@ -0,0 +1,55 @@
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: vsphere-csi-node
namespace: "{{ vsphere_csi_namespace }}"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-node-cluster-role
rules:
- apiGroups: ["cns.vmware.com"]
resources: ["csinodetopologies"]
verbs: ["create", "watch", "get", "patch" ]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-node-cluster-role-binding
subjects:
- kind: ServiceAccount
name: vsphere-csi-node
namespace: "{{ vsphere_csi_namespace }}"
roleRef:
kind: ClusterRole
name: vsphere-csi-node-cluster-role
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-node-role
namespace: "{{ vsphere_csi_namespace }}"
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-node-binding
namespace: "{{ vsphere_csi_namespace }}"
subjects:
- kind: ServiceAccount
name: vsphere-csi-node
namespace: "{{ vsphere_csi_namespace }}"
roleRef:
kind: Role
name: vsphere-csi-node-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,157 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: vsphere-csi-node
namespace: "{{ vsphere_csi_namespace }}"
spec:
selector:
matchLabels:
app: vsphere-csi-node
updateStrategy:
type: "RollingUpdate"
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
app: vsphere-csi-node
role: vsphere-csi
spec:
nodeSelector:
kubernetes.io/os: linux
{% if vsphere_csi_node_affinity %}
affinity:
{{ vsphere_csi_node_affinity | to_nice_yaml | indent(width=8) }}
{% endif %}
serviceAccountName: vsphere-csi-node
hostNetwork: true
dnsPolicy: "ClusterFirstWithHostNet"
containers:
- name: node-driver-registrar
image: {{ kube_image_repo }}/sig-storage/csi-node-driver-registrar:{{ vsphere_csi_node_driver_registrar_image_tag }}
{% if external_vsphere_version < "7.0u1" %}
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"]
{% endif %}
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock
- --mode=kubelet-registration-probe
initialDelaySeconds: 3
- name: vsphere-csi-node
image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/driver:{{ vsphere_csi_driver_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
- "--fss-namespace={{ vsphere_csi_namespace }}"
- "--supervisor-fss-namespace={{ vsphere_csi_namespace }}"
- "--use-gocsi=false"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: MAX_VOLUMES_PER_NODE
value: "59" # Maximum number of volumes that controller can publish to the node. If value is not set or zero Kubernetes decide how many volumes can be published by the controller to the node.
- name: X_CSI_MODE
value: "node"
- name: X_CSI_SPEC_REQ_VALIDATION
value: "false"
- name: X_CSI_DEBUG
value: "true"
- name: X_CSI_SPEC_DISABLE_LEN_CHECK
value: "true"
- name: LOGGER_LEVEL
value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION
- name: NODEGETINFO_WATCH_TIMEOUT_MINUTES
value: "1"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: /var/lib/kubelet
# needed so that any mounts set up inside this container are
# propagated back to the host machine.
mountPropagation: "Bidirectional"
- name: device-dir
mountPath: /dev
- name: blocks-dir
mountPath: /sys/block
- name: sys-devices-dir
mountPath: /sys/devices
ports:
- containerPort: 9808
name: healthz
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 5
periodSeconds: 5
failureThreshold: 3
- name: liveness-probe
image: {{ kube_image_repo }}/sig-storage/livenessprobe:{{ vsphere_csi_liveness_probe_image_tag }}
args:
{% if external_vsphere_version >= "7.0u1" %}
- "--v=4"
{% endif %}
- "--csi-address=/csi/csi.sock"
volumeMounts:
- name: plugin-dir
mountPath: /csi
volumes:
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: device-dir
hostPath:
path: /dev
- name: blocks-dir
hostPath:
path: /sys/block
type: Directory
- name: sys-devices-dir
hostPath:
path: /sys/devices
type: Directory
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists