havelight-ee
2023-05-30 14:44:26 +09:00
parent 9a3174deef
commit 4c32a7239d
2598 changed files with 164595 additions and 487 deletions

View File

@@ -0,0 +1,10 @@
---
cephfs_provisioner_namespace: "cephfs-provisioner"
cephfs_provisioner_cluster: ceph
cephfs_provisioner_monitors: ~
cephfs_provisioner_admin_id: admin
cephfs_provisioner_secret: secret
cephfs_provisioner_storage_class: cephfs
cephfs_provisioner_reclaim_policy: Delete
cephfs_provisioner_claim_root: /volumes
cephfs_provisioner_deterministic_names: true

View File

@@ -0,0 +1,80 @@
---
- name: CephFS Provisioner | Remove legacy addon dir and manifests
file:
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
state: absent
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
- name: CephFS Provisioner | Remove legacy namespace
command: >
{{ kubectl }} delete namespace {{ cephfs_provisioner_namespace }}
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
- name: CephFS Provisioner | Remove legacy storageclass
command: >
{{ kubectl }} delete storageclass {{ cephfs_provisioner_storage_class }}
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
- name: CephFS Provisioner | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
state: directory
owner: root
group: root
mode: 0755
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: CephFS Provisioner | Templates list
set_fact:
cephfs_provisioner_templates:
- { name: 00-namespace, file: 00-namespace.yml, type: ns }
- { name: secret-cephfs-provisioner, file: secret-cephfs-provisioner.yml, type: secret }
- { name: sa-cephfs-provisioner, file: sa-cephfs-provisioner.yml, type: sa }
- { name: clusterrole-cephfs-provisioner, file: clusterrole-cephfs-provisioner.yml, type: clusterrole }
- { name: clusterrolebinding-cephfs-provisioner, file: clusterrolebinding-cephfs-provisioner.yml, type: clusterrolebinding }
- { name: role-cephfs-provisioner, file: role-cephfs-provisioner.yml, type: role }
- { name: rolebinding-cephfs-provisioner, file: rolebinding-cephfs-provisioner.yml, type: rolebinding }
- { name: deploy-cephfs-provisioner, file: deploy-cephfs-provisioner.yml, type: deploy }
- { name: sc-cephfs-provisioner, file: sc-cephfs-provisioner.yml, type: sc }
cephfs_provisioner_templates_for_psp:
- { name: psp-cephfs-provisioner, file: psp-cephfs-provisioner.yml, type: psp }
- name: CephFS Provisioner | Append extra templates to CephFS Provisioner Templates list for PodSecurityPolicy
set_fact:
cephfs_provisioner_templates: "{{ cephfs_provisioner_templates_for_psp + cephfs_provisioner_templates }}"
when:
- podsecuritypolicy_enabled
- cephfs_provisioner_namespace != "kube-system"
- name: CephFS Provisioner | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
mode: 0644
with_items: "{{ cephfs_provisioner_templates }}"
register: cephfs_provisioner_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: CephFS Provisioner | Apply manifests
kube:
name: "{{ item.item.name }}"
namespace: "{{ cephfs_provisioner_namespace }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ cephfs_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ cephfs_provisioner_namespace }}
labels:
name: {{ cephfs_provisioner_namespace }}

View File

@@ -0,0 +1,26 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "delete"]
- apiGroups: ["policy"]
resourceNames: ["cephfs-provisioner"]
resources: ["podsecuritypolicies"]
verbs: ["use"]

View File

@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cephfs-provisioner
subjects:
- kind: ServiceAccount
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
roleRef:
kind: ClusterRole
name: cephfs-provisioner
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,34 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
labels:
app: cephfs-provisioner
version: {{ cephfs_provisioner_image_tag }}
spec:
replicas: 1
selector:
matchLabels:
app: cephfs-provisioner
version: {{ cephfs_provisioner_image_tag }}
template:
metadata:
labels:
app: cephfs-provisioner
version: {{ cephfs_provisioner_image_tag }}
spec:
priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
serviceAccount: cephfs-provisioner
containers:
- name: cephfs-provisioner
image: {{ cephfs_provisioner_image_repo }}:{{ cephfs_provisioner_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: PROVISIONER_NAME
value: ceph.com/cephfs
command:
- "/usr/local/bin/cephfs-provisioner"
args:
- "-id=cephfs-provisioner-1"

View File

@@ -0,0 +1,44 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: cephfs-provisioner
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false

View File

@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create", "get", "delete"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]

View File

@@ -0,0 +1,14 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
subjects:
- kind: ServiceAccount
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cephfs-provisioner

View File

@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}

View File

@@ -0,0 +1,15 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ cephfs_provisioner_storage_class }}
provisioner: ceph.com/cephfs
reclaimPolicy: {{ cephfs_provisioner_reclaim_policy }}
parameters:
cluster: {{ cephfs_provisioner_cluster }}
monitors: {{ cephfs_provisioner_monitors }}
adminId: {{ cephfs_provisioner_admin_id }}
adminSecretName: cephfs-provisioner
adminSecretNamespace: {{ cephfs_provisioner_namespace }}
claimRoot: {{ cephfs_provisioner_claim_root }}
deterministicNames: "{{ cephfs_provisioner_deterministic_names | bool | lower }}"

View File

@@ -0,0 +1,9 @@
---
kind: Secret
apiVersion: v1
metadata:
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
type: Opaque
data:
secret: {{ cephfs_provisioner_secret | b64encode }}

View File

@@ -0,0 +1,9 @@
---
local_path_provisioner_enabled: false
local_path_provisioner_namespace: "local-path-storage"
local_path_provisioner_storage_class: "local-path"
local_path_provisioner_reclaim_policy: Delete
local_path_provisioner_claim_root: /opt/local-path-provisioner/
local_path_provisioner_is_default_storageclass: "true"
local_path_provisioner_debug: false
local_path_provisioner_helper_image_tag: "latest"

View File

@@ -0,0 +1,58 @@
---
- name: Local Path Provisioner | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/local_path_provisioner"
state: directory
owner: root
group: root
mode: 0755
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Local Path Provisioner | Create claim root dir
file:
path: "{{ local_path_provisioner_claim_root }}"
state: directory
mode: 0755
- name: Local Path Provisioner | Templates list
set_fact:
local_path_provisioner_templates:
- { name: local-path-storage-ns, file: local-path-storage-ns.yml, type: ns }
- { name: local-path-storage-sa, file: local-path-storage-sa.yml, type: sa }
- { name: local-path-storage-cr, file: local-path-storage-cr.yml, type: cr }
- { name: local-path-storage-clusterrolebinding, file: local-path-storage-clusterrolebinding.yml, type: clusterrolebinding }
- { name: local-path-storage-cm, file: local-path-storage-cm.yml, type: cm }
- { name: local-path-storage-deployment, file: local-path-storage-deployment.yml, type: deployment }
- { name: local-path-storage-sc, file: local-path-storage-sc.yml, type: sc }
local_path_provisioner_templates_for_psp_not_system_ns:
- { name: local-path-storage-psp, file: local-path-storage-psp.yml, type: psp }
- { name: local-path-storage-psp-role, file: local-path-storage-psp-cr.yml, type: clusterrole }
- { name: local-path-storage-psp-rb, file: local-path-storage-psp-rb.yml, type: rolebinding }
- name: Local Path Provisioner | Insert extra templates into Local Path Provisioner templates list for PodSecurityPolicy
set_fact:
local_path_provisioner_templates: "{{ local_path_provisioner_templates[:3] + local_path_provisioner_templates_for_psp_not_system_ns + local_path_provisioner_templates[3:] }}"
when:
- podsecuritypolicy_enabled
- local_path_provisioner_namespace != "kube-system"
- name: Local Path Provisioner | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.file }}"
mode: 0644
with_items: "{{ local_path_provisioner_templates }}"
register: local_path_provisioner_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Local Path Provisioner | Apply manifests
kube:
name: "{{ item.item.name }}"
namespace: "{{ local_path_provisioner_namespace }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ local_path_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner-bind
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: {{ local_path_provisioner_namespace }}

View File

@@ -0,0 +1,59 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: local-path-config
namespace: {{ local_path_provisioner_namespace }}
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["{{ local_path_provisioner_claim_root }}"]
}
]
}
setup: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
teardown: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: {% if local_path_provisioner_helper_image_repo is defined %}{{ local_path_provisioner_helper_image_repo }}:{{ local_path_provisioner_helper_image_tag }}{% else %}busybox{% endif %}

View File

@@ -0,0 +1,18 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: local-path-provisioner-role
rules:
- apiGroups: [""]
resources: ["nodes", "persistentvolumeclaims", "configmaps"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["endpoints", "persistentvolumes", "pods"]
verbs: ["*"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]

View File

@@ -0,0 +1,41 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: local-path-provisioner
namespace: {{ local_path_provisioner_namespace }}
spec:
replicas: 1
selector:
matchLabels:
app: local-path-provisioner
template:
metadata:
labels:
app: local-path-provisioner
spec:
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: {{ local_path_provisioner_image_repo }}:{{ local_path_provisioner_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- local-path-provisioner
- start
- --config
- /etc/config/config.json
{% if local_path_provisioner_debug|default(false) %}
- --debug
{% endif %}
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: config-volume
configMap:
name: local-path-config

View File

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ local_path_provisioner_namespace }}

View File

@@ -0,0 +1,15 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp:local-path-provisioner
namespace: {{ local_path_provisioner_namespace }}
rules:
- apiGroups:
- policy
resourceNames:
- local-path-provisioner
resources:
- podsecuritypolicies
verbs:
- use

View File

@@ -0,0 +1,14 @@
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp:local-path-provisioner
namespace: {{ local_path_provisioner_namespace }}
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: {{ local_path_provisioner_namespace }}
roleRef:
kind: ClusterRole
name: psp:local-path-provisioner
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,43 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: local-path-provisioner
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
privileged: true
allowPrivilegeEscalation: true
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'secret'
- 'downwardAPI'
- 'hostPath'
allowedHostPaths:
- pathPrefix: "{{ local_path_provisioner_claim_root }}"
readOnly: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: false

View File

@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner-service-account
namespace: {{ local_path_provisioner_namespace }}

View File

@@ -0,0 +1,10 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ local_path_provisioner_storage_class }}
annotations:
storageclass.kubernetes.io/is-default-class: "{{ local_path_provisioner_is_default_storageclass }}"
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: {{ local_path_provisioner_reclaim_policy }}

View File

@@ -0,0 +1,20 @@
---
local_volume_provisioner_namespace: "kube-system"
# List of node labels to be copied to the PVs created by the provisioner
local_volume_provisioner_nodelabels: []
# - kubernetes.io/hostname
# - topology.kubernetes.io/region
# - topology.kubernetes.io/zone
local_volume_provisioner_tolerations: []
local_volume_provisioner_use_node_name_only: false
# Defined as a Jinja string block to leverage Ansible's string-to-Python datatype casting;
# otherwise the templated dict keys aren't substituted (see the commented override example below).
# See https://github.com/ansible/ansible/issues/17324
local_volume_provisioner_storage_classes: |
{
"{{ local_volume_provisioner_storage_class | default('local-storage') }}": {
"host_dir": "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}",
"mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}",
"volume_mode": "Filesystem",
"fs_type": "ext4"
}
}
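# A minimal override sketch (hypothetical paths and class names, typically set in inventory
# group_vars). Any mapping with these snake_case keys works; each top-level key becomes its
# own StorageClass, base/mount directory and DaemonSet volume:
# local_volume_provisioner_storage_classes:
#   fast-disks:
#     host_dir: /mnt/fast-disks
#     mount_dir: /mnt/fast-disks
#     volume_mode: Filesystem
#     fs_type: ext4
#   slow-disks:
#     host_dir: /mnt/slow-disks
#     mount_dir: /mnt/slow-disks
#     volume_mode: Filesystem
#     fs_type: ext4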

View File

@@ -0,0 +1,12 @@
---
# This task file is included (rather than inlined) to work around a Mitogen issue:
# https://github.com/dw/mitogen/issues/663
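# delegate_host_base_dir is a [host, storage class name] pair supplied by the calling loop
# in main.yml (groups['k8s_cluster'] crossed with the configured storage class keys).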
- name: "Local Volume Provisioner | Ensure base dir {{ delegate_host_base_dir.1 }} is created on {{ delegate_host_base_dir.0 }}"
file:
path: "{{ local_volume_provisioner_storage_classes[delegate_host_base_dir.1].host_dir }}"
state: directory
owner: root
group: root
mode: "{{ local_volume_provisioner_directory_mode }}"
delegate_to: "{{ delegate_host_base_dir.0 }}"

View File

@@ -0,0 +1,48 @@
---
- name: Local Volume Provisioner | Ensure base dir is created on all hosts
include_tasks: basedirs.yml
loop_control:
loop_var: delegate_host_base_dir
loop: "{{ groups['k8s_cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list }}"
- name: Local Volume Provisioner | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/local_volume_provisioner"
state: directory
owner: root
group: root
mode: 0755
- name: Local Volume Provisioner | Templates list
set_fact:
local_volume_provisioner_templates:
- { name: local-volume-provisioner-ns, file: local-volume-provisioner-ns.yml, type: ns }
- { name: local-volume-provisioner-sa, file: local-volume-provisioner-sa.yml, type: sa }
- { name: local-volume-provisioner-clusterrole, file: local-volume-provisioner-clusterrole.yml, type: clusterrole }
- { name: local-volume-provisioner-clusterrolebinding, file: local-volume-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
- { name: local-volume-provisioner-cm, file: local-volume-provisioner-cm.yml, type: cm }
- { name: local-volume-provisioner-ds, file: local-volume-provisioner-ds.yml, type: ds }
- { name: local-volume-provisioner-sc, file: local-volume-provisioner-sc.yml, type: sc }
- name: Local Volume Provisioner | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
mode: 0644
with_items: "{{ local_volume_provisioner_templates }}"
register: local_volume_provisioner_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Local Volume Provisioner | Apply manifests
kube:
name: "{{ item.item.name }}"
namespace: "{{ local_volume_provisioner_namespace }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ local_volume_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0]
loop_control:
label: "{{ item.item.file }}"

View File

@@ -0,0 +1,22 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: local-volume-provisioner-node-clusterrole
namespace: {{ local_volume_provisioner_namespace }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["watch"]
- apiGroups: ["", "events.k8s.io"]
resources: ["events"]
verbs: ["create", "update", "patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]

View File

@@ -0,0 +1,14 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-volume-provisioner-system-node
namespace: {{ local_volume_provisioner_namespace }}
subjects:
- kind: ServiceAccount
name: local-volume-provisioner
namespace: {{ local_volume_provisioner_namespace }}
roleRef:
kind: ClusterRole
name: local-volume-provisioner-node-clusterrole
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,33 @@
# Macro to convert snake_case dictionary keys to camelCase keys (illustrated in the comment after the macro)
{% macro convert_keys(mydict) -%}
{% for key in mydict.keys()|list -%}
{% set key_split = key.split('_') -%}
{% set new_key = key_split[0] + key_split[1:]|map('capitalize')|join -%}
{% set value = mydict.pop(key) -%}
{{ mydict.__setitem__(new_key, value) -}}
{{ convert_keys(value) if value is mapping else None -}}
{% endfor -%}
{% endmacro -%}
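{# Illustrative example: passing {'host_dir': '/mnt/disks', 'volume_mode': 'Filesystem'}
   rewrites the dict in place to {'hostDir': '/mnt/disks', 'volumeMode': 'Filesystem'},
   the camelCase keys expected by the provisioner's storageClassMap. #}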
---
apiVersion: v1
kind: ConfigMap
metadata:
name: local-volume-provisioner
namespace: {{ local_volume_provisioner_namespace }}
data:
{% if local_volume_provisioner_nodelabels | length > 0 %}
nodeLabelsForPV: |
{% for nodelabel in local_volume_provisioner_nodelabels %}
- {{ nodelabel }}
{% endfor %}
{% endif %}
{% if local_volume_provisioner_use_node_name_only %}
useNodeNameOnly: "true"
{% endif %}
storageClassMap: |
{% for class_name, storage_class in local_volume_provisioner_storage_classes.items() %}
{{ class_name }}:
{{- convert_keys(storage_class) }}
{{ storage_class | to_nice_yaml(indent=2) | indent(6) }}
{%- endfor %}

View File

@@ -0,0 +1,66 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: local-volume-provisioner
namespace: {{ local_volume_provisioner_namespace }}
labels:
k8s-app: local-volume-provisioner
version: {{ local_volume_provisioner_image_tag }}
spec:
selector:
matchLabels:
k8s-app: local-volume-provisioner
version: {{ local_volume_provisioner_image_tag }}
template:
metadata:
labels:
k8s-app: local-volume-provisioner
version: {{ local_volume_provisioner_image_tag }}
spec:
priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
serviceAccountName: local-volume-provisioner
nodeSelector:
kubernetes.io/os: linux
{% if local_volume_provisioner_tolerations %}
tolerations:
{{ local_volume_provisioner_tolerations | to_nice_yaml(indent=2) | indent(width=8) }}
{% endif %}
containers:
- name: provisioner
image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
securityContext:
privileged: true
env:
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: local-volume-provisioner
mountPath: /etc/provisioner/config
readOnly: true
- mountPath: /dev
name: provisioner-dev
{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %}
- name: local-volume-provisioner-hostpath-{{ class_name }}
mountPath: {{ class_config.mount_dir }}
mountPropagation: "HostToContainer"
{% endfor %}
volumes:
- name: local-volume-provisioner
configMap:
name: local-volume-provisioner
- name: provisioner-dev
hostPath:
path: /dev
{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %}
- name: local-volume-provisioner-hostpath-{{ class_name }}
hostPath:
path: {{ class_config.host_dir }}
{% endfor %}

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ local_volume_provisioner_namespace }}
labels:
name: {{ local_volume_provisioner_namespace }}

View File

@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-volume-provisioner
namespace: {{ local_volume_provisioner_namespace }}

View File

@@ -0,0 +1,12 @@
{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %}
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ class_name }}
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
{% if class_config.reclaim_policy is defined %}
reclaimPolicy: {{ class_config.reclaim_policy }}
{% endif %}
{% endfor %}

View File

@@ -0,0 +1,30 @@
---
dependencies:
- role: kubernetes-apps/external_provisioner/local_volume_provisioner
when:
- local_volume_provisioner_enabled
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- apps
- local-volume-provisioner
- external-provisioner
- role: kubernetes-apps/external_provisioner/cephfs_provisioner
when: cephfs_provisioner_enabled
tags:
- apps
- cephfs-provisioner
- external-provisioner
- role: kubernetes-apps/external_provisioner/rbd_provisioner
when: rbd_provisioner_enabled
tags:
- apps
- rbd-provisioner
- external-provisioner
- role: kubernetes-apps/external_provisioner/local_path_provisioner
when: local_path_provisioner_enabled
tags:
- apps
- local-path-provisioner
- external-provisioner

View File

@@ -0,0 +1,17 @@
---
rbd_provisioner_namespace: "rbd-provisioner"
rbd_provisioner_replicas: 2
rbd_provisioner_monitors: ~
rbd_provisioner_pool: kube
rbd_provisioner_admin_id: admin
rbd_provisioner_secret_name: ceph-secret-admin
rbd_provisioner_secret: ceph-key-admin
rbd_provisioner_user_id: kube
rbd_provisioner_user_secret_name: ceph-secret-user
rbd_provisioner_user_secret: ceph-key-user
rbd_provisioner_user_secret_namespace: rbd-provisioner
rbd_provisioner_fs_type: ext4
rbd_provisioner_image_format: "2"
rbd_provisioner_image_features: layering
rbd_provisioner_storage_class: rbd
rbd_provisioner_reclaim_policy: Delete

View File

@@ -0,0 +1,80 @@
---
- name: RBD Provisioner | Remove legacy addon dir and manifests
file:
path: "{{ kube_config_dir }}/addons/rbd_provisioner"
state: absent
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
- name: RBD Provisioner | Remove legacy namespace
command: >
{{ kubectl }} delete namespace {{ rbd_provisioner_namespace }}
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
- name: RBD Provisioner | Remove legacy storageclass
command: >
{{ kubectl }} delete storageclass {{ rbd_provisioner_storage_class }}
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
- name: RBD Provisioner | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/rbd_provisioner"
state: directory
owner: root
group: root
mode: 0755
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: RBD Provisioner | Templates list
set_fact:
rbd_provisioner_templates:
- { name: 00-namespace, file: 00-namespace.yml, type: ns }
- { name: secret-rbd-provisioner, file: secret-rbd-provisioner.yml, type: secret }
- { name: sa-rbd-provisioner, file: sa-rbd-provisioner.yml, type: sa }
- { name: clusterrole-rbd-provisioner, file: clusterrole-rbd-provisioner.yml, type: clusterrole }
- { name: clusterrolebinding-rbd-provisioner, file: clusterrolebinding-rbd-provisioner.yml, type: clusterrolebinding }
- { name: role-rbd-provisioner, file: role-rbd-provisioner.yml, type: role }
- { name: rolebinding-rbd-provisioner, file: rolebinding-rbd-provisioner.yml, type: rolebinding }
- { name: deploy-rbd-provisioner, file: deploy-rbd-provisioner.yml, type: deploy }
- { name: sc-rbd-provisioner, file: sc-rbd-provisioner.yml, type: sc }
rbd_provisioner_templates_for_psp:
- { name: psp-rbd-provisioner, file: psp-rbd-provisioner.yml, type: psp }
- name: RBD Provisioner | Append extra templates to RBD Provisioner Templates list for PodSecurityPolicy
set_fact:
rbd_provisioner_templates: "{{ rbd_provisioner_templates_for_psp + rbd_provisioner_templates }}"
when:
- podsecuritypolicy_enabled
- rbd_provisioner_namespace != "kube-system"
- name: RBD Provisioner | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
mode: 0644
with_items: "{{ rbd_provisioner_templates }}"
register: rbd_provisioner_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: RBD Provisioner | Apply manifests
kube:
name: "{{ item.item.name }}"
namespace: "{{ rbd_provisioner_namespace }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ rbd_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ rbd_provisioner_namespace }}
labels:
name: {{ rbd_provisioner_namespace }}

View File

@@ -0,0 +1,30 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rbd-provisioner
namespace: {{ rbd_provisioner_namespace }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
- apiGroups: [""]
resources: ["services"]
resourceNames: ["kube-dns","coredns"]
verbs: ["list", "get"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "delete"]
- apiGroups: ["policy"]
resourceNames: ["rbd-provisioner"]
resources: ["podsecuritypolicies"]
verbs: ["use"]

View File

@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rbd-provisioner
subjects:
- kind: ServiceAccount
name: rbd-provisioner
namespace: {{ rbd_provisioner_namespace }}
roleRef:
kind: ClusterRole
name: rbd-provisioner
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,40 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: rbd-provisioner
namespace: {{ rbd_provisioner_namespace }}
labels:
app: rbd-provisioner
version: {{ rbd_provisioner_image_tag }}
spec:
replicas: {{ rbd_provisioner_replicas }}
strategy:
type: Recreate
selector:
matchLabels:
app: rbd-provisioner
version: {{ rbd_provisioner_image_tag }}
template:
metadata:
labels:
app: rbd-provisioner
version: {{ rbd_provisioner_image_tag }}
spec:
priorityClassName: {% if rbd_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
serviceAccount: rbd-provisioner
containers:
- name: rbd-provisioner
image: {{ rbd_provisioner_image_repo }}:{{ rbd_provisioner_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: PROVISIONER_NAME
value: ceph.com/rbd
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
command:
- "/usr/local/bin/rbd-provisioner"
args:
- "-id=${POD_NAME}"

View File

@@ -0,0 +1,44 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: rbd-provisioner
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false

View File

@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: rbd-provisioner
namespace: {{ rbd_provisioner_namespace }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]

View File

@@ -0,0 +1,14 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rbd-provisioner
namespace: {{ rbd_provisioner_namespace }}
subjects:
- kind: ServiceAccount
name: rbd-provisioner
namespace: {{ rbd_provisioner_namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rbd-provisioner

View File

@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rbd-provisioner
namespace: {{ rbd_provisioner_namespace }}

View File

@@ -0,0 +1,19 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ rbd_provisioner_storage_class }}
provisioner: ceph.com/rbd
reclaimPolicy: {{ rbd_provisioner_reclaim_policy }}
parameters:
monitors: {{ rbd_provisioner_monitors }}
adminId: {{ rbd_provisioner_admin_id }}
adminSecretNamespace: {{ rbd_provisioner_namespace }}
adminSecretName: {{ rbd_provisioner_secret_name }}
pool: {{ rbd_provisioner_pool }}
userId: {{ rbd_provisioner_user_id }}
userSecretNamespace: {{ rbd_provisioner_user_secret_namespace }}
userSecretName: {{ rbd_provisioner_user_secret_name }}
fsType: "{{ rbd_provisioner_fs_type }}"
imageFormat: "{{ rbd_provisioner_image_format }}"
imageFeatures: {{ rbd_provisioner_image_features }}

View File

@@ -0,0 +1,18 @@
---
kind: Secret
apiVersion: v1
metadata:
name: {{ rbd_provisioner_secret_name }}
namespace: {{ rbd_provisioner_namespace }}
type: Opaque
data:
secret: {{ rbd_provisioner_secret | b64encode }}
---
kind: Secret
apiVersion: v1
metadata:
name: {{ rbd_provisioner_user_secret_name }}
namespace: {{ rbd_provisioner_user_secret_namespace }}
type: Opaque
data:
key: {{ rbd_provisioner_user_secret | b64encode }}