This commit is contained in:
havelight-ee
2023-05-30 14:44:26 +09:00
parent 9a3174deef
commit 4c32a7239d
2598 changed files with 164595 additions and 487 deletions


@@ -0,0 +1,93 @@
---
# Limits for coredns
dns_memory_limit: 300Mi
dns_cpu_requests: 100m
dns_memory_requests: 70Mi
dns_min_replicas: "{{ [ 2, groups['k8s_cluster'] | length ] | min }}"
dns_nodes_per_replica: 16
dns_cores_per_replica: 256
dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas|int > 1 else 'false' }}"
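# How these knobs interact (per the cluster-proportional-autoscaler's documented
# "linear" mode, which the dns-autoscaler deployment uses): the controller computes
#   replicas = max(ceil(cores / dns_cores_per_replica), ceil(nodes / dns_nodes_per_replica))
# and raises the result to at least dns_min_replicas. For example, a 10-node,
# 80-core cluster gives max(ceil(80/256), ceil(10/16)) = 1, bumped up to 2 here.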
enable_coredns_reverse_dns_lookups: true
coredns_ordinal_suffix: ""
# dns_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
coredns_deployment_nodeselector: "kubernetes.io/os: linux"
coredns_default_zone_cache_block: |
cache 30
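# coredns_external_zones is unset by default; a hedged sketch of the structure the
# coredns-config template consumes (zones and nameservers are required, cache and
# rewrite are optional; the zone and nameserver values below are illustrative only):
# coredns_external_zones:
#   - zones:
#       - example.internal.
#     nameservers:
#       - 10.0.0.2
#     cache: 5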
# dns_upstream_forward_extra_opts applies to the coredns forward section as well as to the nodelocaldns upstream target forward section (see the rendered illustration below)
# dns_upstream_forward_extra_opts:
# policy: sequential
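# For illustration only: with the commented options above and no upstream_dns_servers,
# the coredns-config template would render a forward stanza roughly like
#   forward . /etc/resolv.conf {
#     prefer_udp
#     max_concurrent 1000
#     policy sequential
#   }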
# nodelocaldns
nodelocaldns_cpu_requests: 100m
nodelocaldns_memory_limit: 200Mi
nodelocaldns_memory_requests: 70Mi
nodelocaldns_ds_nodeselector: "kubernetes.io/os: linux"
nodelocaldns_prometheus_port: 9253
nodelocaldns_secondary_prometheus_port: 9255
# Limits for dns-autoscaler
dns_autoscaler_cpu_requests: 20m
dns_autoscaler_memory_requests: 10Mi
dns_autoscaler_deployment_nodeselector: "kubernetes.io/os: linux"
# dns_autoscaler_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
# etcd metrics
# etcd_metrics_service_labels:
# k8s-app: etcd
# app.kubernetes.io/managed-by: Kubespray
# app: kube-prometheus-stack-kube-etcd
# release: prometheus-stack
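# Note: the etcd metrics Service/Endpoints manifests are only applied when both
# etcd_metrics_service_labels and etcd_metrics_port are defined (see the etcd_metrics
# task gating in kubernetes-apps). Example value, assuming etcd exposes its usual
# metrics port:
# etcd_metrics_port: 2381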
# Netchecker
deploy_netchecker: false
netchecker_port: 31081
agent_report_interval: 15
netcheck_namespace: default
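# Once deployed, connectivity results can be queried through the server's NodePort;
# a sketch using netchecker's documented API path:
#   curl http://<any-node-ip>:31081/api/v1/connectivity_check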
# Limits for netchecker apps
netchecker_agent_cpu_limit: 30m
netchecker_agent_memory_limit: 100M
netchecker_agent_cpu_requests: 15m
netchecker_agent_memory_requests: 64M
netchecker_server_cpu_limit: 100m
netchecker_server_memory_limit: 256M
netchecker_server_cpu_requests: 50m
netchecker_server_memory_requests: 64M
netchecker_etcd_cpu_limit: 200m
netchecker_etcd_memory_limit: 256M
netchecker_etcd_cpu_requests: 100m
netchecker_etcd_memory_requests: 128M
# SecurityContext when PodSecurityPolicy is enabled
netchecker_agent_user: 1000
netchecker_server_user: 1000
netchecker_agent_group: 1000
netchecker_server_group: 1000
# Dashboard
dashboard_replicas: 1
# Namespace for dashboard
dashboard_namespace: kube-system
# Limits for dashboard
dashboard_cpu_limit: 100m
dashboard_memory_limit: 256M
dashboard_cpu_requests: 50m
dashboard_memory_requests: 64M
# Set dashboard_use_custom_certs to true if overriding dashboard_certs_secret_name with a secret that
# contains dashboard_tls_key_file and dashboard_tls_cert_file instead of using the initContainer-provisioned certs
dashboard_use_custom_certs: false
dashboard_certs_secret_name: kubernetes-dashboard-certs
dashboard_tls_key_file: dashboard.key
dashboard_tls_cert_file: dashboard.crt
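# A minimal sketch of pre-creating that secret (assumes the key/cert files exist
# locally and keeps the default key names expected above):
#   kubectl -n kube-system create secret generic kubernetes-dashboard-certs \
#     --from-file=dashboard.key=./dashboard.key --from-file=dashboard.crt=./dashboard.crt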
dashboard_master_toleration: true
# Override dashboard default settings
dashboard_token_ttl: 900
dashboard_skip_login: false
# Policy Controllers
# policy_controller_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]


@@ -0,0 +1,44 @@
---
- name: Kubernetes Apps | Register coredns deployment annotation `createdby`
command: "{{ kubectl }} get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
register: createdby_annotation_deploy
changed_when: false
check_mode: false
ignore_errors: true # noqa ignore-errors
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Register coredns service annotation `createdby`
command: "{{ kubectl }} get svc -n kube-system coredns -o jsonpath='{ .metadata.annotations.createdby }'"
register: createdby_annotation_svc
changed_when: false
check_mode: false
ignore_errors: true # noqa ignore-errors
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Delete kubeadm CoreDNS
kube:
name: "coredns"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "deploy"
state: absent
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
- createdby_annotation_deploy.stdout != 'kubespray'
- name: Kubernetes Apps | Delete kubeadm Kube-DNS service
kube:
name: "kube-dns"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "svc"
state: absent
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
- createdby_annotation_svc.stdout != 'kubespray'


@@ -0,0 +1,44 @@
---
- name: Kubernetes Apps | Lay Down CoreDNS templates
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
loop:
- { name: coredns, file: coredns-clusterrole.yml, type: clusterrole }
- { name: coredns, file: coredns-clusterrolebinding.yml, type: clusterrolebinding }
- { name: coredns, file: coredns-config.yml, type: configmap }
- { name: coredns, file: coredns-deployment.yml, type: deployment }
- { name: coredns, file: coredns-sa.yml, type: sa }
- { name: coredns, file: coredns-svc.yml, type: svc }
- { name: dns-autoscaler, file: dns-autoscaler.yml, type: deployment }
- { name: dns-autoscaler, file: dns-autoscaler-clusterrole.yml, type: clusterrole }
- { name: dns-autoscaler, file: dns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding }
- { name: dns-autoscaler, file: dns-autoscaler-sa.yml, type: sa }
register: coredns_manifests
vars:
clusterIP: "{{ skydns_server }}"
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns
- name: Kubernetes Apps | Lay Down Secondary CoreDNS Template
template:
src: "{{ item.src }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- { name: coredns, src: coredns-deployment.yml, file: coredns-deployment-secondary.yml, type: deployment }
- { name: coredns, src: coredns-svc.yml, file: coredns-svc-secondary.yml, type: svc }
- { name: dns-autoscaler, src: dns-autoscaler.yml, file: coredns-autoscaler-secondary.yml, type: deployment }
register: coredns_secondary_manifests
vars:
clusterIP: "{{ skydns_server_secondary }}"
coredns_ordinal_suffix: "-secondary"
when:
- dns_mode == 'coredns_dual'
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns


@@ -0,0 +1,21 @@
---
- name: Kubernetes Apps | Lay down dashboard template
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- { file: dashboard.yml, type: deploy, name: kubernetes-dashboard }
register: manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Start dashboard
kube:
name: "{{ item.item.name }}"
namespace: "{{ dashboard_namespace }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0]


@@ -0,0 +1,22 @@
---
- name: Kubernetes Apps | Lay down etcd_metrics templates
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- { file: etcd_metrics-endpoints.yml, type: endpoints, name: etcd-metrics }
- { file: etcd_metrics-service.yml, type: service, name: etcd-metrics }
register: manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Start etcd_metrics
kube:
name: "{{ item.item.name }}"
namespace: kube-system
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0]


@@ -0,0 +1,82 @@
---
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
client_cert: "{{ kube_apiserver_client_cert }}"
client_key: "{{ kube_apiserver_client_key }}"
register: result
until: result.status == 200
retries: 20
delay: 1
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Cleanup DNS
import_tasks: cleanup_dns.yml
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
- coredns
- nodelocaldns
- name: Kubernetes Apps | CoreDNS
import_tasks: "coredns.yml"
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns
- name: Kubernetes Apps | nodelocalDNS
import_tasks: "nodelocaldns.yml"
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
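# Note on the loop below: the template tasks above registered per-item results, so
# item.item refers back to each original loop entry (its name/type/file keys) for
# every rendered manifest.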
- name: Kubernetes Apps | Start Resources
kube:
name: "{{ item.item.name }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ coredns_manifests.results | default({}) }}"
- "{{ coredns_secondary_manifests.results | default({}) }}"
- "{{ nodelocaldns_manifests.results | default({}) }}"
- "{{ nodelocaldns_second_manifests.results | default({}) }}"
when:
- dns_mode != 'none'
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
register: resource_result
until: resource_result is succeeded
retries: 4
delay: 5
tags:
- coredns
- nodelocaldns
loop_control:
label: "{{ item.item.file }}"
- name: Kubernetes Apps | Etcd metrics endpoints
import_tasks: etcd_metrics.yml
when: etcd_metrics_port is defined and etcd_metrics_service_labels is defined
tags:
- etcd_metrics
- name: Kubernetes Apps | Netchecker
import_tasks: netchecker.yml
when: deploy_netchecker
tags:
- netchecker
- name: Kubernetes Apps | Dashboard
import_tasks: dashboard.yml
when: dashboard_enabled
tags:
- dashboard


@@ -0,0 +1,56 @@
---
- name: Kubernetes Apps | Check AppArmor status
command: which apparmor_parser
register: apparmor_status
when:
- inventory_hostname == groups['kube_control_plane'][0]
failed_when: false
- name: Kubernetes Apps | Set apparmor_enabled
set_fact:
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Netchecker Templates list
set_fact:
netchecker_templates:
- {file: netchecker-ns.yml, type: ns, name: netchecker-namespace}
- {file: netchecker-agent-sa.yml, type: sa, name: netchecker-agent}
- {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent}
- {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet}
- {file: netchecker-server-sa.yml, type: sa, name: netchecker-server}
- {file: netchecker-server-clusterrole.yml, type: clusterrole, name: netchecker-server}
- {file: netchecker-server-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-server}
- {file: netchecker-server-deployment.yml, type: deployment, name: netchecker-server}
- {file: netchecker-server-svc.yml, type: svc, name: netchecker-service}
netchecker_templates_for_psp:
- {file: netchecker-agent-hostnet-psp.yml, type: podsecuritypolicy, name: netchecker-agent-hostnet-policy}
- {file: netchecker-agent-hostnet-clusterrole.yml, type: clusterrole, name: netchecker-agent}
- {file: netchecker-agent-hostnet-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-agent}
- name: Kubernetes Apps | Append extra templates to Netchecker Templates list for PodSecurityPolicy
set_fact:
netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates }}"
when: podsecuritypolicy_enabled
- name: Kubernetes Apps | Lay Down Netchecker Template
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items: "{{ netchecker_templates }}"
register: manifests
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Start Netchecker Resources
kube:
name: "{{ item.item.name }}"
namespace: "{{ netcheck_namespace }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped


@@ -0,0 +1,75 @@
---
- name: Kubernetes Apps | Set up necessary nodelocaldns parameters
set_fact:
primaryClusterIP: >-
{%- if dns_mode in ['coredns', 'coredns_dual'] -%}
{{ skydns_server }}
{%- elif dns_mode == 'manual' -%}
{{ manual_dns_server }}
{%- endif -%}
secondaryclusterIP: "{{ skydns_server_secondary }}"
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- coredns
- name: Kubernetes Apps | Lay Down nodelocaldns Template
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- { name: nodelocaldns, file: nodelocaldns-config.yml, type: configmap }
- { name: nodelocaldns, file: nodelocaldns-sa.yml, type: sa }
- { name: nodelocaldns, file: nodelocaldns-daemonset.yml, type: daemonset }
register: nodelocaldns_manifests
vars:
forwardTarget: >-
{%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%}
{{ primaryClusterIP }} {{ secondaryclusterIP }}
{%- else -%}
{{ primaryClusterIP }}
{%- endif -%}
upstreamForwardTarget: >-
{%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
{{ upstream_dns_servers|join(' ') }}
{%- else -%}
/etc/resolv.conf
{%- endif -%}
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- coredns
- name: Kubernetes Apps | Lay Down nodelocaldns-secondary Template
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- { name: nodelocaldns, file: nodelocaldns-second-daemonset.yml, type: daemonset }
register: nodelocaldns_second_manifests
vars:
forwardTarget: >-
{%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%}
{{ primaryClusterIP }} {{ secondaryclusterIP }}
{%- else -%}
{{ primaryClusterIP }}
{%- endif -%}
upstreamForwardTarget: >-
{%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
{{ upstream_dns_servers|join(' ') }}
{%- else -%}
/etc/resolv.conf
{%- endif -%}
when:
- enable_nodelocaldns
- enable_nodelocaldns_secondary
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- coredns


@@ -0,0 +1,32 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch


@@ -0,0 +1,18 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system


@@ -0,0 +1,74 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
{% if coredns_external_zones is defined and coredns_external_zones|length > 0 %}
{% for block in coredns_external_zones %}
{{ block['zones'] | join(' ') }} {
log
errors
{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
{% for rewrite_match in block['rewrite'] %}
rewrite {{ rewrite_match }}
{% endfor %}
{% endif %}
forward . {{ block['nameservers'] | join(' ') }}
loadbalance
cache {{ block['cache'] | default(5) }}
reload
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% endfor %}
{% endif %}
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes {{ dns_domain }} {% if enable_coredns_reverse_dns_lookups %}in-addr.arpa ip6.arpa {% endif %}{
pods insecure
{% if enable_coredns_k8s_endpoint_pod_names %}
endpoint_pod_names
{% endif %}
{% if enable_coredns_reverse_dns_lookups %}
fallthrough in-addr.arpa ip6.arpa
{% endif %}
}
prometheus :9153
forward . {{ upstream_dns_servers|join(' ') if upstream_dns_servers is defined and upstream_dns_servers|length > 0 else '/etc/resolv.conf' }} {
prefer_udp
max_concurrent 1000
{% if dns_upstream_forward_extra_opts is defined %}
{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %}
{{ optname }} {{ optvalue }}
{% endfor %}
{% endif %}
}
{% if enable_coredns_k8s_external %}
k8s_external {{ coredns_k8s_external_zone }}
{% endif %}
{{ coredns_default_zone_cache_block | indent(width=8, first=False) }}
loop
reload
loadbalance
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% if dns_etchosts | default(None) %}
hosts: |
{{ dns_etchosts | indent(width=4, first=False) }}
{% endif %}


@@ -0,0 +1,119 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: "coredns{{ coredns_ordinal_suffix }}"
namespace: kube-system
labels:
k8s-app: "kube-dns{{ coredns_ordinal_suffix }}"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "coredns{{ coredns_ordinal_suffix }}"
spec:
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
maxSurge: 10%
selector:
matchLabels:
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
template:
metadata:
labels:
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
createdby: 'kubespray'
spec:
nodeSelector:
{{ coredns_deployment_nodeselector }}
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{% if dns_extra_tolerations is defined %}
{{ dns_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }}
{% endif %}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: In
values:
- ""
containers:
- name: coredns
image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
{% if dns_etchosts | default(None) %}
- key: hosts
path: hosts
{% endif %}


@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile


@@ -0,0 +1,28 @@
---
apiVersion: v1
kind: Service
metadata:
name: coredns{{ coredns_ordinal_suffix }}
namespace: kube-system
labels:
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
kubernetes.io/name: "coredns{{ coredns_ordinal_suffix }}"
addonmanager.kubernetes.io/mode: Reconcile
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
createdby: 'kubespray'
spec:
selector:
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
clusterIP: {{ clusterIP }}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP


@@ -0,0 +1,339 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
{% if dashboard_namespace != "kube-system" %}
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ dashboard_namespace }}
labels:
name: {{ dashboard_namespace }}
{% endif %}
---
# ------------------- Dashboard Secrets ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: {{ dashboard_namespace }}
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: {{ dashboard_namespace }}
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: {{ dashboard_namespace }}
type: Opaque
---
# ------------------- Dashboard ConfigMap ------------------- #
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: {{ dashboard_namespace }}
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
spec:
replicas: {{ dashboard_replicas }}
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
priorityClassName: system-cluster-critical
containers:
- name: kubernetes-dashboard
image: {{ dashboard_image_repo }}:{{ dashboard_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ dashboard_cpu_limit }}
memory: {{ dashboard_memory_limit }}
requests:
cpu: {{ dashboard_cpu_requests }}
memory: {{ dashboard_memory_requests }}
ports:
- containerPort: 8443
protocol: TCP
args:
- --namespace={{ dashboard_namespace }}
{% if dashboard_use_custom_certs %}
- --tls-key-file={{ dashboard_tls_key_file }}
- --tls-cert-file={{ dashboard_tls_cert_file }}
{% else %}
- --auto-generate-certificates
{% endif %}
{% if dashboard_skip_login %}
- --enable-skip-login
{% endif %}
- --authentication-mode=token
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto-discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
- --token-ttl={{ dashboard_token_ttl }}
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: {{ dashboard_certs_secret_name }}
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
{% if dashboard_master_toleration %}
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{% endif %}
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
# ------------------- Metrics Scraper ClusterRole ------------------- #
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# ------------------- Metrics Scraper Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-metrics-scraper
name: dashboard-metrics-scraper
namespace: {{ dashboard_namespace }}
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: kubernetes-metrics-scraper
---
# ------------------- Metrics Scraper Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-metrics-scraper
name: kubernetes-metrics-scraper
namespace: {{ dashboard_namespace }}
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-metrics-scraper
template:
metadata:
labels:
k8s-app: kubernetes-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
priorityClassName: system-cluster-critical
containers:
- name: kubernetes-metrics-scraper
image: {{ dashboard_metrics_scraper_repo }}:{{ dashboard_metrics_scraper_tag }}
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
serviceAccountName: kubernetes-dashboard
volumes:
- name: tmp-volume
emptyDir: {}
{% if dashboard_master_toleration %}
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{% endif %}


@@ -0,0 +1,34 @@
---
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:dns-autoscaler
labels:
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions", "apps"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]


@@ -0,0 +1,29 @@
---
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:dns-autoscaler
labels:
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: dns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:dns-autoscaler
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,22 @@
---
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ServiceAccount
apiVersion: v1
metadata:
name: dns-autoscaler
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile


@@ -0,0 +1,87 @@
---
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: dns-autoscaler{{ coredns_ordinal_suffix }}
namespace: kube-system
labels:
k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }}
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }}
template:
metadata:
labels:
k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }}
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
nodeSelector:
{{ dns_autoscaler_deployment_nodeselector }}
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [ 65534 ]
fsGroup: 65534
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
{% if dns_autoscaler_extra_tolerations is defined %}
{{ dns_autoscaler_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }}
{% endif %}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }}
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: In
values:
- ""
containers:
- name: autoscaler
image: "{{ dnsautoscaler_image_repo }}:{{ dnsautoscaler_image_tag }}"
resources:
requests:
cpu: {{ dns_autoscaler_cpu_requests }}
memory: {{ dns_autoscaler_memory_requests }}
readinessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --default-params={"linear":{"preventSinglePointFailure":{{ dns_prevent_single_point_failure }},"coresPerReplica":{{ dns_cores_per_replica }},"nodesPerReplica":{{ dns_nodes_per_replica }},"min":{{ dns_min_replicas }}}}
- --logtostderr=true
- --v=2
- --configmap=dns-autoscaler{{ coredns_ordinal_suffix }}
- --target=Deployment/coredns{{ coredns_ordinal_suffix }}
serviceAccountName: dns-autoscaler


@@ -0,0 +1,20 @@
apiVersion: v1
kind: Endpoints
metadata:
name: etcd-metrics
namespace: kube-system
labels:
k8s-app: etcd
app.kubernetes.io/managed-by: Kubespray
subsets:
{% for etcd_metrics_address, etcd_host in etcd_metrics_addresses.split(',') | zip(etcd_hosts) %}
- addresses:
- ip: {{ etcd_metrics_address | urlsplit('hostname') }}
targetRef:
kind: Node
name: {{ etcd_host }}
ports:
- name: http-metrics
port: {{ etcd_metrics_address | urlsplit('port') }}
protocol: TCP
{% endfor %}


@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: etcd-metrics
namespace: kube-system
labels:
{{ etcd_metrics_service_labels | to_yaml(indent=2, width=1337) | indent(width=4) }}
spec:
ports:
- name: http-metrics
protocol: TCP
port: {{ etcd_metrics_port }}
# targetPort:


@@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: netchecker-agent
name: netchecker-agent
namespace: {{ netcheck_namespace }}
spec:
selector:
matchLabels:
app: netchecker-agent
template:
metadata:
name: netchecker-agent
labels:
app: netchecker-agent
spec:
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
tolerations:
- effect: NoSchedule
operator: Exists
nodeSelector:
kubernetes.io/os: linux
containers:
- name: netchecker-agent
image: "{{ netcheck_agent_image_repo }}:{{ netcheck_agent_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
args:
- "-v=5"
- "-alsologtostderr=true"
- "-serverendpoint=netchecker-service:8081"
- "-reportinterval={{ agent_report_interval }}"
resources:
limits:
cpu: {{ netchecker_agent_cpu_limit }}
memory: {{ netchecker_agent_memory_limit }}
requests:
cpu: {{ netchecker_agent_cpu_requests }}
memory: {{ netchecker_agent_memory_requests }}
securityContext:
runAsUser: {{ netchecker_agent_user | default('0') }}
runAsGroup: {{ netchecker_agent_group | default('0') }}
serviceAccountName: netchecker-agent
updateStrategy:
rollingUpdate:
maxUnavailable: 100%
type: RollingUpdate


@@ -0,0 +1,14 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp:netchecker-agent-hostnet
namespace: {{ netcheck_namespace }}
rules:
- apiGroups:
- policy
resourceNames:
- netchecker-agent-hostnet
resources:
- podsecuritypolicies
verbs:
- use


@@ -0,0 +1,13 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp:netchecker-agent-hostnet
namespace: {{ netcheck_namespace }}
subjects:
- kind: ServiceAccount
name: netchecker-agent
namespace: {{ netcheck_namespace }}
roleRef:
kind: ClusterRole
name: psp:netchecker-agent-hostnet
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: netchecker-agent-hostnet
name: netchecker-agent-hostnet
namespace: {{ netcheck_namespace }}
spec:
selector:
matchLabels:
app: netchecker-agent-hostnet
template:
metadata:
name: netchecker-agent-hostnet
labels:
app: netchecker-agent-hostnet
spec:
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/os: linux
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: netchecker-agent
image: "{{ netcheck_agent_image_repo }}:{{ netcheck_agent_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
args:
- "-v=5"
- "-alsologtostderr=true"
- "-serverendpoint=netchecker-service:8081"
- "-reportinterval={{ agent_report_interval }}"
resources:
limits:
cpu: {{ netchecker_agent_cpu_limit }}
memory: {{ netchecker_agent_memory_limit }}
requests:
cpu: {{ netchecker_agent_cpu_requests }}
memory: {{ netchecker_agent_memory_requests }}
securityContext:
runAsUser: {{ netchecker_agent_user | default('0') }}
runAsGroup: {{ netchecker_agent_group | default('0') }}
serviceAccountName: netchecker-agent
updateStrategy:
rollingUpdate:
maxUnavailable: 100%
type: RollingUpdate


@@ -0,0 +1,44 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: netchecker-agent-hostnet
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: true
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: netchecker-agent
namespace: {{ netcheck_namespace }}


@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: "{{ netcheck_namespace }}"
labels:
name: "{{ netcheck_namespace }}"


@@ -0,0 +1,9 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: netchecker-server
namespace: {{ netcheck_namespace }}
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "get"]


@@ -0,0 +1,13 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: netchecker-server
namespace: {{ netcheck_namespace }}
subjects:
- kind: ServiceAccount
name: netchecker-server
namespace: {{ netcheck_namespace }}
roleRef:
kind: ClusterRole
name: netchecker-server
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,83 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: netchecker-server
namespace: {{ netcheck_namespace }}
labels:
app: netchecker-server
spec:
replicas: 1
selector:
matchLabels:
app: netchecker-server
template:
metadata:
name: netchecker-server
labels:
app: netchecker-server
spec:
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
volumes:
- name: etcd-data
emptyDir: {}
containers:
- name: netchecker-server
image: "{{ netcheck_server_image_repo }}:{{ netcheck_server_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ netchecker_server_cpu_limit }}
memory: {{ netchecker_server_memory_limit }}
requests:
cpu: {{ netchecker_server_cpu_requests }}
memory: {{ netchecker_server_memory_requests }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
runAsUser: {{ netchecker_server_user | default('0') }}
runAsGroup: {{ netchecker_server_group | default('0') }}
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ports:
- containerPort: 8081
args:
- -v=5
- -logtostderr
- -kubeproxyinit=false
- -endpoint=0.0.0.0:8081
- -etcd-endpoints=http://127.0.0.1:2379
- name: etcd
image: "{{ etcd_image_repo }}:{{ netcheck_etcd_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- etcd
- --listen-client-urls=http://127.0.0.1:2379
- --advertise-client-urls=http://127.0.0.1:2379
- --data-dir=/var/lib/etcd
- --enable-v2
- --force-new-cluster
volumeMounts:
- mountPath: /var/lib/etcd
name: etcd-data
resources:
limits:
cpu: {{ netchecker_etcd_cpu_limit }}
memory: {{ netchecker_etcd_memory_limit }}
requests:
cpu: {{ netchecker_etcd_cpu_requests }}
memory: {{ netchecker_etcd_memory_requests }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
runAsUser: {{ netchecker_server_user | default('0') }}
runAsGroup: {{ netchecker_server_group | default('0') }}
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
tolerations:
- effect: NoSchedule
operator: Exists
serviceAccountName: netchecker-server


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: netchecker-server
namespace: {{ netcheck_namespace }}


@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: netchecker-service
namespace: {{ netcheck_namespace }}
spec:
selector:
app: netchecker-server
ports:
-
protocol: TCP
port: 8081
targetPort: 8081
nodePort: {{ netchecker_port }}
type: NodePort


@@ -0,0 +1,182 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nodelocaldns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %}
{% for block in nodelocaldns_external_zones %}
{{ block['zones'] | join(' ') }} {
errors
cache {{ block['cache'] | default(30) }}
reload
{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
{% for rewrite_match in block['rewrite'] %}
rewrite {{ rewrite_match }}
{% endfor %}
{% endif %}
loop
bind {{ nodelocaldns_ip }}
forward . {{ block['nameservers'] | join(' ') }}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
log
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% endfor %}
{% endif %}
{{ dns_domain }}:53 {
errors
cache {
success 9984 30
denial 9984 5
}
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
health {{ nodelocaldns_ip }}:{{ nodelocaldns_health_port }}
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
in-addr.arpa:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
}
ip6.arpa:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
}
.:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} {
{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %}
{{ optname }} {{ optvalue }}
{% endfor %}
}{% endif %}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% if enable_nodelocaldns_secondary %}
Corefile-second: |
{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %}
{% for block in nodelocaldns_external_zones %}
{{ block['zones'] | join(' ') }} {
errors
cache {{ block['cache'] | default(30) }}
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ block['nameservers'] | join(' ') }}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
log
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% endfor %}
{% endif %}
{{ dns_domain }}:53 {
errors
cache {
success 9984 30
denial 9984 5
}
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
health {{ nodelocaldns_ip }}:{{ nodelocaldns_second_health_port }}
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
in-addr.arpa:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
}
ip6.arpa:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
}
.:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} {
{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %}
{{ optname }} {{ optvalue }}
{% endfor %}
}{% endif %}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% endif %}
{% if dns_etchosts | default(None) %}
hosts: |
{{ dns_etchosts | indent(width=4, first=False) }}
{% endif %}


@@ -0,0 +1,115 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nodelocaldns
namespace: kube-system
labels:
k8s-app: kube-dns
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: nodelocaldns
template:
metadata:
labels:
k8s-app: nodelocaldns
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '{{ nodelocaldns_prometheus_port }}'
spec:
nodeSelector:
{{ nodelocaldns_ds_nodeselector }}
priorityClassName: system-cluster-critical
serviceAccountName: nodelocaldns
hostNetwork: true
dnsPolicy: Default # Don't use cluster DNS.
tolerations:
- effect: NoSchedule
operator: "Exists"
- effect: NoExecute
operator: "Exists"
containers:
- name: node-cache
image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}"
resources:
limits:
memory: {{ nodelocaldns_memory_limit }}
requests:
cpu: {{ nodelocaldns_cpu_requests }}
memory: {{ nodelocaldns_memory_requests }}
args:
- -localip
- {{ nodelocaldns_ip }}
- -conf
- /etc/coredns/Corefile
- -upstreamsvc
- coredns
{% if enable_nodelocaldns_secondary %}
- -skipteardown
{% else %}
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9253
name: metrics
protocol: TCP
{% endif %}
securityContext:
privileged: true
{% if nodelocaldns_bind_metrics_host_ip %}
env:
- name: MY_HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
{% endif %}
livenessProbe:
httpGet:
host: {{ nodelocaldns_ip }}
path: /health
port: {{ nodelocaldns_health_port }}
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
host: {{ nodelocaldns_ip }}
path: /health
port: {{ nodelocaldns_health_port }}
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: config-volume
configMap:
name: nodelocaldns
items:
- key: Corefile
path: Corefile
{% if dns_etchosts | default(None) %}
- key: hosts
path: hosts
{% endif %}
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
updateStrategy:
rollingUpdate:
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate


@@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: nodelocaldns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile


@@ -0,0 +1,103 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nodelocaldns-second
namespace: kube-system
labels:
k8s-app: kube-dns
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: nodelocaldns-second
template:
metadata:
labels:
k8s-app: nodelocaldns-second
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '{{ nodelocaldns_secondary_prometheus_port }}'
spec:
nodeSelector:
{{ nodelocaldns_ds_nodeselector }}
priorityClassName: system-cluster-critical
serviceAccountName: nodelocaldns
hostNetwork: true
dnsPolicy: Default # Don't use cluster DNS.
tolerations:
- effect: NoSchedule
operator: "Exists"
- effect: NoExecute
operator: "Exists"
containers:
- name: node-cache
image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}"
resources:
limits:
memory: {{ nodelocaldns_memory_limit }}
requests:
cpu: {{ nodelocaldns_cpu_requests }}
memory: {{ nodelocaldns_memory_requests }}
args: [ "-localip", "{{ nodelocaldns_ip }}", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns", "-skipteardown" ]
securityContext:
privileged: true
{% if nodelocaldns_bind_metrics_host_ip %}
env:
- name: MY_HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
{% endif %}
livenessProbe:
httpGet:
host: {{ nodelocaldns_ip }}
path: /health
port: {{ nodelocaldns_health_port }}
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
host: {{ nodelocaldns_ip }}
path: /health
port: {{ nodelocaldns_health_port }}
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
- name: xtables-lock
mountPath: /run/xtables.lock
lifecycle:
preStop:
exec:
command:
- sh
- -c
- sleep {{ nodelocaldns_secondary_skew_seconds }} && kill -9 1
volumes:
- name: config-volume
configMap:
name: nodelocaldns
items:
- key: Corefile-second
path: Corefile
{% if dns_etchosts | default(None) %}
- key: hosts
path: hosts
{% endif %}
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Implement a time skew between the main nodelocaldns and this secondary.
# Since the two nodelocaldns instances share the :53 port, we want to keep
# at least one running at any time even if the manifests are replaced simultaneously.
terminationGracePeriodSeconds: {{ nodelocaldns_secondary_skew_seconds }}
updateStrategy:
rollingUpdate:
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate


@@ -0,0 +1,5 @@
---
argocd_enabled: false
argocd_version: v2.5.5
argocd_namespace: argocd
# argocd_admin_password:
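# If set, the plain-text value is bcrypt-hashed by the install tasks before being
# patched into argocd-secret, e.g.:
# argocd_admin_password: "change-me-please"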


@@ -0,0 +1,79 @@
---
- name: Kubernetes Apps | Install yq
become: yes
get_url:
url: "https://github.com/mikefarah/yq/releases/download/v4.30.6/yq_linux_{{ host_architecture }}"
dest: "{{ bin_dir }}/yq"
mode: '0755'
- name: Kubernetes Apps | Set ArgoCD template list
set_fact:
argocd_templates:
- name: namespace
file: argocd-namespace.yml
- name: install
file: argocd-install.yml
namespace: "{{ argocd_namespace }}"
url: "https://raw.githubusercontent.com/argoproj/argo-cd/{{ argocd_version }}/manifests/install.yaml"
when:
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Download ArgoCD remote manifests
become: yes
get_url:
url: "{{ item.url }}"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}"
loop_control:
label: "{{ item.file }}"
when:
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Set ArgoCD namespace for remote manifests
become: yes
command: |
{{ bin_dir }}/yq eval-all -i '.metadata.namespace="{{ argocd_namespace }}"' {{ kube_config_dir }}/{{ item.file }}
with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}"
loop_control:
label: "{{ item.file }}"
when:
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Create ArgoCD manifests from templates
become: yes
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items: "{{ argocd_templates | selectattr('url', 'undefined') | list }}"
loop_control:
label: "{{ item.file }}"
when:
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Install ArgoCD
become: yes
kube:
name: ArgoCD
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.file }}"
state: latest
with_items: "{{ argocd_templates }}"
when:
- "inventory_hostname == groups['kube_control_plane'][0]"
# https://github.com/argoproj/argo-cd/blob/master/docs/faq.md#i-forgot-the-admin-password-how-do-i-reset-it
- name: Kubernetes Apps | Set ArgoCD custom admin password
become: yes
shell: |
{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n {{ argocd_namespace }} patch secret argocd-secret -p \
'{
"stringData": {
"admin.password": "{{ argocd_admin_password | password_hash('bcrypt') }}",
"admin.passwordMtime": "'$(date +%FT%T%Z)'"
}
}'
when:
- argocd_admin_password is defined
- "inventory_hostname == groups['kube_control_plane'][0]"


@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ argocd_namespace }}
labels:
app: argocd


@@ -0,0 +1,6 @@
---
oci_security_list_management: All
oci_use_instance_principals: false
oci_cloud_controller_version: 0.7.0
oci_cloud_controller_pull_source: iad.ocir.io/oracle/cloud-provider-oci


@@ -0,0 +1,67 @@
---
- name: "OCI Cloud Controller | Credentials Check | oci_private_key"
fail:
msg: "oci_private_key is missing"
when:
- not oci_use_instance_principals
- oci_private_key is not defined or not oci_private_key
- name: "OCI Cloud Controller | Credentials Check | oci_region_id"
fail:
msg: "oci_region_id is missing"
when:
- not oci_use_instance_principals
- oci_region_id is not defined or not oci_region_id
- name: "OCI Cloud Controller | Credentials Check | oci_tenancy_id"
fail:
msg: "oci_tenancy_id is missing"
when:
- not oci_use_instance_principals
- oci_tenancy_id is not defined or not oci_tenancy_id
- name: "OCI Cloud Controller | Credentials Check | oci_user_id"
fail:
msg: "oci_user_id is missing"
when:
- not oci_use_instance_principals
- oci_user_id is not defined or not oci_user_id
- name: "OCI Cloud Controller | Credentials Check | oci_user_fingerprint"
fail:
msg: "oci_user_fingerprint is missing"
when:
- not oci_use_instance_principals
- oci_user_fingerprint is not defined or not oci_user_fingerprint
- name: "OCI Cloud Controller | Credentials Check | oci_compartment_id"
fail:
msg: "oci_compartment_id is missing. This is the compartment in which the cluster resides"
when:
- oci_compartment_id is not defined or not oci_compartment_id
- name: "OCI Cloud Controller | Credentials Check | oci_vnc_id"
fail:
msg: "oci_vnc_id is missing. This is the Virtual Cloud Network in which the cluster resides"
when:
- oci_vnc_id is not defined or not oci_vnc_id
- name: "OCI Cloud Controller | Credentials Check | oci_subnet1_id"
fail:
msg: "oci_subnet1_id is missingg. This is the first subnet to which loadbalancers will be added"
when:
- oci_subnet1_id is not defined or not oci_subnet1_id
- name: "OCI Cloud Controller | Credentials Check | oci_subnet2_id"
fail:
msg: "oci_subnet2_id is missing. Two subnets are required for load balancer high availability"
when:
- oci_cloud_controller_version is version_compare('0.7.0', '<')
- oci_subnet2_id is not defined or not oci_subnet2_id
- name: "OCI Cloud Controller | Credentials Check | oci_security_list_management"
fail:
msg: "oci_security_list_management is missing, or not defined correctly. Valid options are (All, Frontend, None)."
when:
- oci_security_list_management is not defined or oci_security_list_management not in ["All", "Frontend", "None"]

View File

@@ -0,0 +1,34 @@
---
- include_tasks: credentials-check.yml
- name: "OCI Cloud Controller | Generate Cloud Provider Configuration"
template:
src: controller-manager-config.yml.j2
dest: "{{ kube_config_dir }}/controller-manager-config.yml"
mode: 0644
when: inventory_hostname == groups['kube_control_plane'][0]
- name: "OCI Cloud Controller | Slurp Configuration"
slurp:
src: "{{ kube_config_dir }}/controller-manager-config.yml"
register: controller_manager_config
- name: "OCI Cloud Controller | Encode Configuration"
set_fact:
controller_manager_config_base64: "{{ controller_manager_config.content }}"
when: inventory_hostname == groups['kube_control_plane'][0]
- name: "OCI Cloud Controller | Generate Manifests"
template:
src: oci-cloud-provider.yml.j2
dest: "{{ kube_config_dir }}/oci-cloud-provider.yml"
mode: 0644
when: inventory_hostname == groups['kube_control_plane'][0]
- name: "OCI Cloud Controller | Apply Manifests"
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/oci-cloud-provider.yml"
state: latest
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -0,0 +1,90 @@
{% macro private_key() %}{{ oci_private_key }}{% endmacro %}
{% if oci_use_instance_principals %}
# (https://docs.us-phoenix-1.oraclecloud.com/Content/Identity/Tasks/callingservicesfrominstances.htm).
# Ensure you have set up the following OCI policies and that your Kubernetes nodes are running within them
# allow dynamic-group [your dynamic group name] to read instance-family in compartment [your compartment name]
# allow dynamic-group [your dynamic group name] to use virtual-network-family in compartment [your compartment name]
# allow dynamic-group [your dynamic group name] to manage load-balancers in compartment [your compartment name]
useInstancePrincipals: true
{% else %}
useInstancePrincipals: false
{% endif %}
auth:
{% if oci_use_instance_principals %}
# This key is put here too for backwards compatibility
useInstancePrincipals: true
{% else %}
useInstancePrincipals: false
region: {{ oci_region_id }}
tenancy: {{ oci_tenancy_id }}
user: {{ oci_user_id }}
key: |
{{ oci_private_key }}
{% if oci_private_key_passphrase is defined %}
passphrase: {{ oci_private_key_passphrase }}
{% endif %}
fingerprint: {{ oci_user_fingerprint }}
{% endif %}
# compartment configures Compartment within which the cluster resides.
compartment: {{ oci_compartment_id }}
# vcn configures the Virtual Cloud Network (VCN) within which the cluster resides.
vcn: {{ oci_vnc_id }}
loadBalancer:
# subnet1 configures one of two subnets to which load balancers will be added.
# OCI load balancers require two subnets to ensure high availability.
subnet1: {{ oci_subnet1_id }}
{% if oci_subnet2_id is defined %}
# subnet2 configures the second of two subnets to which load balancers will be
# added. OCI load balancers require two subnets to ensure high availability.
subnet2: {{ oci_subnet2_id }}
{% endif %}
# SecurityListManagementMode configures how security lists are managed by the CCM.
# "All" (default): Manage all required security list rules for load balancer services.
# "Frontend": Manage only security list rules for ingress to the load
#                 balancer. Requires that the user has set up a rule that
# allows inbound traffic to the appropriate ports for kube
# proxy health port, node port ranges, and health check port ranges.
# E.g. 10.82.0.0/16 30000-32000.
# "None": Disables all security list management. Requires that the
#                 user has set up a rule that allows inbound traffic to the
#                 appropriate ports for kube proxy health port, node port
#                 ranges, and health check port ranges. E.g. 10.82.0.0/16 30000-32000.
#                 Additionally requires the user to manage rules to allow
# inbound traffic to load balancers.
securityListManagementMode: {{ oci_security_list_management }}
{% if oci_security_lists is defined and oci_security_lists|length > 0 %}
# Optional specification of which security lists to modify per subnet. This does not apply if security list management is off.
securityLists:
{% for subnet_ocid, list_ocid in oci_security_lists.items() %}
{{ subnet_ocid }}: {{ list_ocid }}
{% endfor %}
{% endif %}
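# A minimal sketch (hypothetical OCIDs) of the oci_security_lists mapping
# consumed by the securityLists block above:
# oci_security_lists:
#   ocid1.subnet.oc1..aaaa: ocid1.securitylist.oc1..bbbb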
{% if oci_rate_limit is defined and oci_rate_limit|length > 0 %}
# Optional rate limit controls for accessing OCI API
rateLimiter:
{% if oci_rate_limit.rate_limit_qps_read %}
rateLimitQPSRead: {{ oci_rate_limit.rate_limit_qps_read }}
{% endif %}
{% if oci_rate_limit.rate_limit_qps_write %}
rateLimitQPSWrite: {{ oci_rate_limit.rate_limit_qps_write }}
{% endif %}
{% if oci_rate_limit.rate_limit_bucket_read %}
rateLimitBucketRead: {{ oci_rate_limit.rate_limit_bucket_read }}
{% endif %}
{% if oci_rate_limit.rate_limit_bucket_write %}
rateLimitBucketWrite: {{ oci_rate_limit.rate_limit_bucket_write }}
{% endif %}
{% endif %}
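# A minimal sketch (hypothetical numbers) of the oci_rate_limit variable
# matching the rateLimiter keys rendered above:
# oci_rate_limit:
#   rate_limit_qps_read: 20.0
#   rate_limit_qps_write: 20.0
#   rate_limit_bucket_read: 5
#   rate_limit_bucket_write: 5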

View File

@@ -0,0 +1,73 @@
apiVersion: v1
data:
cloud-provider.yaml: {{ controller_manager_config_base64 }}
kind: Secret
metadata:
name: oci-cloud-controller-manager
namespace: kube-system
type: Opaque
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: oci-cloud-controller-manager
namespace: kube-system
labels:
k8s-app: oci-cloud-controller-manager
spec:
selector:
matchLabels:
component: oci-cloud-controller-manager
tier: control-plane
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
component: oci-cloud-controller-manager
tier: control-plane
spec:
{% if oci_cloud_controller_pull_secret is defined %}
imagePullSecrets:
      - name: {{ oci_cloud_controller_pull_secret }}
{% endif %}
serviceAccountName: cloud-controller-manager
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
volumes:
- name: cfg
secret:
secretName: oci-cloud-controller-manager
- name: kubernetes
hostPath:
path: /etc/kubernetes
containers:
- name: oci-cloud-controller-manager
        image: {{ oci_cloud_controller_pull_source }}:{{ oci_cloud_controller_version }}
command: ["/usr/local/bin/oci-cloud-controller-manager"]
args:
- --cloud-config=/etc/oci/cloud-provider.yaml
- --cloud-provider=oci
- --leader-elect-resource-lock=configmaps
- -v=2
volumeMounts:
- name: cfg
mountPath: /etc/oci
readOnly: true
- name: kubernetes
mountPath: /etc/kubernetes
readOnly: true

View File

@@ -0,0 +1,65 @@
---
podsecuritypolicy_restricted_spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
runAsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
podsecuritypolicy_privileged_spec:
privileged: true
allowPrivilegeEscalation: true
allowedCapabilities:
- '*'
volumes:
- '*'
hostNetwork: true
hostPorts:
- min: 0
max: 65535
hostIPC: true
hostPID: true
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
runAsGroup:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: false
# This will fail if allowed-unsafe-sysctls is not set accordingly in kubelet flags
allowedUnsafeSysctls:
- '*'
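# A minimal sketch of the matching kubelet setting, assuming the
# kubelet_custom_flags list variable is honored by the kubelet role:
# kubelet_custom_flags:
#   - "--allowed-unsafe-sysctls=net.*,kernel.msg*"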

View File

@@ -0,0 +1,8 @@
---
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: k8s-cluster-critical
value: 1000000000
globalDefault: false
description: "This priority class should only be used by the pods installed using kubespray."

View File

@@ -0,0 +1,124 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:cloud-controller-manager
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- '*'
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- services
verbs:
- list
- watch
- patch
- apiGroups:
- ""
resources:
- services/status
verbs:
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
# For leader election
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
resourceNames:
- "cloud-controller-manager"
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- "cloud-controller-manager"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
# For the PVL
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- list
- watch
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: oci-cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:cloud-controller-manager
subjects:
- kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system

View File

@@ -0,0 +1,109 @@
---
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
client_cert: "{{ kube_apiserver_client_cert }}"
client_key: "{{ kube_apiserver_client_key }}"
register: result
until: result.status == 200
retries: 10
delay: 6
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes
template:
src: "node-crb.yml.j2"
dest: "{{ kube_config_dir }}/node-crb.yml"
mode: 0640
register: node_crb_manifest
when:
- rbac_enabled
- inventory_hostname == groups['kube_control_plane'][0]
- name: Apply workaround to allow all nodes with cert O=system:nodes to register
kube:
name: "kubespray:system:node"
kubectl: "{{ bin_dir }}/kubectl"
resource: "clusterrolebinding"
filename: "{{ kube_config_dir }}/node-crb.yml"
state: latest
register: result
until: result is succeeded
retries: 10
delay: 6
when:
- rbac_enabled
- node_crb_manifest.changed
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet
template:
src: "node-webhook-cr.yml.j2"
dest: "{{ kube_config_dir }}/node-webhook-cr.yml"
mode: 0640
register: node_webhook_cr_manifest
when:
- rbac_enabled
- kubelet_authorization_mode_webhook
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- name: Apply webhook ClusterRole
kube:
name: "system:node-webhook"
kubectl: "{{ bin_dir }}/kubectl"
resource: "clusterrole"
filename: "{{ kube_config_dir }}/node-webhook-cr.yml"
state: latest
when:
- rbac_enabled
- kubelet_authorization_mode_webhook
- node_webhook_cr_manifest.changed
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole
template:
src: "node-webhook-crb.yml.j2"
dest: "{{ kube_config_dir }}/node-webhook-crb.yml"
mode: 0640
register: node_webhook_crb_manifest
when:
- rbac_enabled
- kubelet_authorization_mode_webhook
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- name: Grant system:nodes the webhook ClusterRole
kube:
name: "system:node-webhook"
kubectl: "{{ bin_dir }}/kubectl"
resource: "clusterrolebinding"
filename: "{{ kube_config_dir }}/node-webhook-crb.yml"
state: latest
when:
- rbac_enabled
- kubelet_authorization_mode_webhook
- node_webhook_crb_manifest.changed
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- include_tasks: oci.yml
tags: oci
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
  copy:
    src: k8s-cluster-critical-pc.yml
    dest: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
    mode: 0640
when: inventory_hostname == groups['kube_control_plane']|last
- name: PriorityClass | Create k8s-cluster-critical
kube:
name: k8s-cluster-critical
kubectl: "{{ bin_dir }}/kubectl"
resource: "PriorityClass"
filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
state: latest
when: inventory_hostname == groups['kube_control_plane']|last

View File

@@ -0,0 +1,19 @@
---
- name: Copy OCI RBAC Manifest
copy:
src: "oci-rbac.yml"
dest: "{{ kube_config_dir }}/oci-rbac.yml"
mode: 0640
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube_control_plane'][0]
- name: Apply OCI RBAC
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/oci-rbac.yml"
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: "kube-system"

View File

@@ -0,0 +1,17 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: kubespray:system:node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes

View File

@@ -0,0 +1,20 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:node-webhook
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/stats
- nodes/log
- nodes/spec
- nodes/metrics
verbs:
- "*"

View File

@@ -0,0 +1,17 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:node-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-webhook
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes

View File

@@ -0,0 +1,35 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:vsphere-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:vsphere-cloud-provider
roleRef:
kind: ClusterRole
name: system:vsphere-cloud-provider
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: vsphere-cloud-provider
namespace: kube-system

View File

@@ -0,0 +1,8 @@
---
dependencies:
- role: kubernetes-apps/container_engine_accelerator/nvidia_gpu
when: nvidia_accelerator_enabled
tags:
- apps
- nvidia_gpu
- container_engine_accelerator

View File

@@ -0,0 +1,14 @@
---
nvidia_accelerator_enabled: false
nvidia_driver_version: "390.87"
nvidia_gpu_tesla_base_url: https://us.download.nvidia.com/tesla/
nvidia_gpu_gtx_base_url: http://us.download.nvidia.com/XFree86/Linux-x86_64/
nvidia_gpu_flavor: tesla
nvidia_url_end: "{{ nvidia_driver_version }}/NVIDIA-Linux-x86_64-{{ nvidia_driver_version }}.run"
nvidia_driver_install_container: false
nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
nvidia_driver_install_ubuntu_container: registry.k8s.io/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
nvidia_driver_install_supported: false
nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
nvidia_gpu_nodes: []
nvidia_gpu_device_plugin_memory: 30Mi
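# A minimal sketch (hypothetical hostnames and version) of a GTX-flavored
# override built from the variables above:
# nvidia_accelerator_enabled: true
# nvidia_gpu_flavor: gtx
# nvidia_driver_version: "418.88"
# nvidia_gpu_nodes:
#   - gpu-node-1
#   - gpu-node-2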

View File

@@ -0,0 +1,55 @@
---
- name: Container Engine Acceleration Nvidia GPU | Gather OS specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
skip: true
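# For example, on Ubuntu 18.04 the lookup above tries, in order:
# ubuntu-18.04.yml, ubuntu-bionic.yml, ubuntu-18.yml, ubuntu.yml, debian.yml,
# and is skipped silently when none of them exist.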
- name: Container Engine Acceleration Nvidia GPU | Set fact of download url Tesla
set_fact:
nvidia_driver_download_url_default: "{{ nvidia_gpu_tesla_base_url }}{{ nvidia_url_end }}"
when: nvidia_gpu_flavor|lower == "tesla"
- name: Container Engine Acceleration Nvidia GPU | Set fact of download url GTX
set_fact:
nvidia_driver_download_url_default: "{{ nvidia_gpu_gtx_base_url }}{{ nvidia_url_end }}"
when: nvidia_gpu_flavor|lower == "gtx"
- name: Container Engine Acceleration Nvidia GPU | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/container_engine_accelerator"
owner: root
group: root
mode: 0755
recurse: true
- name: Container Engine Acceleration Nvidia GPU | Create manifests for nvidia accelerators
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.file }}"
mode: 0644
with_items:
- { name: nvidia-driver-install-daemonset, file: nvidia-driver-install-daemonset.yml, type: daemonset }
- { name: k8s-device-plugin-nvidia-daemonset, file: k8s-device-plugin-nvidia-daemonset.yml, type: daemonset }
register: container_engine_accelerator_manifests
when:
- inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container
- name: Container Engine Acceleration Nvidia GPU | Apply manifests for nvidia accelerators
kube:
name: "{{ item.item.name }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ container_engine_accelerator_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container and nvidia_driver_install_supported

View File

@@ -0,0 +1,60 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nvidia-gpu-device-plugin
namespace: kube-system
labels:
k8s-app: nvidia-gpu-device-plugin
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: nvidia-gpu-device-plugin
template:
metadata:
labels:
k8s-app: nvidia-gpu-device-plugin
spec:
priorityClassName: system-node-critical
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "nvidia.com/gpu"
operator: Exists
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
volumes:
- name: device-plugin
hostPath:
path: /var/lib/kubelet/device-plugins
- name: dev
hostPath:
path: /dev
containers:
- image: "{{ nvidia_gpu_device_plugin_container }}"
command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
name: nvidia-gpu-device-plugin
resources:
requests:
cpu: 50m
memory: {{ nvidia_gpu_device_plugin_memory }}
limits:
cpu: 50m
memory: {{ nvidia_gpu_device_plugin_memory }}
securityContext:
privileged: true
volumeMounts:
- name: device-plugin
mountPath: /device-plugin
- name: dev
mountPath: /dev
updateStrategy:
type: RollingUpdate

View File

@@ -0,0 +1,82 @@
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nvidia-driver-installer
namespace: kube-system
spec:
selector:
matchLabels:
name: nvidia-driver-installer
template:
metadata:
labels:
name: nvidia-driver-installer
spec:
priorityClassName: system-node-critical
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "nvidia.com/gpu"
operator: Exists
tolerations:
- key: "nvidia.com/gpu"
effect: "NoSchedule"
operator: "Exists"
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
volumes:
- name: dev
hostPath:
path: /dev
- name: nvidia-install-dir-host
hostPath:
path: /home/kubernetes/bin/nvidia
- name: root-mount
hostPath:
path: /
initContainers:
- image: "{{ nvidia_driver_install_container }}"
name: nvidia-driver-installer
resources:
requests:
cpu: 0.15
securityContext:
privileged: true
env:
- name: NVIDIA_INSTALL_DIR_HOST
value: /home/kubernetes/bin/nvidia
- name: NVIDIA_INSTALL_DIR_CONTAINER
value: /usr/local/nvidia
- name: ROOT_MOUNT_DIR
value: /root
- name: NVIDIA_DRIVER_VERSION
value: "{{ nvidia_driver_version }}"
- name: NVIDIA_DRIVER_DOWNLOAD_URL
value: "{{ nvidia_driver_download_url_default }}"
volumeMounts:
- name: nvidia-install-dir-host
mountPath: /usr/local/nvidia
- name: dev
mountPath: /dev
- name: root-mount
mountPath: /root
containers:
- image: "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
name: pause

View File

@@ -0,0 +1,3 @@
---
nvidia_driver_install_container: "{{ nvidia_driver_install_centos_container }}"
nvidia_driver_install_supported: true

View File

@@ -0,0 +1,3 @@
---
nvidia_driver_install_container: "{{ nvidia_driver_install_ubuntu_container }}"
nvidia_driver_install_supported: true

View File

@@ -0,0 +1,3 @@
---
nvidia_driver_install_container: "{{ nvidia_driver_install_ubuntu_container }}"
nvidia_driver_install_supported: true

View File

@@ -0,0 +1,19 @@
---
- name: crun | Copy runtime class manifest
template:
src: runtimeclass-crun.yml
dest: "{{ kube_config_dir }}/runtimeclass-crun.yml"
mode: "0664"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: crun | Apply manifests
kube:
name: "runtimeclass-crun"
kubectl: "{{ bin_dir }}/kubectl"
resource: "runtimeclass"
filename: "{{ kube_config_dir }}/runtimeclass-crun.yml"
state: "latest"
when:
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -0,0 +1,6 @@
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
name: crun
handler: crun

View File

@@ -0,0 +1,34 @@
---
- name: gVisor | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/gvisor"
owner: root
group: root
mode: 0755
recurse: true
- name: gVisor | Templates List
set_fact:
gvisor_templates:
- { name: runtimeclass-gvisor, file: runtimeclass-gvisor.yml, type: runtimeclass }
- name: gVisor | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/gvisor/{{ item.file }}"
mode: 0644
with_items: "{{ gvisor_templates }}"
register: gvisor_manifests
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: gVisor | Apply manifests
kube:
name: "{{ item.item.name }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/gvisor/{{ item.item.file }}"
state: "latest"
with_items: "{{ gvisor_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -0,0 +1,6 @@
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
name: gvisor
handler: runsc

View File

@@ -0,0 +1,5 @@
---
kata_containers_qemu_overhead: true
kata_containers_qemu_overhead_fixed_cpu: 250m
kata_containers_qemu_overhead_fixed_memory: 160Mi

View File

@@ -0,0 +1,35 @@
---
- name: Kata Containers | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/kata_containers"
owner: root
group: root
mode: 0755
recurse: true
- name: Kata Containers | Templates list
set_fact:
kata_containers_templates:
- { name: runtimeclass-kata-qemu, file: runtimeclass-kata-qemu.yml, type: runtimeclass }
- name: Kata Containers | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/kata_containers/{{ item.file }}"
mode: 0644
with_items: "{{ kata_containers_templates }}"
register: kata_containers_manifests
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kata Containers | Apply manifests
kube:
name: "{{ item.item.name }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/kata_containers/{{ item.item.file }}"
state: "latest"
with_items: "{{ kata_containers_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -0,0 +1,12 @@
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
name: kata-qemu
handler: kata-qemu
{% if kata_containers_qemu_overhead %}
overhead:
podFixed:
cpu: {{ kata_containers_qemu_overhead_fixed_cpu }}
memory: {{ kata_containers_qemu_overhead_fixed_memory }}
{% endif %}
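# Pods opt in to this runtime by name; a minimal sketch of a consuming pod:
#   apiVersion: v1
#   kind: Pod
#   spec:
#     runtimeClassName: kata-qemu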

View File

@@ -0,0 +1,31 @@
---
dependencies:
- role: kubernetes-apps/container_runtimes/kata_containers
when: kata_containers_enabled
tags:
- apps
- kata-containers
- container-runtimes
- role: kubernetes-apps/container_runtimes/gvisor
when: gvisor_enabled
tags:
- apps
- gvisor
- container-runtimes
- role: kubernetes-apps/container_runtimes/crun
when: crun_enabled
tags:
- apps
- crun
- container-runtimes
- role: kubernetes-apps/container_runtimes/youki
when:
- youki_enabled
- container_manager == 'crio'
tags:
- apps
- youki
- container-runtimes

View File

@@ -0,0 +1,19 @@
---
- name: youki | Copy runtime class manifest
template:
src: runtimeclass-youki.yml
dest: "{{ kube_config_dir }}/runtimeclass-youki.yml"
mode: "0664"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: youki | Apply manifests
kube:
name: "runtimeclass-youki"
kubectl: "{{ bin_dir }}/kubectl"
resource: "runtimeclass"
filename: "{{ kube_config_dir }}/runtimeclass-youki.yml"
state: "latest"
when:
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -0,0 +1,6 @@
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
name: youki
handler: youki

View File

@@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
reviewers:
- alijahnas
- luckySB

View File

@@ -0,0 +1,11 @@
---
aws_ebs_csi_enable_volume_scheduling: true
aws_ebs_csi_enable_volume_snapshot: false
aws_ebs_csi_enable_volume_resizing: false
aws_ebs_csi_controller_replicas: 1
aws_ebs_csi_plugin_image_tag: latest
# Add annotations to the EBS CSI controller. Useful when using kube2iam for role assumption.
# aws_ebs_csi_annotations:
# - key: iam.amazonaws.com/role
# value: your-ebs-role-arn

View File

@@ -0,0 +1,26 @@
---
- name: AWS CSI Driver | Generate Manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: aws-ebs-csi-driver, file: aws-ebs-csi-driver.yml}
- {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice-rbac.yml}
- {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice.yml}
- {name: aws-ebs-csi-nodeservice, file: aws-ebs-csi-nodeservice.yml}
register: aws_csi_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: AWS CSI Driver | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ aws_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -0,0 +1,180 @@
# Controller Service
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-csi-controller-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# The permissions in this ClusterRole are tightly coupled with the version of csi-attacher used. More information about this can be found in kubernetes-csi/external-attacher.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-attacher-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-attacher-role
apiGroup: rbac.authorization.k8s.io
{% if aws_ebs_csi_enable_volume_snapshot %}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-snapshotter-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-snapshotter-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
{% endif %}
{% if aws_ebs_csi_enable_volume_resizing %}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-resizer-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-resizer-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-resizer-role
apiGroup: rbac.authorization.k8s.io
{% endif %}

View File

@@ -0,0 +1,132 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: ebs-csi-controller
namespace: kube-system
spec:
replicas: {{ aws_ebs_csi_controller_replicas }}
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/name: aws-ebs-csi-driver
template:
metadata:
labels:
app: ebs-csi-controller
app.kubernetes.io/name: aws-ebs-csi-driver
{% if aws_ebs_csi_annotations is defined %}
annotations:
{% for annotation in aws_ebs_csi_annotations %}
{{ annotation.key }}: {{ annotation.value }}
{% endfor %}
{% endif %}
spec:
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ebs-csi-controller-sa
priorityClassName: system-cluster-critical
containers:
- name: ebs-plugin
image: {{ aws_ebs_csi_plugin_image_repo }}:{{ aws_ebs_csi_plugin_image_tag }}
args:
- --endpoint=$(CSI_ENDPOINT)
{% if aws_ebs_csi_extra_volume_tags is defined %}
- --extra-volume-tags={{ aws_ebs_csi_extra_volume_tags }}
{% endif %}
- --logtostderr
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-secret
key: key_id
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-secret
key: access_key
optional: true
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
ports:
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 5
- name: csi-provisioner
image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }}
args:
- --csi-address=$(ADDRESS)
- --v=5
{% if aws_ebs_csi_enable_volume_scheduling %}
- --feature-gates=Topology=true
{% endif %}
- --leader-election=true
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }}
args:
- --csi-address=$(ADDRESS)
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{% if aws_ebs_csi_enable_volume_snapshot %}
- name: csi-snapshotter
image: {{ csi_snapshotter_image_repo }}:{{ csi_snapshotter_image_tag }}
args:
- --csi-address=$(ADDRESS)
- --timeout=15s
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{% endif %}
{% if aws_ebs_csi_enable_volume_resizing %}
- name: csi-resizer
image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- --csi-address=$(ADDRESS)
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{% endif %}
- name: liveness-probe
image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }}
args:
- --csi-address=/csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
volumes:
- name: socket-dir
emptyDir: {}

View File

@@ -0,0 +1,8 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: ebs.csi.aws.com
spec:
attachRequired: true
podInfoOnMount: false

View File

@@ -0,0 +1,101 @@
---
# Node Service
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ebs-csi-node
namespace: kube-system
spec:
selector:
matchLabels:
app: ebs-csi-node
app.kubernetes.io/name: aws-ebs-csi-driver
template:
metadata:
labels:
app: ebs-csi-node
app.kubernetes.io/name: aws-ebs-csi-driver
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
priorityClassName: system-node-critical
containers:
- name: ebs-plugin
securityContext:
privileged: true
image: {{ aws_ebs_csi_plugin_image_repo }}:{{ aws_ebs_csi_plugin_image_tag }}
args:
- --endpoint=$(CSI_ENDPOINT)
{% if aws_ebs_csi_extra_volume_tags is defined %}
- --extra-volume-tags={{ aws_ebs_csi_extra_volume_tags }}
{% endif %}
- --logtostderr
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:/csi/csi.sock
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /csi
- name: device-dir
mountPath: /dev
ports:
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 5
- name: node-driver-registrar
image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }}
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=5
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock"]
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: liveness-probe
image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }}
args:
- --csi-address=/csi/csi.sock
volumeMounts:
- name: plugin-dir
mountPath: /csi
volumes:
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: device-dir
hostPath:
path: /dev
type: Directory

View File

@@ -0,0 +1,6 @@
---
azure_csi_use_instance_metadata: true
azure_csi_controller_replicas: 2
azure_csi_plugin_image_tag: latest
azure_csi_controller_affinity: {}
azure_csi_node_affinity: {}
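# A minimal sketch (assumed zone label/value) of an affinity override; it is
# rendered verbatim into the controller Deployment via to_nice_yaml:
# azure_csi_controller_affinity:
#   nodeAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#       nodeSelectorTerms:
#         - matchExpressions:
#             - key: topology.kubernetes.io/zone
#               operator: In
#               values: ["westeurope-1"]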

View File

@@ -0,0 +1,54 @@
---
- name: Azure CSI Driver | check azure_csi_tenant_id value
fail:
msg: "azure_csi_tenant_id is missing"
when: azure_csi_tenant_id is not defined or not azure_csi_tenant_id
- name: Azure CSI Driver | check azure_csi_subscription_id value
fail:
msg: "azure_csi_subscription_id is missing"
when: azure_csi_subscription_id is not defined or not azure_csi_subscription_id
- name: Azure CSI Driver | check azure_csi_aad_client_id value
fail:
msg: "azure_csi_aad_client_id is missing"
when: azure_csi_aad_client_id is not defined or not azure_csi_aad_client_id
- name: Azure CSI Driver | check azure_csi_aad_client_secret value
fail:
msg: "azure_csi_aad_client_secret is missing"
when: azure_csi_aad_client_secret is not defined or not azure_csi_aad_client_secret
- name: Azure CSI Driver | check azure_csi_resource_group value
fail:
msg: "azure_csi_resource_group is missing"
when: azure_csi_resource_group is not defined or not azure_csi_resource_group
- name: Azure CSI Driver | check azure_csi_location value
fail:
msg: "azure_csi_location is missing"
when: azure_csi_location is not defined or not azure_csi_location
- name: Azure CSI Driver | check azure_csi_subnet_name value
fail:
msg: "azure_csi_subnet_name is missing"
when: azure_csi_subnet_name is not defined or not azure_csi_subnet_name
- name: Azure CSI Driver | check azure_csi_security_group_name value
fail:
msg: "azure_csi_security_group_name is missing"
when: azure_csi_security_group_name is not defined or not azure_csi_security_group_name
- name: Azure CSI Driver | check azure_csi_vnet_name value
fail:
msg: "azure_csi_vnet_name is missing"
when: azure_csi_vnet_name is not defined or not azure_csi_vnet_name
- name: Azure CSI Driver | check azure_csi_vnet_resource_group value
fail:
msg: "azure_csi_vnet_resource_group is missing"
when: azure_csi_vnet_resource_group is not defined or not azure_csi_vnet_resource_group
- name: "Azure CSI Driver | check azure_csi_use_instance_metadata is a bool"
assert:
that: azure_csi_use_instance_metadata | type_debug == 'bool'

View File

@@ -0,0 +1,44 @@
---
- include_tasks: azure-credential-check.yml
- name: Azure CSI Driver | Write Azure CSI cloud-config
template:
src: "azure-csi-cloud-config.j2"
dest: "{{ kube_config_dir }}/azure_csi_cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Azure CSI Driver | Get base64 cloud-config
slurp:
src: "{{ kube_config_dir }}/azure_csi_cloud_config"
register: cloud_config_secret
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Azure CSI Driver | Generate Manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: azure-csi-azuredisk-driver, file: azure-csi-azuredisk-driver.yml}
- {name: azure-csi-cloud-config-secret, file: azure-csi-cloud-config-secret.yml}
- {name: azure-csi-azuredisk-controller, file: azure-csi-azuredisk-controller-rbac.yml}
- {name: azure-csi-azuredisk-controller, file: azure-csi-azuredisk-controller.yml}
- {name: azure-csi-azuredisk-node-rbac, file: azure-csi-azuredisk-node-rbac.yml}
- {name: azure-csi-azuredisk-node, file: azure-csi-azuredisk-node.yml}
register: azure_csi_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Azure CSI Driver | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ azure_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -0,0 +1,230 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-azuredisk-controller-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-attacher-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-attacher-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-cluster-driver-registrar-role
rules:
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csidrivers"]
verbs: ["create", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-driver-registrar-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-cluster-driver-registrar-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-snapshotter-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-snapshotter-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-resizer-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-resizer-role
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-resizer-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-controller-secret-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-controller-secret-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-azuredisk-controller-secret-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,179 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-azuredisk-controller
namespace: kube-system
spec:
replicas: {{ azure_csi_controller_replicas }}
selector:
matchLabels:
app: csi-azuredisk-controller
template:
metadata:
labels:
app: csi-azuredisk-controller
spec:
hostNetwork: true
serviceAccountName: csi-azuredisk-controller-sa
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
effect: "NoSchedule"
{% if azure_csi_controller_affinity %}
affinity:
{{ azure_csi_controller_affinity | to_nice_yaml | indent(width=8) }}
{% endif %}
containers:
- name: csi-provisioner
image: {{ azure_csi_image_repo }}/csi-provisioner:{{ azure_csi_provisioner_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--feature-gates=Topology=true"
- "--csi-address=$(ADDRESS)"
- "--v=2"
- "--timeout=15s"
- "--leader-election"
- "--worker-threads=40"
- "--extra-create-metadata=true"
- "--strict-topology=true"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: csi-attacher
image: {{ azure_csi_image_repo }}/csi-attacher:{{ azure_csi_attacher_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "-v=2"
- "-csi-address=$(ADDRESS)"
- "-timeout=600s"
- "-leader-election"
- "-worker-threads=500"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: csi-snapshotter
image: {{ azure_csi_image_repo }}/csi-snapshotter:{{ azure_csi_snapshotter_image_tag }}
args:
- "-csi-address=$(ADDRESS)"
- "-leader-election"
- "-v=2"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: csi-resizer
image: {{ azure_csi_image_repo }}/csi-resizer:{{ azure_csi_resizer_image_tag }}
args:
- "-csi-address=$(ADDRESS)"
- "-v=2"
- "-leader-election"
- '-handle-volume-inuse-error=false'
- "-timeout=60s"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: {{ azure_csi_image_repo }}/livenessprobe:{{ azure_csi_livenessprobe_image_tag }}
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29602
- --v=2
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: azuredisk
image: {{ azure_csi_plugin_image_repo }}/azuredisk-csi:{{ azure_csi_plugin_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metrics-address=0.0.0.0:29604"
- "--disable-avset-nodes=true"
- "--drivername=disk.csi.azure.com"
- "--cloud-config-secret-name=cloud-config"
- "--cloud-config-secret-namespace=kube-system"
ports:
- containerPort: 29602
name: healthz
protocol: TCP
- containerPort: 29604
name: metrics
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
env:
- name: AZURE_CREDENTIAL_FILE
value: "/etc/kubernetes/azure.json"
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /etc/kubernetes/
name: azure-cred
readOnly: true
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- name: socket-dir
emptyDir: {}
- name: azure-cred
secret:
secretName: cloud-config

View File

@@ -0,0 +1,10 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: disk.csi.azure.com
spec:
attachRequired: true
podInfoOnMount: true
volumeLifecycleModes: # added in Kubernetes 1.16
- Persistent

View File

@@ -0,0 +1,30 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-azuredisk-node-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-node-secret-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-node-secret-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-node-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-azuredisk-node-secret-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,168 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-azuredisk-node
namespace: kube-system
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: csi-azuredisk-node
template:
metadata:
labels:
app: csi-azuredisk-node
spec:
hostNetwork: true
dnsPolicy: Default
serviceAccountName: csi-azuredisk-node-sa
nodeSelector:
kubernetes.io/os: linux
{% if azure_csi_node_affinity %}
affinity:
{{ azure_csi_node_affinity | to_nice_yaml | indent(width=8) }}
{% endif %}
priorityClassName: system-node-critical
tolerations:
- operator: Exists
containers:
- name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
image: {{ azure_csi_image_repo }}/livenessprobe:{{ azure_csi_livenessprobe_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29603
- --v=2
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: {{ azure_csi_image_repo }}/csi-node-driver-registrar:{{ azure_csi_node_registrar_image_tag }}
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=2
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: azuredisk
image: {{ azure_csi_plugin_image_repo }}/azuredisk-csi:{{ azure_csi_plugin_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(KUBE_NODE_NAME)"
- "--metrics-address=0.0.0.0:29605"
- "--enable-perf-optimization=true"
- "--drivername=disk.csi.azure.com"
- "--volume-attach-limit=-1"
- "--cloud-config-secret-name=cloud-config"
- "--cloud-config-secret-namespace=kube-system"
ports:
- containerPort: 29603
name: healthz
protocol: TCP
- containerPort: 29605
name: metrics
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
env:
- name: AZURE_CREDENTIAL_FILE
value: "/etc/kubernetes/azure.json"
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet/
mountPropagation: Bidirectional
name: mountpoint-dir
- mountPath: /etc/kubernetes/
name: azure-cred
- mountPath: /dev
name: device-dir
- mountPath: /sys/bus/scsi/devices
name: sys-devices-dir
- mountPath: /sys/class/scsi_host/
name: scsi-host-dir
resources:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/disk.csi.azure.com
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet/
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry/
type: DirectoryOrCreate
name: registration-dir
- secret:
defaultMode: 0644
secretName: cloud-config
name: azure-cred
- hostPath:
path: /dev
type: Directory
name: device-dir
- hostPath:
path: /sys/bus/scsi/devices
type: Directory
name: sys-devices-dir
- hostPath:
path: /sys/class/scsi_host/
type: Directory
name: scsi-host-dir

View File

@@ -0,0 +1,7 @@
kind: Secret
apiVersion: v1
metadata:
name: cloud-config
namespace: kube-system
data:
azure.json: {{ cloud_config_secret.content }}

View File

@@ -0,0 +1,14 @@
{
"cloud":"AzurePublicCloud",
"tenantId": "{{ azure_csi_tenant_id }}",
"subscriptionId": "{{ azure_csi_subscription_id }}",
"aadClientId": "{{ azure_csi_aad_client_id }}",
"aadClientSecret": "{{ azure_csi_aad_client_secret }}",
"location": "{{ azure_csi_location }}",
"resourceGroup": "{{ azure_csi_resource_group }}",
"vnetName": "{{ azure_csi_vnet_name }}",
"vnetResourceGroup": "{{ azure_csi_vnet_resource_group }}",
"subnetName": "{{ azure_csi_subnet_name }}",
"securityGroupName": "{{ azure_csi_security_group_name }}",
"useInstanceMetadata": {{ azure_csi_use_instance_metadata }},
}
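
For illustration, the template above is driven entirely by `azure_csi_*` variables set in inventory group_vars. A minimal sketch with placeholder IDs and names that must be replaced with real Azure values:

azure_csi_tenant_id: "00000000-0000-0000-0000-000000000000"
azure_csi_subscription_id: "00000000-0000-0000-0000-000000000000"
azure_csi_aad_client_id: "00000000-0000-0000-0000-000000000000"
azure_csi_aad_client_secret: "changeme"
azure_csi_location: westeurope
azure_csi_resource_group: my-rg
azure_csi_vnet_name: my-vnet
azure_csi_vnet_resource_group: my-rg
azure_csi_subnet_name: my-subnet
azure_csi_security_group_name: my-nsg
azure_csi_use_instance_metadata: true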

View File

@@ -0,0 +1,30 @@
---
# To access Cinder, the CSI controller needs credentials for the
# OpenStack APIs. By default, these values are read from the
# environment.
cinder_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
cinder_username: "{{ lookup('env','OS_USERNAME') }}"
cinder_password: "{{ lookup('env','OS_PASSWORD') }}"
cinder_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID') }}"
cinder_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME') }}"
cinder_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET') }}"
cinder_region: "{{ lookup('env','OS_REGION_NAME') }}"
cinder_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}"
cinder_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}"
cinder_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
cinder_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
cinder_cacert: "{{ lookup('env','OS_CACERT') }}"
# For now, only Cinder v3 is supported by the Cinder CSI driver
cinder_blockstorage_version: "v3"
cinder_csi_controller_replicas: 1
# Optional. Set to true to rescan the block device and verify its size before
# expanding the filesystem.
# Not all hypervisors expose a /sys/class/block/XXX/device/rescan location, so if
# you enable this option on a hypervisor that doesn't support it, a warning is
# logged on every resize event. In that case it is recommended to leave this
# option disabled.
# Defaults to false
# cinder_csi_rescan_on_resize: true
cinder_tolerations: []
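
As the comments above note, these default to the OS_* environment variables. A minimal sketch of overriding them in group_vars to use an OpenStack application credential instead of a username and password; the endpoint, IDs and secret below are placeholders. The credential checks in the tasks that follow expect the credential name, id and secret to be set together:

cinder_auth_url: "https://keystone.example.com:5000/v3"
cinder_application_credential_name: "kubespray-cinder-csi"
cinder_application_credential_id: "0123456789abcdef0123456789abcdef"
cinder_application_credential_secret: "changeme"
cinder_region: "RegionOne"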

View File

@@ -0,0 +1,59 @@
---
- name: Cinder CSI Driver | check cinder_auth_url value
  fail:
    msg: "cinder_auth_url is missing"
  when: cinder_auth_url is not defined or not cinder_auth_url

- name: Cinder CSI Driver | check cinder_username or cinder_application_credential_name value
  fail:
    msg: "you must either set cinder_username or cinder_application_credential_name"
  when:
    - cinder_username is not defined or not cinder_username
    - cinder_application_credential_name is not defined or not cinder_application_credential_name

- name: Cinder CSI Driver | check cinder_application_credential_id value
  fail:
    msg: "cinder_application_credential_id is missing"
  when:
    - cinder_application_credential_name is defined
    - cinder_application_credential_name|length > 0
    - cinder_application_credential_id is not defined or not cinder_application_credential_id

- name: Cinder CSI Driver | check cinder_application_credential_secret value
  fail:
    msg: "cinder_application_credential_secret is missing"
  when:
    - cinder_application_credential_name is defined
    - cinder_application_credential_name|length > 0
    - cinder_application_credential_secret is not defined or not cinder_application_credential_secret

- name: Cinder CSI Driver | check cinder_password value
  fail:
    msg: "cinder_password is missing"
  when:
    - cinder_username is defined
    - cinder_username|length > 0
    - cinder_application_credential_name is not defined or not cinder_application_credential_name
    - cinder_application_credential_secret is not defined or not cinder_application_credential_secret
    - cinder_password is not defined or not cinder_password

- name: Cinder CSI Driver | check cinder_region value
  fail:
    msg: "cinder_region is missing"
  when: cinder_region is not defined or not cinder_region

- name: Cinder CSI Driver | check cinder_tenant_id value
  fail:
    msg: "one of cinder_tenant_id or cinder_tenant_name must be specified"
  when:
    - cinder_tenant_id is not defined or not cinder_tenant_id
    - cinder_tenant_name is not defined or not cinder_tenant_name
    - cinder_application_credential_name is not defined or not cinder_application_credential_name

- name: Cinder CSI Driver | check cinder_domain_id value
  fail:
    msg: "one of cinder_domain_id or cinder_domain_name must be specified"
  when:
    - cinder_domain_id is not defined or not cinder_domain_id
    - cinder_domain_name is not defined or not cinder_domain_name
    - cinder_application_credential_name is not defined or not cinder_application_credential_name
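
Conversely, the username/password path through these checks also requires a tenant and a domain. A sketch of group_vars that would satisfy every task above; all values are placeholders:

cinder_auth_url: "https://keystone.example.com:5000/v3"
cinder_username: "cinder-csi"
cinder_password: "changeme"
cinder_region: "RegionOne"
cinder_tenant_name: "kube"
cinder_domain_name: "Default"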

View File

@@ -0,0 +1,11 @@
---
# include to work around a mitogen issue
# https://github.com/dw/mitogen/issues/663

- name: Cinder CSI Driver | Write cacert file
copy:
src: "{{ cinder_cacert }}"
dest: "{{ kube_config_dir }}/cinder-cacert.pem"
group: "{{ kube_cert_group }}"
mode: 0640
delegate_to: "{{ delegate_host_to_write_cacert }}"

View File

@@ -0,0 +1,56 @@
---
- include_tasks: cinder-credential-check.yml

- name: Cinder CSI Driver | Write cacert file
  include_tasks: cinder-write-cacert.yml
  run_once: true
  loop: "{{ groups['k8s_cluster'] }}"
  loop_control:
    loop_var: delegate_host_to_write_cacert
  when:
    - inventory_hostname in groups['k8s_cluster']
    - cinder_cacert is defined
    - cinder_cacert | length > 0

- name: Cinder CSI Driver | Write Cinder cloud-config
  template:
    src: "cinder-csi-cloud-config.j2"
    dest: "{{ kube_config_dir }}/cinder_cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: Cinder CSI Driver | Get base64 cloud-config
  slurp:
    src: "{{ kube_config_dir }}/cinder_cloud_config"
  register: cloud_config_secret
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: Cinder CSI Driver | Generate Manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: 0644
  with_items:
    - {name: cinder-csi-driver, file: cinder-csi-driver.yml}
    - {name: cinder-csi-cloud-config-secret, file: cinder-csi-cloud-config-secret.yml}
    - {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin-rbac.yml}
    - {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin.yml}
    - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin-rbac.yml}
    - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin.yml}
    - {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml}
  register: cinder_csi_manifests
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: Cinder CSI Driver | Apply Manifests
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items:
    - "{{ cinder_csi_manifests.results }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - not item is skipped
  loop_control:
    label: "{{ item.item.file }}"

View File

@@ -0,0 +1,10 @@
# This YAML file contains the secret object,
# which is necessary to run the csi cinder plugin.
kind: Secret
apiVersion: v1
metadata:
name: cloud-config
namespace: kube-system
data:
cloud.conf: {{ cloud_config_secret.content }}

View File

@@ -0,0 +1,44 @@
[Global]
auth-url="{{ cinder_auth_url }}"
{% if cinder_application_credential_id|length == 0 and cinder_application_credential_name|length == 0 %}
username="{{ cinder_username }}"
password="{{ cinder_password }}"
{% endif %}
{% if cinder_application_credential_id|length > 0 %}
application-credential-id={{ cinder_application_credential_id }}
{% endif %}
{% if cinder_application_credential_name|length > 0 %}
application-credential-name={{ cinder_application_credential_name }}
{% endif %}
{% if cinder_application_credential_secret|length > 0 %}
application-credential-secret={{ cinder_application_credential_secret }}
{% endif %}
region="{{ cinder_region }}"
{% if cinder_tenant_id|length > 0 %}
tenant-id="{{ cinder_tenant_id }}"
{% endif %}
{% if cinder_tenant_name|length > 0 %}
tenant-name="{{ cinder_tenant_name }}"
{% endif %}
{% if cinder_domain_name|length > 0 %}
domain-name="{{ cinder_domain_name }}"
{% elif cinder_domain_id|length > 0 %}
domain-id="{{ cinder_domain_id }}"
{% endif %}
{% if cinder_cacert|length > 0 %}
ca-file="{{ kube_config_dir }}/cinder-cacert.pem"
{% endif %}
[BlockStorage]
{% if cinder_blockstorage_version is defined %}
bs-version={{ cinder_blockstorage_version }}
{% endif %}
{% if cinder_csi_ignore_volume_az is defined %}
ignore-volume-az={{ cinder_csi_ignore_volume_az | bool }}
{% endif %}
{% if node_volume_attach_limit is defined and node_volume_attach_limit != "" %}
node-volume-attach-limit="{{ node_volume_attach_limit }}"
{% endif %}
{% if cinder_csi_rescan_on_resize is defined %}
rescan-on-resize={{ cinder_csi_rescan_on_resize | bool }}
{% endif %}
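
The [BlockStorage] options above are only rendered when the corresponding variables are defined. A sketch of group_vars enabling the optional tunables; the values are illustrative, not recommendations:

cinder_csi_ignore_volume_az: true
node_volume_attach_limit: "20"
cinder_csi_rescan_on_resize: true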

View File

@@ -0,0 +1,179 @@
# This YAML file contains RBAC API objects,
# which are necessary to run the csi controller plugin
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-cinder-controller-sa
namespace: kube-system
---
# external attacher
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-attacher-role
apiGroup: rbac.authorization.k8s.io
---
# external Provisioner
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# external snapshotter
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
# External Resizer
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-role
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-resizer-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,156 @@
# This YAML file contains the CSI controller plugin sidecars:
# external-attacher, external-provisioner, external-snapshotter, external-resizer and liveness-probe
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-cinder-controllerplugin
namespace: kube-system
spec:
replicas: {{ cinder_csi_controller_replicas }}
selector:
matchLabels:
app: csi-cinder-controllerplugin
template:
metadata:
labels:
app: csi-cinder-controllerplugin
spec:
serviceAccountName: csi-cinder-controller-sa
containers:
- name: csi-attacher
image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %}
- --leader-election=true
{% endif %}
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-provisioner
image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
- "--default-fstype=ext4"
- "--extra-create-metadata"
{% if cinder_topology is defined and cinder_topology %}
- --feature-gates=Topology=true
{% endif %}
{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %}
- "--leader-election=true"
{% endif %}
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-snapshotter
image: {{ csi_snapshotter_image_repo }}:{{ csi_snapshotter_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
- "--extra-create-metadata"
{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %}
- --leader-election=true
{% endif %}
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- name: csi-resizer
image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
- "--handle-volume-inuse-error=false"
{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %}
- --leader-election=true
{% endif %}
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: liveness-probe
image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- name: cinder-csi-plugin
image: {{ cinder_csi_plugin_image_repo }}:{{ cinder_csi_plugin_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- /bin/cinder-csi-plugin
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
- "--cluster=$(CLUSTER_NAME)"
env:
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
- name: CLUSTER_NAME
value: kubernetes
ports:
- containerPort: 9808
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 10
periodSeconds: 60
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: secret-cinderplugin
mountPath: /etc/config
readOnly: true
- name: ca-certs
mountPath: /etc/ssl/certs
readOnly: true
{% if cinder_cacert is defined and cinder_cacert != "" %}
- name: cinder-cacert
mountPath: {{ kube_config_dir }}/cinder-cacert.pem
readOnly: true
{% endif %}
volumes:
- name: socket-dir
emptyDir:
- name: secret-cinderplugin
secret:
secretName: cloud-config
- name: ca-certs
hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
{% if cinder_cacert is defined and cinder_cacert != "" %}
- name: cinder-cacert
hostPath:
path: {{ kube_config_dir }}/cinder-cacert.pem
type: FileOrCreate
{% endif %}
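
Note that each sidecar above only passes --leader-election=true when more than one controller replica is requested, so enabling an HA controller is a single override in group_vars:

cinder_csi_controller_replicas: 2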

View File

@@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: cinder.csi.openstack.org
spec:
attachRequired: true
podInfoOnMount: true
volumeLifecycleModes:
- Persistent
- Ephemeral
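
The CSIDriver object only registers the driver with the API server; provisioning volumes still requires a StorageClass that points at it. A minimal sketch, with a hypothetical class name and commonly used settings:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: cinder-csi
provisioner: cinder.csi.openstack.org
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer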

View File

@@ -0,0 +1,38 @@
# This YAML defines all API objects to create RBAC roles for the csi node plugin.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-cinder-node-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-node-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-nodeplugin-role
apiGroup: rbac.authorization.k8s.io

Some files were not shown because too many files have changed in this diff.