디렉토리 구조 및 각 서비스 추가

This commit is contained in:
dsk-minchulahn
2024-01-03 17:29:11 +09:00
parent 98de2a7627
commit d601d0f259
1632 changed files with 207616 additions and 1 deletions

View File

@@ -0,0 +1,4 @@
OpenEBS Mayastor has been installed. Check its status by running:
$ kubectl get pods -n {{ .Release.Namespace }}
For more information or to view the documentation, visit our website at https://mayastor.gitbook.io/introduction/.

View File

@@ -0,0 +1,209 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Renders a value that may itself contain Helm template directives.
A plain string is passed to tpl directly; any other value is serialised
to YAML first so nested structures are rendered too.
Usage:
{{ include "render" ( dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "render" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}
{{/*
Renders the CORE server init containers (indented 8 spaces for a pod spec's
initContainers list), only when base.initCoreContainers.enabled is set.
Usage:
{{ include "base_init_core_containers" . }}
*/}}
{{- define "base_init_core_containers" -}}
{{- if .Values.base.initCoreContainers.enabled }}
{{- include "render" (dict "value" .Values.base.initCoreContainers.containers "context" $) | nindent 8 }}
{{- end }}
{{- end -}}
{{/*
Renders the HA NODE AGENT init containers (indented 8 spaces for a pod spec's
initContainers list), only when base.initHaNodeContainers.enabled is set.
Usage:
{{ include "base_init_ha_node_containers" . }}
*/}}
{{- define "base_init_ha_node_containers" -}}
{{- if .Values.base.initHaNodeContainers.enabled }}
{{- include "render" (dict "value" .Values.base.initHaNodeContainers.containers "context" $) | nindent 8 }}
{{- end }}
{{- end -}}
{{/*
Renders the base init containers shared by all deployments (when
base.initContainers.enabled), always followed by the jaeger agent init
container (itself gated on base.jaeger.enabled / base.jaeger.initContainer).
Usage:
{{ include "base_init_containers" . }}
*/}}
{{- define "base_init_containers" -}}
{{- if .Values.base.initContainers.enabled }}
{{- include "render" (dict "value" .Values.base.initContainers.containers "context" $) | nindent 8 }}
{{- end }}
{{- include "jaeger_agent_init_container" . }}
{{- end -}}
{{/*
Renders the jaeger agent init container, but only when BOTH
base.jaeger.enabled and base.jaeger.initContainer are set.
Usage:
{{ include "jaeger_agent_init_container" . }}
*/}}
{{- define "jaeger_agent_init_container" -}}
{{- if .Values.base.jaeger.enabled }}
{{- if .Values.base.jaeger.initContainer }}
{{- include "render" (dict "value" .Values.base.jaeger.agent.initContainer "context" $) | nindent 8 }}
{{- end }}
{{- end }}
{{- end -}}
{{/*
Renders the base image pull secrets for all deployments (indented 8 spaces
for a pod spec's imagePullSecrets list), when base.imagePullSecrets.enabled.
Usage:
{{ include "base_pull_secrets" . }}
*/}}
{{- define "base_pull_secrets" -}}
{{- if .Values.base.imagePullSecrets.enabled }}
{{- include "render" (dict "value" .Values.base.imagePullSecrets.secrets "context" $) | nindent 8 }}
{{- end }}
{{- end -}}
{{/*
Renders the REST server init container, only when
base.initRestContainer.enabled is set.
Usage:
{{- include "rest_agent_init_container" . }}
*/}}
{{- define "rest_agent_init_container" -}}
{{- if .Values.base.initRestContainer.enabled }}
{{- include "render" (dict "value" .Values.base.initRestContainer.initContainer "context" $) | nindent 8 }}
{{- end }}
{{- end -}}
{{/*
Renders affinity and tolerations for jaeger, taken from the
"jaeger-operator" sub-chart values, when present.
NOTE(review): the emitted "affinity:"/"tolerations:" keys carry no leading
indentation of their own; the caller is responsible for including this helper
at the correct nesting level — confirm against each call site.
Usage:
{{ include "jaeger_scheduling" . }}
*/}}
{{- define "jaeger_scheduling" -}}
{{- if index .Values "jaeger-operator" "affinity" }}
affinity:
{{- include "render" (dict "value" (index .Values "jaeger-operator" "affinity") "context" $) | nindent 4 }}
{{- end }}
{{- if index .Values "jaeger-operator" "tolerations" }}
tolerations:
{{- include "render" (dict "value" (index .Values "jaeger-operator" "tolerations") "context" $) | nindent 4 }}
{{- end }}
{{- end -}}
{{/* Generates the core list specification (the -l param of io-engine);
thin alias over "coreListUniq". */}}
{{- define "cpuFlag" -}}
{{- include "coreListUniq" . -}}
{{- end -}}
{{/* Counts the cores in the comma-separated list produced by
"coreListUniq" (splits on "," and takes the length). */}}
{{- define "coreCount" -}}
{{- include "coreListUniq" . | split "," | len -}}
{{- end -}}
{{/*
Gets the list of io-engine cores as a comma-separated string.
If io_engine.coreList is set (scalar or list), entries are deduplicated,
whitespace-stripped, and kept only when they are plain integers; at least one
valid entry is required or rendering fails. Otherwise the cores 1..cpuCount
are generated, failing when io_engine.cpuCount < 1.
NOTE(review): non-integer entries (e.g. range syntax like "1-3") are silently
dropped rather than rejected — confirm that is intended.
*/}}
{{- define "coreListUniq" -}}
{{- if .Values.io_engine.coreList -}}
{{- $cores_pre := .Values.io_engine.coreList -}}
{{- if not (kindIs "slice" .Values.io_engine.coreList) -}}
{{- $cores_pre = list $cores_pre -}}
{{- end -}}
{{- $cores := list -}}
{{- range $index, $value := $cores_pre | uniq -}}
{{- $value = $value | toString | replace " " "" }}
{{- if eq ($value | int | toString) $value -}}
{{- $cores = append $cores $value -}}
{{- end -}}
{{- end -}}
{{- $first := first $cores | required (print "At least one core must be specified in io_engine.coreList") -}}
{{- $cores | join "," -}}
{{- else -}}
{{- if gt 1 (.Values.io_engine.cpuCount | int) -}}
{{- fail ".Values.io_engine.cpuCount must be >= 1" -}}
{{- end -}}
{{- untilStep 1 (add 1 .Values.io_engine.cpuCount | int) 1 | join "," -}}
{{- end -}}
{{- end }}
{{/*
Adds the project domain (read from the chart's bundled product.yaml file)
as a prefix for labels.
Usage:
{{ include "label_prefix" . }}/release: {{ .Release.Name }}
*/}}
{{- define "label_prefix" -}}
{{ $product := .Files.Get "product.yaml" | fromYaml }}
{{- print $product.domain -}}
{{- end -}}
{{/*
Creates the tolerations based on the global and component wise tolerations,
with early eviction: the global earlyEvictionTolerations are always emitted,
then the local tolerations when given, else the global ones.
Usage:
{{ include "tolerations_with_early_eviction" (dict "template" . "localTolerations" .Values.path.to.local.tolerations) }}
*/}}
{{- define "tolerations_with_early_eviction" -}}
{{- toYaml .template.Values.earlyEvictionTolerations | nindent 8 }}
{{- if .localTolerations }}
{{- toYaml .localTolerations | nindent 8 }}
{{- else if .template.Values.tolerations }}
{{- toYaml .template.Values.tolerations | nindent 8 }}
{{- end }}
{{- end }}
{{/*
Creates the tolerations based on the global and component wise tolerations:
local tolerations win when given, otherwise the global
.Values.tolerations are used (indented 8 spaces for a pod spec).
Usage:
{{ include "tolerations" (dict "template" . "localTolerations" .Values.path.to.local.tolerations) }}
*/}}
{{- define "tolerations" -}}
{{- if .localTolerations }}
{{- toYaml .localTolerations | nindent 8 }}
{{- else if .template.Values.tolerations }}
{{- toYaml .template.Values.tolerations | nindent 8 }}
{{- end }}
{{- end }}
{{/*
Generates the priority class name: the local override wins, then the global
.Values.priorityClassName, otherwise an empty string.
NOTE(review): when localPriorityClass is nil (key absent) the typeIs "string"
guard is false and NOTHING is emitted, so the global fallback is skipped —
callers must pass an empty string rather than omit the key. Confirm intended.
Usage:
{{ include "priority_class" (dict "template" . "localPriorityClass" .Values.path.to.local.priorityClassName) }}
*/}}
{{- define "priority_class" -}}
{{- if typeIs "string" .localPriorityClass }}
{{- if .localPriorityClass -}}
{{ printf "%s" .localPriorityClass -}}
{{- else if .template.Values.priorityClassName -}}
{{ printf "%s" .template.Values.priorityClassName -}}
{{- else -}}
{{ printf "" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Generates the priority class name, with the given `template` and the
`localPriorityClass`; falls back to the release's "<name>-cluster-critical"
class when both are empty.
NOTE(review): as in "priority_class", a nil localPriorityClass fails the
typeIs "string" guard and suppresses the default entirely — confirm intended.
Usage:
{{ include "priority_class_with_default" (dict "template" . "localPriorityClass" .Values.path.to.local.priorityClassName) }}
*/}}
{{- define "priority_class_with_default" -}}
{{- if typeIs "string" .localPriorityClass }}
{{- if .localPriorityClass -}}
{{ printf "%s" .localPriorityClass -}}
{{- else if .template.Values.priorityClassName -}}
{{ printf "%s" .template.Values.priorityClassName -}}
{{- else -}}
{{ printf "%s-cluster-critical" .template.Release.Name -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,16 @@
{{/* Hostpath localpv StorageClass for etcd, rendered only when both the
localpv-provisioner sub-chart and etcd persistence are enabled. */}}
{{ if and (index .Values "localpv-provisioner" "enabled") .Values.etcd.persistence.enabled }}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    cas.openebs.io/config: |
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: {{ tpl (.Values.etcd.localpvScConfig.basePath | quote) . }}
    openebs.io/cas-type: local
  name: {{ (tpl (.Values.etcd.localpvScConfig.name) .) | required (print "StorageClass name for etcd localpv storage cannot be empty") }}
provisioner: openebs.io/local
reclaimPolicy: {{ .Values.etcd.localpvScConfig.reclaimPolicy }}
volumeBindingMode: {{ .Values.etcd.localpvScConfig.volumeBindingMode }}
{{ end }}

View File

@@ -0,0 +1,22 @@
---
{{/* One manual hostPath PersistentVolume per etcd replica, pre-bound to the
statefulset pod via the pod-name label; only for storageClass "manual". */}}
{{ if and .Values.etcd.persistence.enabled (eq .Values.etcd.persistence.storageClass "manual") }}
{{- range $index, $end := until (.Values.etcd.replicaCount | int) }}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: etcd-volume-{{ $index }}
  labels:
    statefulset.kubernetes.io/pod-name: {{ print $.Release.Name }}-etcd-{{ $index }}
spec:
  storageClassName: manual
  # You must also delete the hostpath on the node
  persistentVolumeReclaimPolicy: {{ $.Values.etcd.persistence.reclaimPolicy }}
  capacity:
    storage: {{ $.Values.etcd.persistence.size | quote }}
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/var/local/{{ $.Release.Name }}/etcd/pod-{{ $index }}"
---
{{- end }}
{{- end }}

View File

@@ -0,0 +1,23 @@
{{/* All-in-one in-memory Jaeger instance (NodePort 30012 for the query UI),
rendered only when base.jaeger.enabled. */}}
{{- if .Values.base.jaeger.enabled }}
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
  name: jaeger
  namespace: {{ .Release.Namespace }}
  labels:
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  strategy: allInOne
  ingress:
    enabled: false
  {{- include "jaeger_scheduling" . }}
  query:
    serviceType: NodePort
    nodePort: 30012
  storage:
    type: memory
    options:
      memory:
        max-traces: 100000
{{- end }}

View File

@@ -0,0 +1,16 @@
{{/* Hostpath localpv StorageClass for loki, rendered only when the
localpv-provisioner sub-chart, loki persistence, and loki-stack are enabled. */}}
{{ if and (index .Values "localpv-provisioner" "enabled") (index .Values "loki-stack" "loki" "persistence" "enabled") (index .Values "loki-stack" "enabled") }}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    cas.openebs.io/config: |
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: {{ tpl (( index .Values "loki-stack" "localpvScConfig" "basePath" ) | quote ) . }}
    openebs.io/cas-type: local
  name: {{ (tpl (index .Values "loki-stack" "localpvScConfig" "name") .) | required (print "StorageClass name for loki localpv storage cannot be empty") }}
provisioner: openebs.io/local
reclaimPolicy: {{ (index .Values "loki-stack" "localpvScConfig" "reclaimPolicy") }}
volumeBindingMode: {{ (index .Values "loki-stack" "localpvScConfig" "volumeBindingMode") }}
{{ end }}

View File

@@ -0,0 +1,20 @@
---
{{/* Manual hostPath PersistentVolume for the single loki replica, pre-bound
to the statefulset pod via the pod-name label. */}}
{{ if and (eq ( index .Values "loki-stack" "loki" "persistence" "storageClassName" ) "manual") ( index .Values "loki-stack" "loki" "persistence" "enabled" ) ( index .Values "loki-stack" "enabled" ) }}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: loki-volume-0
  labels:
    statefulset.kubernetes.io/pod-name: {{ .Release.Name }}-loki-0
spec:
  storageClassName: manual
  # You must also delete the hostpath on the node
  persistentVolumeReclaimPolicy: {{ index .Values "loki-stack" "loki" "persistence" "reclaimPolicy" }}
  capacity:
    storage: {{ index .Values "loki-stack" "loki" "persistence" "size" }}
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/var/local/{{ .Release.Name }}/loki"
---
{{- end }}

View File

@@ -0,0 +1,108 @@
{{/* Single-replica Deployment running the core agent and the HA cluster
agent side by side; both talk to etcd and expose gRPC (50051 / 50052). */}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-agent-core
  labels:
    app: agent-core
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: agent-core
      {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: agent-core
        {{ include "label_prefix" . }}/release: {{ .Release.Name }}
        {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
        {{ include "label_prefix" . }}/logging: "true"
    spec:
      # NOTE(review): "serviceAccount" is the deprecated alias of
      # "serviceAccountName" — consider migrating.
      serviceAccount: {{ .Release.Name }}-service-account
      imagePullSecrets:
        {{- include "base_pull_secrets" . }}
      initContainers:
        {{- include "base_init_core_containers" . }}
      {{- if $pcName := include "priority_class_with_default" (dict "template" . "localPriorityClass" .Values.agents.core.priorityClassName) }}
      priorityClassName: {{ $pcName }}
      {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      {{- if $tolerations := include "tolerations_with_early_eviction" (dict "template" . "localTolerations" .Values.agents.core.tolerations) }}
      tolerations: {{ $tolerations }}
      {{- end }}
      containers:
        - name: agent-core
          resources:
            limits:
              cpu: {{ .Values.agents.core.resources.limits.cpu | quote }}
              memory: {{ .Values.agents.core.resources.limits.memory | quote }}
            requests:
              cpu: {{ .Values.agents.core.resources.requests.cpu | quote }}
              memory: {{ .Values.agents.core.resources.requests.memory | quote }}
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-agent-core:{{ default .Values.image.tag .Values.image.repoTags.controlPlane }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          args:
            - "-s{{ .Release.Name }}-etcd:{{ .Values.etcd.service.port }}"
            - "--request-timeout={{ .Values.base.default_req_timeout }}"
            - "--cache-period={{ .Values.base.cache_poll_period }}"{{ if .Values.base.jaeger.enabled }}
            - "--jaeger={{ .Values.base.jaeger.agent.name }}:{{ .Values.base.jaeger.agent.port }}"{{ end }}
            - "--grpc-server-addr=0.0.0.0:50051"
            - "--pool-commitment={{ .Values.agents.core.capacity.thin.poolCommitment }}"
            - "--snapshot-commitment={{ .Values.agents.core.capacity.thin.snapshotCommitment }}"
            - "--volume-commitment-initial={{ .Values.agents.core.capacity.thin.volumeCommitmentInitial }}"
            - "--volume-commitment={{ .Values.agents.core.capacity.thin.volumeCommitment }}"{{ if .Values.agents.core.partialRebuildWaitPeriod }}
            - "--faulted-child-wait-period={{ .Values.agents.core.partialRebuildWaitPeriod }}"{{ end }}{{ if .Values.eventing.enabled }}
            - "--events-url=nats://{{ .Release.Name }}-nats:4222"{{ end }}
          ports:
            - containerPort: 50051
          env:
            - name: RUST_LOG
              value: {{ .Values.agents.core.logLevel }}
            {{- if default .Values.base.logSilenceLevel .Values.agents.core.logSilenceLevel }}
            - name: RUST_LOG_SILENCE
              value: {{ default .Values.base.logSilenceLevel .Values.agents.core.logSilenceLevel }}
            {{- end }}
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
        - name: agent-ha-cluster
          resources:
            limits:
              cpu: {{ .Values.agents.ha.cluster.resources.limits.cpu | quote }}
              memory: {{ .Values.agents.ha.cluster.resources.limits.memory | quote }}
            requests:
              cpu: {{ .Values.agents.ha.cluster.resources.requests.cpu | quote }}
              memory: {{ .Values.agents.ha.cluster.resources.requests.memory | quote }}
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-agent-ha-cluster:{{ default .Values.image.tag .Values.image.repoTags.controlPlane }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          args:
            - "-g=0.0.0.0:50052"
            - "--store=http://{{ .Release.Name }}-etcd:{{ .Values.etcd.service.port }}"
            - "--core-grpc=https://{{ .Release.Name }}-agent-core:50051"{{ if .Values.base.jaeger.enabled }}
            - "--jaeger={{ .Values.base.jaeger.agent.name }}:{{ .Values.base.jaeger.agent.port }}"{{ end }}
          ports:
            - containerPort: 50052
          env:
            # NOTE(review): log level/silence are read from agents.core here,
            # not agents.ha.cluster — confirm this sharing is intended.
            - name: RUST_LOG
              value: {{ .Values.agents.core.logLevel }}
            {{- if default .Values.base.logSilenceLevel .Values.agents.core.logSilenceLevel }}
            - name: RUST_LOG_SILENCE
              value: {{ default .Values.base.logSilenceLevel .Values.agents.core.logSilenceLevel }}
            {{- end }}
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace

View File

@@ -0,0 +1,17 @@
{{/* ClusterIP Service fronting the agent-core pod: core gRPC on 50051 and
the HA cluster agent on 50052. */}}
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-agent-core
  labels:
    app: agent-core
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  selector:
    app: agent-core
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  ports:
    - name: grpc
      port: 50051
    - name: ha-cluster
      port: 50052

View File

@@ -0,0 +1,115 @@
{{/* DaemonSet running the HA node agent on every selected node (host
network, privileged: it mounts /dev, /sys and /run/udev). */}}
{{- if .Values.agents.ha.enabled }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ .Release.Name }}-agent-ha-node
  labels:
    app: agent-ha-node
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  selector:
    matchLabels:
      app: agent-ha-node
      {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  minReadySeconds: 10
  template:
    metadata:
      labels:
        app: agent-ha-node
        {{ include "label_prefix" . }}/release: {{ .Release.Name }}
        {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
        {{ include "label_prefix" . }}/logging: "true"
    spec:
      hostNetwork: true
      # Resolve cluster services while on the host network.
      dnsPolicy: ClusterFirstWithHostNet
      initContainers:
        {{- include "base_init_ha_node_containers" . }}
      imagePullSecrets:
        {{- include "base_pull_secrets" . }}
      {{- if $pcName := include "priority_class" (dict "template" . "localPriorityClass" .Values.agents.ha.node.priorityClassName) }}
      priorityClassName: {{ $pcName }}
      {{- end }}
      nodeSelector:
        {{- if .Values.nodeSelector }}
        {{- toYaml .Values.nodeSelector | nindent 8 }}
        {{- end }}
        {{- if .Values.csi.node.topology.nodeSelector }}
        {{- range $key, $val := .Values.csi.node.topology.segments }}
        {{ $key }}: {{ $val }}
        {{- end }}
        {{- end }}
      {{- if $tolerations := include "tolerations" (dict "template" . "localTolerations" .Values.agents.ha.node.tolerations) }}
      tolerations: {{ $tolerations }}
      {{- end }}
      containers:
        - name: agent-ha-node
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-agent-ha-node:{{ default .Values.image.tag .Values.image.repoTags.controlPlane }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          securityContext:
            privileged: true
          env:
            - name: RUST_LOG
              value: {{ .Values.agents.ha.node.logLevel }}
            {{- if default .Values.base.logSilenceLevel .Values.agents.ha.node.logSilenceLevel }}
            - name: RUST_LOG_SILENCE
              value: {{ default .Values.base.logSilenceLevel .Values.agents.ha.node.logSilenceLevel }}
            {{- end }}
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: RUST_BACKTRACE
              value: "1"
          args:
            - "--node-name=$(MY_NODE_NAME)"
            - "--csi-socket={{ .Values.csi.node.pluginMounthPath }}/{{ .Values.csi.node.socketPath }}"
            - "--grpc-endpoint=$(MY_POD_IP):50053"
            - "--cluster-agent=https://{{ .Release.Name }}-agent-core:50052"{{ if .Values.base.jaeger.enabled }}
            - "--jaeger={{ .Values.base.jaeger.agent.name }}:{{ .Values.base.jaeger.agent.port }}"{{ end }}
          volumeMounts:
            - name: device
              mountPath: /dev
            - name: sys
              mountPath: /sys
            - name: run-udev
              mountPath: /run/udev
            - name: plugin-dir
              mountPath: {{ .Values.csi.node.pluginMounthPath }}
          resources:
            limits:
              cpu: {{ .Values.agents.ha.node.resources.limits.cpu | quote }}
              memory: {{ .Values.agents.ha.node.resources.limits.memory | quote }}
            requests:
              cpu: {{ .Values.agents.ha.node.resources.requests.cpu | quote }}
              memory: {{ .Values.agents.ha.node.resources.requests.memory | quote }}
          ports:
            - containerPort: 50053
              protocol: TCP
              name: ha-node
      volumes:
        - name: device
          hostPath:
            path: /dev
            type: Directory
        - name: sys
          hostPath:
            path: /sys
            type: Directory
        - name: run-udev
          hostPath:
            path: /run/udev
            type: Directory
        - name: plugin-dir
          hostPath:
            path: {{ .Values.csi.node.kubeletDir }}/plugins/io.openebs.mayastor/
            type: DirectoryOrCreate
{{- end }}

View File

@@ -0,0 +1,63 @@
{{/* Deployment of the REST API server; serves HTTP on 8081 (and exposes
8080 — presumably HTTPS via --dummy-certificates; confirm). */}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-api-rest
  labels:
    app: api-rest
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  replicas: {{ .Values.apis.rest.replicaCount }}
  selector:
    matchLabels:
      app: api-rest
      {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: api-rest
        {{ include "label_prefix" . }}/release: {{ .Release.Name }}
        {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
        {{ include "label_prefix" . }}/logging: "true"
    spec:
      imagePullSecrets:
        {{- include "base_pull_secrets" . }}
      initContainers:
        {{- include "base_init_containers" . }}
      {{- if $pcName := include "priority_class_with_default" (dict "template" . "localPriorityClass" .Values.apis.rest.priorityClassName) }}
      priorityClassName: {{ $pcName }}
      {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      {{- if $tolerations := include "tolerations_with_early_eviction" (dict "template" . "localTolerations" .Values.apis.rest.tolerations) }}
      tolerations: {{ $tolerations }}
      {{- end }}
      containers:
        - name: api-rest
          resources:
            limits:
              cpu: {{ .Values.apis.rest.resources.limits.cpu | quote }}
              memory: {{ .Values.apis.rest.resources.limits.memory | quote }}
            requests:
              cpu: {{ .Values.apis.rest.resources.requests.cpu | quote }}
              memory: {{ .Values.apis.rest.resources.requests.memory | quote }}
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-api-rest:{{ default .Values.image.tag .Values.image.repoTags.controlPlane }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          args:
            - "--dummy-certificates"
            - "--no-auth"
            - "--http=0.0.0.0:8081"
            - "--request-timeout={{ .Values.base.default_req_timeout }}"{{ if .Values.base.jaeger.enabled }}
            - "--jaeger={{ .Values.base.jaeger.agent.name }}:{{ .Values.base.jaeger.agent.port }}"{{ end }}
            - "--core-grpc=https://{{ .Release.Name }}-agent-core:50051"
          ports:
            - containerPort: 8080
            - containerPort: 8081
          env:
            - name: RUST_LOG
              value: {{ .Values.apis.rest.logLevel }}
            {{- if default .Values.base.logSilenceLevel .Values.apis.rest.logSilenceLevel }}
            - name: RUST_LOG_SILENCE
              value: {{ default .Values.base.logSilenceLevel .Values.apis.rest.logSilenceLevel }}
            {{- end }}

View File

@@ -0,0 +1,28 @@
{{/* Service for the REST API (https 8080 / http 8081), with optional fixed
nodePorts when the service type is NodePort. */}}
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-api-rest
  labels:
    app: api-rest
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  type: {{ .Values.apis.rest.service.type }}
  selector:
    app: api-rest
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  ports:
    - port: 8080
      name: https
      targetPort: 8080
      protocol: TCP
      {{- if eq .Values.apis.rest.service.type "NodePort" }}
      nodePort: {{ .Values.apis.rest.service.nodePorts.https }}
      {{- end }}
    - port: 8081
      name: http
      targetPort: 8081
      protocol: TCP
      {{- if eq .Values.apis.rest.service.type "NodePort" }}
      nodePort: {{ .Values.apis.rest.service.nodePorts.http }}
      {{- end }}

View File

@@ -0,0 +1,118 @@
{{/* CSI controller Deployment: external-provisioner, attacher, snapshotter,
snapshot-controller sidecars plus the mayastor csi-controller, all sharing
one CSI socket via an emptyDir. */}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-csi-controller
  labels:
    app: csi-controller
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: csi-controller
      {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: csi-controller
        {{ include "label_prefix" . }}/release: {{ .Release.Name }}
        {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
        {{ include "label_prefix" . }}/logging: "true"
    spec:
      hostNetwork: true
      serviceAccount: {{ .Release.Name }}-service-account
      # Resolve cluster services while on the host network.
      dnsPolicy: ClusterFirstWithHostNet
      imagePullSecrets:
        {{- include "base_pull_secrets" . }}
      initContainers:
        {{- include "jaeger_agent_init_container" . }}
        {{- include "rest_agent_init_container" . }}
      {{- if $pcName := include "priority_class" (dict "template" . "localPriorityClass" .Values.csi.controller.priorityClassName) }}
      priorityClassName: {{ $pcName }}
      {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      {{- if $tolerations := include "tolerations" (dict "template" . "localTolerations" .Values.csi.controller.tolerations) }}
      tolerations: {{ $tolerations }}
      {{- end }}
      containers:
        - name: csi-provisioner
          image: "{{ .Values.csi.image.registry }}/{{ .Values.csi.image.repo }}/csi-provisioner:{{ .Values.csi.image.provisionerTag }}"
          args:
            - "--v=2"
            - "--csi-address=$(ADDRESS)"
            - "--feature-gates=Topology=true"
            - "--strict-topology=false"
            - "--default-fstype=ext4"
            - "--extra-create-metadata" # This is needed for volume group feature to work
            - "--timeout=36s"
            - "--worker-threads=10" # 10 for create and 10 for delete
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: {{ .Values.csi.image.pullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
        - name: csi-attacher
          image: "{{ .Values.csi.image.registry }}/{{ .Values.csi.image.repo }}/csi-attacher:{{ .Values.csi.image.attacherTag }}"
          args:
            - "--v=2"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: {{ .Values.csi.image.pullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
        - name: csi-snapshotter
          image: "{{ .Values.csi.image.registry }}/{{ .Values.csi.image.repo }}/csi-snapshotter:{{ .Values.csi.image.snapshotterTag }}"
          args:
            - "--v=2"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: {{ .Values.csi.image.pullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
        - name: csi-snapshot-controller
          args:
            - "--v=2"
            - "--leader-election=false" # since we are running single container
          image: "{{ .Values.csi.image.registry }}/{{ .Values.csi.image.repo }}/snapshot-controller:{{ .Values.csi.image.snapshotControllerTag }}"
          imagePullPolicy: {{ .Values.csi.image.pullPolicy }}
        - name: csi-controller
          resources:
            limits:
              cpu: {{ .Values.csi.controller.resources.limits.cpu | quote }}
              memory: {{ .Values.csi.controller.resources.limits.memory | quote }}
            requests:
              cpu: {{ .Values.csi.controller.resources.requests.cpu | quote }}
              memory: {{ .Values.csi.controller.resources.requests.memory | quote }}
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-csi-controller:{{ default .Values.image.tag .Values.image.repoTags.controlPlane }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          args:
            - "--csi-socket=/var/lib/csi/sockets/pluginproxy/csi.sock"
            - "--rest-endpoint=http://{{ .Release.Name }}-api-rest:8081"{{ if .Values.base.jaeger.enabled }}
            - "--jaeger={{ .Values.base.jaeger.agent.name }}:{{ .Values.base.jaeger.agent.port }}"{{ end }}
            {{- range $key, $val := .Values.csi.node.topology.segments }}
            - "--node-selector={{ $key }}={{ $val }}"
            {{- end }}
          env:
            - name: RUST_LOG
              value: {{ .Values.csi.controller.logLevel }}
            {{- if default .Values.base.logSilenceLevel .Values.csi.controller.logSilenceLevel }}
            - name: RUST_LOG_SILENCE
              value: {{ default .Values.base.logSilenceLevel .Values.csi.controller.logSilenceLevel }}
            {{- end }}
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
      volumes:
        - name: socket-dir
          emptyDir:

View File

@@ -0,0 +1,156 @@
{{/* CSI node plugin DaemonSet: the privileged mayastor csi-node plus the
node-driver-registrar sidecar, with the hostPath volumes both need. */}}
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ .Release.Name }}-csi-node
  labels:
    app: csi-node
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
    {{- range $key, $val := .Values.csi.node.topology.segments }}
    {{ $key }}: {{ $val }}
    {{- end }}
spec:
  selector:
    matchLabels:
      app: csi-node
      {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  minReadySeconds: 10
  template:
    metadata:
      labels:
        app: csi-node
        {{ include "label_prefix" . }}/release: {{ .Release.Name }}
        {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
        {{ include "label_prefix" . }}/logging: "true"
    spec:
      serviceAccount: {{ .Release.Name }}-service-account
      hostNetwork: true
      imagePullSecrets:
        {{- include "base_pull_secrets" . }}
      {{- if $pcName := include "priority_class" (dict "template" . "localPriorityClass" .Values.csi.node.priorityClassName) }}
      priorityClassName: {{ $pcName }}
      {{- end }}
      nodeSelector:
        {{- if .Values.nodeSelector }}
        {{- toYaml .Values.nodeSelector | nindent 8 }}
        {{- end }}
        {{- if .Values.csi.node.topology.nodeSelector }}
        {{- range $key, $val := .Values.csi.node.topology.segments }}
        {{ $key }}: {{ $val }}
        {{- end }}
        {{- end }}
      {{- if $tolerations := include "tolerations" (dict "template" . "localTolerations" .Values.csi.node.tolerations) }}
      tolerations: {{ $tolerations }}
      {{- end }}
      # NOTE: Each container must have mem/cpu limits defined in order to
      # belong to Guaranteed QoS class, hence can never get evicted in case of
      # pressure unless they exceed those limits. limits and requests must be
      # the same.
      containers:
        - name: csi-node
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-csi-node:{{ default .Values.image.tag .Values.image.repoTags.controlPlane }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          # we need privileged because we mount filesystems and use mknod
          securityContext:
            privileged: true
          env:
            - name: RUST_LOG
              value: {{ .Values.csi.node.logLevel }}
            {{- if default .Values.base.logSilenceLevel .Values.csi.node.logSilenceLevel }}
            - name: RUST_LOG_SILENCE
              value: {{ default .Values.base.logSilenceLevel .Values.csi.node.logSilenceLevel }}
            {{- end }}
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: RUST_BACKTRACE
              value: "1"
          args:
            - "--csi-socket={{ .Values.csi.node.pluginMounthPath }}/{{ .Values.csi.node.socketPath }}"
            - "--node-name=$(MY_NODE_NAME)"
            - "--grpc-endpoint=$(MY_POD_IP):10199"{{ if .Values.csi.node.nvme.io_timeout }}
            - "--nvme-core-io-timeout={{ .Values.csi.node.nvme.io_timeout }}"{{ end }}{{ if .Values.csi.node.nvme.ctrl_loss_tmo }}
            - "--nvme-ctrl-loss-tmo={{ .Values.csi.node.nvme.ctrl_loss_tmo }}"{{ end }}{{ if .Values.csi.node.nvme.keep_alive_tmo }}
            - "--nvme-keep-alive-tmo={{ .Values.csi.node.nvme.keep_alive_tmo }}"{{ end }}
            - "--nvme-nr-io-queues={{ include "coreCount" . }}"
            {{- range $key, $val := .Values.csi.node.topology.segments }}
            - "--node-selector={{ $key }}={{ $val }}"
            {{- end }}
          command:
            - csi-node
          volumeMounts:
            - name: device
              mountPath: /dev
            - name: sys
              mountPath: /sys
            - name: run-udev
              mountPath: /run/udev
            - name: plugin-dir
              mountPath: {{ .Values.csi.node.pluginMounthPath }}
            - name: kubelet-dir
              mountPath: {{ .Values.csi.node.kubeletDir }}
              mountPropagation: "Bidirectional"
          resources:
            limits:
              cpu: {{ .Values.csi.node.resources.limits.cpu | quote }}
              memory: {{ .Values.csi.node.resources.limits.memory | quote }}
            requests:
              cpu: {{ .Values.csi.node.resources.requests.cpu | quote }}
              memory: {{ .Values.csi.node.resources.requests.memory | quote }}
        - name: csi-driver-registrar
          image: "{{ .Values.csi.image.registry }}/{{ .Values.csi.image.repo }}/csi-node-driver-registrar:{{ .Values.csi.image.registrarTag }}"
          imagePullPolicy: {{ .Values.csi.image.pullPolicy }}
          args:
            - "--csi-address={{ .Values.csi.node.pluginMounthPath }}/{{ .Values.csi.node.socketPath }}"
            - "--kubelet-registration-path={{ .Values.csi.node.kubeletDir }}/plugins/io.openebs.mayastor/csi.sock"
          volumeMounts:
            - name: plugin-dir
              mountPath: {{ .Values.csi.node.pluginMounthPath }}
            - name: registration-dir
              mountPath: /registration
          resources:
            limits:
              cpu: "100m"
              memory: "50Mi"
            requests:
              cpu: "100m"
              memory: "50Mi"
          # Mayastor node plugin gRPC server
          # NOTE(review): port 10199 is bound by the csi-node container (see
          # --grpc-endpoint above); declaring it on the registrar container
          # looks misplaced — confirm which container should advertise it.
          ports:
            - containerPort: 10199
              protocol: TCP
              name: mayastor-node
      volumes:
        - name: device
          hostPath:
            path: /dev
            type: Directory
        - name: sys
          hostPath:
            path: /sys
            type: Directory
        - name: run-udev
          hostPath:
            path: /run/udev
            type: Directory
        - name: registration-dir
          hostPath:
            path: {{ .Values.csi.node.kubeletDir }}/plugins_registry/
            type: Directory
        - name: plugin-dir
          hostPath:
            path: {{ .Values.csi.node.kubeletDir }}/plugins/io.openebs.mayastor/
            type: DirectoryOrCreate
        - name: kubelet-dir
          hostPath:
            path: {{ .Values.csi.node.kubeletDir }}
            type: Directory

View File

@@ -0,0 +1,149 @@
# DaemonSet running the Mayastor io-engine data plane, one pod per node
# matching .Values.io_engine.nodeSelector, plus an optional pool
# metrics-exporter sidecar when base metrics are enabled.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ .Release.Name }}-io-engine
  labels:
    app: io-engine
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  selector:
    matchLabels:
      app: io-engine
      {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  updateStrategy:
    # OnDelete: a chart upgrade does not restart io-engine pods by itself;
    # each pod picks up the new spec only after being deleted manually.
    type: OnDelete
  minReadySeconds: 10
  template:
    metadata:
      labels:
        app: io-engine
        {{ include "label_prefix" . }}/release: {{ .Release.Name }}
        {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
        {{ include "label_prefix" . }}/logging: "true"
    spec:
      imagePullSecrets:
        {{- include "base_pull_secrets" . }}
      # With hostNetwork the pod IP (MY_POD_IP, passed to -g below) is the
      # node address.
      hostNetwork: true
      # To resolve services in the namespace while on the host network
      dnsPolicy: ClusterFirstWithHostNet
      nodeSelector: {{- .Values.io_engine.nodeSelector | toYaml | nindent 8 }}
      {{- if $pcName := include "priority_class" (dict "template" . "localPriorityClass" .Values.io_engine.priorityClassName) }}
      priorityClassName: {{ $pcName }}
      {{- end }}
      {{- if $tolerations := include "tolerations" (dict "template" . "localTolerations" .Values.io_engine.tolerations) }}
      tolerations: {{ $tolerations }}
      {{- end }}
      initContainers:
        {{- include "base_init_containers" . }}
      containers:
        {{- if .Values.base.metrics.enabled }}
        # Sidecar exposing pool metrics on :9502 (scraped via the
        # metrics-exporter-pool Service).
        - name: metrics-exporter-pool
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-metrics-exporter-pool:{{ default .Values.image.tag .Values.image.repoTags.extensions }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          args:
            # -p<interval>: metrics polling interval.
            - "-p{{ .Values.base.metrics.pollingInterval }}"
            - "--api-versions={{ .Values.io_engine.api }}"
          command:
            - metrics-exporter-pool
          ports:
            - containerPort: 9502
              protocol: TCP
              name: metrics
        {{- end }}
        - name: io-engine
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-io-engine:{{ default .Values.image.tag .Values.image.repoTags.dataPlane }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: RUST_LOG
              value: {{ .Values.io_engine.logLevel }}
            - name: NVME_QPAIR_CONNECT_ASYNC
              value: "true"
            - name: NVMF_TCP_MAX_QUEUE_DEPTH
              value: "32"
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            # Nexus NVMe-oF feature toggles (ANA / reservations).
            - name: NEXUS_NVMF_ANA_ENABLE
              value: "1"
            - name: NEXUS_NVMF_RESV_ENABLE
              value: "1"
          args:
            # The -l argument accepts cpu-list. Indexing starts at zero.
            # For example -l 1,2,10-20 means use core 1, 2, 10 to 20.
            # Note: Ensure that the CPU resources are updated accordingly.
            # If you use 2 CPUs, the CPU: field should also read 2.
            - "-g$(MY_POD_IP)"
            - "-N$(MY_NODE_NAME)"
            # Registration endpoint of the core agent.
            - "-Rhttps://{{ .Release.Name }}-agent-core:50051"
            - "-y/var/local/io-engine/config.yaml"
            - "-l{{ include "cpuFlag" . }}"
            # Persistent store (etcd) endpoint.
            - "-p={{ .Release.Name }}-etcd:{{ .Values.etcd.service.port }}"{{ if .Values.io_engine.target.nvmf.ptpl }}
            - "--ptpl-dir=/var/local/io-engine/ptpl/"{{ end }}
            - "--api-versions={{ .Values.io_engine.api }}"{{ if .Values.io_engine.target.nvmf.iface }}
            - "-T={{ .Values.io_engine.target.nvmf.iface }}"{{ end }}{{ if .Values.io_engine.envcontext }}
            - "--env-context=--{{ .Values.io_engine.envcontext }}"{{ end }}{{ if .Values.io_engine.reactorFreezeDetection.enabled }}
            - "--reactor-freeze-detection"{{ end }}
            - "--tgt-crdt={{ .Values.io_engine.target.nvmf.hostCmdRetryDelay.crdt1 }}"
          command:
            - io-engine
          securityContext:
            # Privileged: the container mounts and drives host devices
            # (/dev, /run/udev, /dev/hugepages).
            privileged: true
          volumeMounts:
            - name: device
              mountPath: /dev
            - name: udev
              mountPath: /run/udev
            - name: dshm
              mountPath: /dev/shm
            - name: configlocation
              mountPath: /var/local/io-engine/
            - name: hugepage
              mountPath: /dev/hugepages
          resources:
            # CPU defaults to the configured core count so that the limit
            # stays in step with the -l cpu-list above.
            limits:
              cpu: {{ .Values.io_engine.resources.limits.cpu | default (include "coreCount" .) | quote }}
              memory: {{ .Values.io_engine.resources.limits.memory | quote }}
              hugepages-2Mi: {{ .Values.io_engine.resources.limits.hugepages2Mi | quote }}
            requests:
              cpu: {{ .Values.io_engine.resources.requests.cpu | default (include "coreCount" .) | quote }}
              memory: {{ .Values.io_engine.resources.requests.memory | quote }}
              hugepages-2Mi: {{ .Values.io_engine.resources.requests.hugepages2Mi | quote }}
          ports:
            - containerPort: 10124
              protocol: TCP
              name: io-engine
      volumes:
        - name: device
          hostPath:
            path: /dev
            type: Directory
        - name: udev
          hostPath:
            path: /run/udev
            type: Directory
        # In-memory /dev/shm backing, capped at 1Gi.
        - name: dshm
          emptyDir:
            medium: Memory
            sizeLimit: "1Gi"
        - name: hugepage
          emptyDir:
            medium: HugePages
        - name: configlocation
          hostPath:
            path: /var/local/io-engine/
            type: DirectoryOrCreate

View File

@@ -0,0 +1,19 @@
{{- if .Values.base.metrics.enabled }}
# Service that fronts the metrics-exporter-pool sidecar running in every
# io-engine pod, exposing its Prometheus scrape endpoint on port 9502.
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-metrics-exporter-pool
  labels:
    app: metrics-exporter-pool
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  selector:
    app: io-engine
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  ports:
    - protocol: TCP
      name: metrics
      port: 9502
      targetPort: 9502
{{- end }}

View File

@@ -0,0 +1,80 @@
{{- if .Values.obs.callhome.enabled }}
# Single-replica Deployment for the obs-callhome agent. It talks to the
# chart's REST API (-e) within the release namespace (-n); when eventing is
# enabled it additionally runs a NATS-backed stats aggregator sidecar.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-obs-callhome
  labels:
    app: obs-callhome
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: obs-callhome
      {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: obs-callhome
        {{ include "label_prefix" . }}/release: {{ .Release.Name }}
        {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
        {{ include "label_prefix" . }}/logging: "true"
    spec:
      serviceAccountName: {{ .Release.Name }}-service-account
      imagePullSecrets:
        {{- include "base_pull_secrets" . }}
      {{- if $pcName := include "priority_class" (dict "template" . "localPriorityClass" .Values.obs.callhome.priorityClassName) }}
      priorityClassName: {{ $pcName }}
      {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      {{- if $tolerations := include "tolerations" (dict "template" . "localTolerations" .Values.obs.callhome.tolerations) }}
      tolerations: {{ $tolerations }}
      {{- end }}
      containers:
        - name: obs-callhome
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-obs-callhome:{{ default .Values.image.tag .Values.image.repoTags.extensions }}"
          args:
            - "-e http://{{ .Release.Name }}-api-rest:8081"
            - "-n {{ .Release.Namespace }}"{{ if .Values.eventing.enabled }}
            - "--aggregator-url=http://{{ .Release.Name }}-obs-callhome-stats:9090/stats"{{ end }}
            # NOTE(review): the sendReport delimiters below have no dashes
            # for whitespace control, so disabling it leaves whitespace-only
            # lines in the rendered args list - harmless to YAML, but dashed
            # delimiters would render cleaner.
            {{ if .Values.obs.callhome.sendReport }}
            - "--send-report"
            {{ end }}
          env:
            - name: RUST_LOG
              value: {{ .Values.obs.callhome.logLevel }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          resources:
            limits:
              cpu: {{ .Values.obs.callhome.resources.limits.cpu | quote }}
              memory: {{ .Values.obs.callhome.resources.limits.memory | quote }}
            requests:
              cpu: {{ .Values.obs.callhome.resources.requests.cpu | quote }}
              memory: {{ .Values.obs.callhome.resources.requests.memory | quote }}
        {{- if .Values.eventing.enabled }}
        # Stats aggregator sidecar; serves the /stats endpoint consumed via
        # the aggregator-url above and reads events from the NATS message bus.
        - name: obs-callhome-stats
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-obs-callhome-stats:{{ default .Values.image.tag .Values.image.repoTags.extensions }}"
          args:
            - "--namespace={{ .Release.Namespace }}"
            - "--release-name={{ .Release.Name }}"
            - "--mbus-url=nats://{{ .Release.Name }}-nats:4222"
          ports:
            - containerPort: 9090
              protocol: TCP
              name: stats
          env:
            - name: RUST_LOG
              value: {{ .Values.obs.stats.logLevel }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          resources:
            limits:
              cpu: {{ .Values.obs.stats.resources.limits.cpu | quote }}
              memory: {{ .Values.obs.stats.resources.limits.memory | quote }}
            requests:
              cpu: {{ .Values.obs.stats.resources.requests.cpu | quote }}
              memory: {{ .Values.obs.stats.resources.requests.memory | quote }}
        {{- end }}
{{- end }}

View File

@@ -0,0 +1,29 @@
{{- if .Values.obs.callhome.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-obs-callhome-stats
labels:
app: obs-callhome
{{ include "label_prefix" . }}/release: {{ .Release.Name }}
{{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
ports:
- port: 9090
name: https
targetPort: 9090
protocol: TCP
{{- if eq .Values.obs.stats.service.type "NodePort" }}
nodePort: {{ .Values.obs.stats.service.nodePorts.https }}
{{- end }}
- port: 9091
name: http
targetPort: 9091
protocol: TCP
{{- if eq .Values.obs.stats.service.type "NodePort" }}
nodePort: {{ .Values.obs.stats.service.nodePorts.http }}
{{- end }}
selector:
app: obs-callhome
{{ include "label_prefix" . }}/release: {{ .Release.Name }}
{{- end }}

View File

@@ -0,0 +1,64 @@
# Single-replica Deployment for the DiskPool operator, which reconciles
# DiskPool custom resources through the control-plane REST API (-e).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-operator-diskpool
  labels:
    app: operator-diskpool
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: operator-diskpool
      {{ include "label_prefix" . }}/release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: operator-diskpool
        {{ include "label_prefix" . }}/release: {{ .Release.Name }}
        {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
        {{ include "label_prefix" . }}/logging: "true"
    spec:
      # serviceAccountName is the supported field; the bare "serviceAccount"
      # key is a deprecated alias, and the other workloads in this chart
      # already use serviceAccountName.
      serviceAccountName: {{ .Release.Name }}-service-account
      imagePullSecrets:
        {{- include "base_pull_secrets" . }}
      initContainers:
        {{- include "base_init_containers" . }}
      {{- if $pcName := include "priority_class" (dict "template" . "localPriorityClass" .Values.operators.pool.priorityClassName) }}
      priorityClassName: {{ $pcName }}
      {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
      {{- if $tolerations := include "tolerations" (dict "template" . "localTolerations" .Values.operators.pool.tolerations) }}
      tolerations: {{ $tolerations }}
      {{- end }}
      containers:
        - name: operator-diskpool
          resources:
            limits:
              cpu: {{ .Values.operators.pool.resources.limits.cpu | quote }}
              memory: {{ .Values.operators.pool.resources.limits.memory | quote }}
            requests:
              cpu: {{ .Values.operators.pool.resources.requests.cpu | quote }}
              memory: {{ .Values.operators.pool.resources.requests.memory | quote }}
          image: "{{ .Values.image.registry }}/{{ .Values.image.repo }}/{{ .Chart.Name }}-operator-diskpool:{{ default .Values.image.tag .Values.image.repoTags.controlPlane }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          args:
            # REST API endpoint, target namespace, and reconcile timings.
            - "-e http://{{ .Release.Name }}-api-rest:8081"
            - "-n{{ .Release.Namespace }}"
            - "--request-timeout={{ .Values.base.default_req_timeout }}"
            - "--interval={{ .Values.base.cache_poll_period }}"{{ if .Values.base.jaeger.enabled }}
            - "--jaeger={{ .Values.base.jaeger.agent.name }}:{{ .Values.base.jaeger.agent.port }}"{{ end }}
          env:
            - name: RUST_LOG
              value: {{ .Values.operators.pool.logLevel }}
            {{- if default .Values.base.logSilenceLevel .Values.operators.pool.logSilenceLevel }}
            - name: RUST_LOG_SILENCE
              value: {{ default .Values.base.logSilenceLevel .Values.operators.pool.logSilenceLevel }}
            {{- end }}
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name

View File

@@ -0,0 +1,7 @@
# PriorityClass assigned to this release's critical pods; lower-priority
# pods may be preempted to make room for them.
kind: PriorityClass
apiVersion: scheduling.k8s.io/v1
metadata:
  name: {{ .Release.Name }}-cluster-critical
value: 1000000000
preemptionPolicy: PreemptLowerPriority
description: Used for critical pods that must run in the cluster, which can be moved to another node if necessary.

View File

@@ -0,0 +1,118 @@
---
# ServiceAccount shared by the control-plane workloads of this release.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .Release.Name }}-service-account
  namespace: {{ .Release.Namespace }}
  labels:
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
---
# ClusterRole granting the permissions needed by the operators, CSI
# controller sidecars (provisioner/attacher/snapshotter) and callhome.
# Note: valid RBAC request verbs are get/list/watch/create/update/patch/
# delete/deletecollection; the previously listed "replace" is not an API
# request verb and has been dropped.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Release.Name }}-cluster-role
  labels:
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
rules:
  # must create mayastor crd if it doesn't exist, replace if exist,
  # merge schema to existing CRD.
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["create", "get", "update", "list", "patch"]
  # must update stored_version in status to include new schema only.
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions/status"]
  verbs: ["get", "update", "patch"]
  # must read mayastorpools info. This is needed to handle upgrades from v1.
- apiGroups: [ "openebs.io" ]
  resources: [ "mayastorpools" ]
  verbs: ["get", "list", "patch", "delete", "deletecollection"]
  # must read diskpool info
- apiGroups: ["openebs.io"]
  resources: ["diskpools"]
  verbs: ["get", "list", "watch", "update", "patch", "create"]
  # must update diskpool status
- apiGroups: ["openebs.io"]
  resources: ["diskpools/status"]
  verbs: ["update", "patch"]
  # must read cm info
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create", "get", "update", "patch"]
  # must get deployments info
- apiGroups: ["apps"]
  resources: ["deployments"]
  verbs: ["get", "list"]
  # external provisioner & attacher
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "update", "create", "delete", "patch"]
  # nodes are read by the provisioner, snapshotter and topology handling
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
  # external provisioner
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["list", "watch", "create", "update", "patch"]
  # external snapshotter and snapshot-controller
- apiGroups: ["snapshot.storage.k8s.io"]
  resources: ["volumesnapshotclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
  resources: ["volumesnapshotcontents"]
  verbs: ["create","get", "list", "watch", "update", "patch", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
  resources: ["volumesnapshotcontents/status"]
  verbs: ["update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
  resources: ["volumesnapshots"]
  verbs: ["get", "list", "watch", "update", "patch", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
  resources: ["volumesnapshots/status"]
  verbs: ["update", "patch"]
  # external attacher
- apiGroups: ["storage.k8s.io"]
  resources: ["volumeattachments"]
  verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["volumeattachments/status"]
  verbs: ["patch"]
  # CSI nodes must be listed
- apiGroups: ["storage.k8s.io"]
  resources: ["csinodes"]
  verbs: ["get", "list", "watch"]
  # get kube-system namespace to retrieve Uid
- apiGroups: [""]
  resources: ["namespaces"]
  verbs: ["get"]
---
# Binds the ClusterRole above to the release's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Release.Name }}-cluster-role-binding
  labels:
    {{ include "label_prefix" . }}/release: {{ .Release.Name }}
    {{ include "label_prefix" . }}/version: {{ .Chart.Version }}
subjects:
- kind: ServiceAccount
  name: {{ .Release.Name }}-service-account
  namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ .Release.Name }}-cluster-role
  apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,9 @@
# Convenience StorageClass that provisions single-replica Mayastor volumes
# over NVMe-oF via the Mayastor CSI provisioner.
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: {{ .Release.Name }}-single-replica
parameters:
  # Number of data replicas per volume.
  repl: '1'
  # Volume share protocol (NVMe over Fabrics).
  protocol: 'nvmf'
  # NOTE(review): presumably the I/O timeout in seconds - confirm against
  # the CSI driver's parameter handling.
  ioTimeout: '60'
provisioner: io.openebs.csi-mayastor