image:
  # -- Image registry to pull our product images
  registry: docker.io
  # -- Image registry's namespace
  repo: openebs
  # -- Release tag for our images
  tag: v2.4.0
  repoTags:
    # Note: Below image tag configuration is optional and typically should never be
    # used. Setting specific image tags for the different repositories proves useful
    # for some integration testing scenarios. Use the 'tag' option above to set
    # release/pre-release container image tags.
    # The below tag values will be picked for images by default.
    # If not specified, 'tag' option provided above will be picked.
    controlPlane: ""
    dataPlane: ""
    extensions: ""
  # -- ImagePullPolicy for our images
  pullPolicy: IfNotPresent

# -- Node labels for pod assignment
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
# Note that if multi-arch images support 'kubernetes.io/arch: amd64'
# should be removed and set 'nodeSelector' to empty '{}' as default value.
nodeSelector:
  kubernetes.io/arch: amd64

# -- Pod scheduling priority.
# Setting this value will apply to all components except the external Chart dependencies.
# If any component has `priorityClassName` set, then this value would be overridden for that component.
# For external components like etcd, jaeger or loki-stack, PriorityClass can only be set at component level.
priorityClassName: ""

# Tolerations applied early so pods are evicted quickly from unhealthy nodes.
earlyEvictionTolerations:
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 5
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 5

# -- Tolerations to be applied to all components except external Chart dependencies.
# If any component has tolerations set, then it would override this value.
# For external components like etcd, jaeger and loki-stack, tolerations can only be set at component level.
tolerations: []

base:
  # -- Request timeout for rest & core agents
  default_req_timeout: 5s
  # -- Cache timeout for core agent & diskpool deployment
  cache_poll_period: 30s
  # -- Silence specific module components
  logSilenceLevel:
  initContainers:
    enabled: true
    containers:
      - name: agent-core-grpc-probe
        image: busybox:latest
        command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 {{ .Release.Name }}-agent-core 50051; do date; echo "Waiting for agent-core-grpc services..."; sleep 1; done;']
      - name: etcd-probe
        image: busybox:latest
        command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 {{ .Release.Name }}-etcd {{.Values.etcd.service.port}}; do date; echo "Waiting for etcd..."; sleep 1; done;']
  initHaNodeContainers:
    enabled: true
    containers:
      - name: agent-cluster-grpc-probe
        image: busybox:latest
        command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 {{ .Release.Name }}-agent-core 50052; do date; echo "Waiting for agent-cluster-grpc services..."; sleep 1; done;']
  initCoreContainers:
    enabled: true
    containers:
      - name: etcd-probe
        image: busybox:latest
        command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 {{ .Release.Name }}-etcd {{.Values.etcd.service.port}}; do date; echo "Waiting for etcd..."; sleep 1; done;']
  # docker-secrets required to pull images if the container registry from image.Registry is protected
  imagePullSecrets:
    # -- Enable imagePullSecrets for pulling our container images
    enabled: false
    # Name of the imagePullSecret in the installed namespace
    secrets:
      - name: login
  metrics:
    # -- Enable the metrics exporter
    enabled: true
    # metrics refresh time
    # WARNING: Lowering pollingInterval value will affect performance adversely
    pollingInterval: "5m"
  jaeger:
    # -- Enable jaeger tracing
    enabled: false
    initContainer: true
    agent:
      name: jaeger-agent
      port: 6831
      initContainer:
        - name: jaeger-probe
          image: busybox:latest
          command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 -u {{.Values.base.jaeger.agent.name}} {{.Values.base.jaeger.agent.port}}; do date; echo "Waiting for jaeger..."; sleep 1; done;']
  initRestContainer:
    enabled: true
    initContainer:
      - name: api-rest-probe
        image: busybox:latest
        command: ['sh', '-c', 'trap "exit 1" TERM; until nc -vzw 5 {{ .Release.Name }}-api-rest 8081; do date; echo "Waiting for REST API endpoint to become available"; sleep 1; done;']

operators:
  pool:
    # -- Log level for diskpool operator service
    logLevel: info
    resources:
      limits:
        # -- Cpu limits for diskpool operator
        cpu: "100m"
        # -- Memory limits for diskpool operator
        memory: "32Mi"
      requests:
        # -- Cpu requests for diskpool operator
        cpu: "50m"
        # -- Memory requests for diskpool operator
        memory: "16Mi"
    # -- Set tolerations, overrides global
    tolerations: []
    # -- Set PriorityClass, overrides global
    priorityClassName: ""

jaeger-operator:
  # Name of jaeger operator
  name: "{{ .Release.Name }}"
  crd:
    # Install jaeger CRDs
    install: false
  jaeger:
    # Install jaeger-operator
    create: false
  rbac:
    # Create a clusterRole for Jaeger
    clusterRole: true
  tolerations: []
  priorityClassName: ""

agents:
  core:
    # -- Log level for the core service
    logLevel: info
    capacity:
      thin:
        # -- The allowed pool commitment limit when dealing with thin provisioned volumes.
        # Example: If the commitment is 250 and the pool is 10GiB we can overcommit the pool
        # up to 25GiB (create 2 10GiB and 1 5GiB volume) but no further.
        poolCommitment: "250%"
        # -- When creating replicas for an existing volume, each replica pool must have at least
        # this much free space percentage of the volume size.
        # Example: if this value is 40, the pool has 40GiB free, then the max volume size allowed
        # to be created on the pool is 100GiB.
        volumeCommitment: "40%"
        # -- Same as the `volumeCommitment` argument, but applicable only when creating replicas
        # for a new volume.
        volumeCommitmentInitial: "40%"
        # -- When creating snapshots for an existing volume, each replica pool must have at least
        # this much free space percentage of the volume size.
# Example: if this value is 40, the pool has 40GiB free, then the max volume size allowed # to be snapped on the pool is 100GiB. snapshotCommitment: "40%" resources: limits: # -- Cpu limits for core agents cpu: "1000m" # -- Memory limits for core agents memory: "128Mi" requests: # -- Cpu requests for core agents cpu: "500m" # -- Memory requests for core agents memory: "32Mi" # -- If a faulted replica comes back online within this time period then it will be # rebuilt using the partial rebuild capability (using a log of missed IO), hence a bit # faster depending on the log size. Otherwise, the replica will be fully rebuilt. # A blank value "" means internally derived value will be used. partialRebuildWaitPeriod: "" # -- Set tolerations, overrides global tolerations: [] # -- Set PriorityClass, overrides global. # If both local and global are not set, the final deployment manifest has a mayastor custom critical priority class assigned to the pod by default. # Refer the `templates/_helpers.tpl` and `templates/mayastor/agents/core/agent-core-deployment.yaml` for more details. 
priorityClassName: "" ha: enabled: true node: # -- Log level for the ha node service logLevel: info resources: limits: # -- Cpu limits for ha node agent cpu: "100m" # -- Memory limits for ha node agent memory: "64Mi" requests: # -- Cpu requests for ha node agent cpu: "100m" # -- Memory requests for ha node agent memory: "64Mi" # -- Set tolerations, overrides global tolerations: [] # -- Set PriorityClass, overrides global priorityClassName: "" cluster: # -- Log level for the ha cluster service logLevel: info resources: limits: # -- Cpu limits for ha cluster agent cpu: "100m" # -- Memory limits for ha cluster agent memory: "64Mi" requests: # -- Cpu requests for ha cluster agent cpu: "100m" # -- Memory requests for ha cluster agent memory: "16Mi" apis: rest: # -- Log level for the rest service logLevel: info # -- Number of replicas of rest replicaCount: 1 resources: limits: # -- Cpu limits for rest cpu: "100m" # -- Memory limits for rest memory: "64Mi" requests: # -- Cpu requests for rest cpu: "50m" # -- Memory requests for rest memory: "32Mi" # Rest service parameters define how the rest service is exposed service: # -- Rest K8s service type type: ClusterIP # Ports from where rest endpoints are accessible from outside the cluster, only valid if type is NodePort nodePorts: # NodePort associated with http port http: 30011 # NodePort associated with https port https: 30010 # -- Set tolerations, overrides global tolerations: [] # -- Set PriorityClass, overrides global. # If both local and global are not set, the final deployment manifest has a mayastor custom critical priority class assigned to the pod by default. # Refer the `templates/_helpers.tpl` and `templates/mayastor/apis/rest/api-rest-deployment.yaml` for more details. 
priorityClassName: "" csi: image: # -- Image registry to pull all CSI Sidecar images registry: registry.k8s.io # -- Image registry's namespace repo: sig-storage # -- imagePullPolicy for all CSI Sidecar images pullPolicy: IfNotPresent # -- csi-provisioner image release tag provisionerTag: v3.5.0 # -- csi-attacher image release tag attacherTag: v4.3.0 # -- csi-snapshotter image release tag snapshotterTag: v6.2.1 # -- csi-snapshot-controller image release tag snapshotControllerTag: v6.2.1 # -- csi-node-driver-registrar image release tag registrarTag: v2.8.0 controller: # -- Log level for the csi controller logLevel: info resources: limits: # -- Cpu limits for csi controller cpu: "32m" # -- Memory limits for csi controller memory: "128Mi" requests: # -- Cpu requests for csi controller cpu: "16m" # -- Memory requests for csi controller memory: "64Mi" # -- Set tolerations, overrides global tolerations: [] # -- Set PriorityClass, overrides global priorityClassName: "" node: logLevel: info topology: segments: openebs.io/csi-node: mayastor # -- Add topology segments to the csi-node daemonset node selector nodeSelector: false resources: limits: # -- Cpu limits for csi node plugin cpu: "100m" # -- Memory limits for csi node plugin memory: "128Mi" requests: # -- Cpu requests for csi node plugin cpu: "100m" # -- Memory requests for csi node plugin memory: "64Mi" nvme: # -- The nvme_core module io timeout in seconds io_timeout: "30" # -- The ctrl_loss_tmo (controller loss timeout) in seconds ctrl_loss_tmo: "1980" # Kato (keep alive timeout) in seconds keep_alive_tmo: "" # -- The kubeletDir directory for the csi-node plugin kubeletDir: /var/lib/kubelet pluginMounthPath: /csi socketPath: csi.sock # -- Set tolerations, overrides global tolerations: [] # -- Set PriorityClass, overrides global priorityClassName: "" io_engine: # -- Log level for the io-engine service logLevel: info api: "v1" target: nvmf: # -- NVMF target interface (ip, mac, name or subnet) iface: "" # -- Reservations 
Persist Through Power Loss State ptpl: true # NVMF target Command Retry Delay for volume target initiators hostCmdRetryDelay: # A command retry delay in seconds. A value of 0 means no delay, host may retry immediately crdt1: 30 # -- Pass additional arguments to the Environment Abstraction Layer. # Example: --set {product}.envcontext=iova-mode=pa envcontext: "" reactorFreezeDetection: enabled: false # -- The number of cores that each io-engine instance will bind to. cpuCount: "2" # -- If not empty, overrides the cpuCount and explicitly sets the list of cores. # Example: --set='io_engine.coreList={30,31}' coreList: [] # -- Node selectors to designate storage nodes for diskpool creation # Note that if multi-arch images support 'kubernetes.io/arch: amd64' # should be removed. nodeSelector: openebs.io/engine: mayastor kubernetes.io/arch: amd64 resources: limits: # -- Cpu limits for the io-engine cpu: "" # -- Memory limits for the io-engine memory: "1Gi" # -- Hugepage size available on the nodes hugepages2Mi: "2Gi" requests: # -- Cpu requests for the io-engine cpu: "" # -- Memory requests for the io-engine memory: "1Gi" # -- Hugepage size available on the nodes hugepages2Mi: "2Gi" # -- Set tolerations, overrides global tolerations: [] # -- Set PriorityClass, overrides global priorityClassName: "" etcd: # Configuration for etcd's localpv hostpath storage class. localpvScConfig: # Name of etcd's localpv hostpath storage class. name: "mayastor-etcd-localpv" # -- Host path where local etcd data is stored in. basePath: "/var/local/localpv-hostpath/{{ .Release.Name }}/etcd" # -- ReclaimPolicy of etcd's localpv hostpath storage class. reclaimPolicy: Delete # -- VolumeBindingMode of etcd's localpv hostpath storage class. 
volumeBindingMode: WaitForFirstConsumer # Pod labels; okay to remove the openebs logging label if required podLabels: app: etcd openebs.io/logging: "true" # -- Number of replicas of etcd replicaCount: 3 # Kubernetes Cluster Domain clusterDomain: cluster.local # TLS authentication for client-to-server communications # ref: https://etcd.io/docs/current/op-guide/security/ client: secureTransport: false # TLS authentication for server-to-server communications # ref: https://etcd.io/docs/current/op-guide/security/ peer: secureTransport: false # Enable persistence using Persistent Volume Claims persistence: # -- If true, use a Persistent Volume Claim. If false, use emptyDir. enabled: true # -- Will define which storageClass to use in etcd's StatefulSets. Options: #
- `"manual"` - Will provision a hostpath PV on the same node.
# - `""` (empty) - Will use the default StorageClass on the cluster.
- `"manual"` - Will provision a hostpath PV on the same node.
# - `""` (empty) - Will use the default StorageClass on the cluster.