dsk-iac/ansible/roles/helm_install/files/druid/override-values.yaml_old
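
# Override values for the Apache Druid Helm chart. Entries under configVars
# are injected into the containers as environment variables; the chart's Druid
# image maps names such as druid_extensions_loadList onto runtime properties
# (druid.extensions.loadList), with underscores becoming dots.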

configVars:
  druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service"]'
  druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid
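
# Broker sizing: DRUID_XMX/DRUID_XMS set the JVM heap, DRUID_MAXDIRECTMEMORYSIZE
# caps off-heap (direct) memory, and druid_server_http_maxSubqueryRows raises
# the broker-side limit on rows materialized from inlined subqueries.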
broker:
  config:
    DRUID_XMX: 4g
    DRUID_XMS: 4g
    DRUID_MAXDIRECTMEMORYSIZE: 1g
    druid_server_http_maxSubqueryRows: "1000000"
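  # Every component below repeats the same scheduling pin: tolerate the
  # dev/data-druid taint and require nodes labeled datasaker/group=data-druid,
  # so all Druid pods (and the Zookeeper/PostgreSQL subcharts) land on the
  # dedicated node group.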
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid

coordinator:
  config:
    DRUID_XMX: 4g
    DRUID_XMS: 4g
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid
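
# The overlord sets its heap via javaOpts rather than the DRUID_XMX/XMS env
# vars used by the other components; both routes end up as -Xms/-Xmx flags on
# the JVM command line.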
overlord:
  javaOpts: "-Xms4G -Xmx4G"
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid
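
# Historical storage: druid_server_maxSize (the capacity the historical
# advertises) should match the segment-cache maxSize, and both must fit inside
# the 500Gi persistent volume requested below.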
historical:
  config:
    DRUID_XMX: 2g
    DRUID_XMS: 2g
    druid_processing_numThreads: 3
    druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"500g"}]'
    druid_server_maxSize: '500g'
  persistence:
    size: "500Gi"
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid
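
# MiddleManager sizing: each peon forked by the middleManager inherits the
# javaOptsArray settings (1g heap + 3g direct memory), so a worker capacity of
# 8 implies roughly 8 x 4g for tasks on top of the middleManager's own 9g heap.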
middleManager:
  config:
    DRUID_XMX: 9g
    DRUID_XMS: 9g
    druid_indexer_runner_javaOptsArray: '["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=3g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]'
    druid_worker_capacity: 8
    druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '330MiB'
    druid_indexer_fork_property_druid_processing_numThreads: 1
    druid_indexer_fork_property_druid_processing_numMergeBuffers: 2
  persistence:
    size: "500Gi"
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid
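
# NodePort exposes the router (the query and web-console entry point) outside
# the cluster on a port allocated from the node-port range.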
router:
  serviceType: NodePort
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid

# ------------------------------------------------------------------------------
# Zookeeper:
# ------------------------------------------------------------------------------
zookeeper:
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid

# ------------------------------------------------------------------------------
# PostgreSQL:
# ------------------------------------------------------------------------------
postgresql:
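  # master/slave follow the key names of the older Bitnami PostgreSQL subchart
  # bundled with this Druid chart version; newer releases of that subchart
  # rename them to primary/readReplicas.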
  master:
    tolerations:
      - key: "dev/data-druid"
        operator: "Exists"
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: datasaker/group
                  operator: In
                  values:
                    - data-druid
  slave:
    tolerations:
      - key: "dev/data-druid"
        operator: "Exists"
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: datasaker/group
                  operator: In
                  values:
                    - data-druid

# Secrets