Files
dsk-iac/ansible/roles/helm_install/files/druid/override-values.yaml
2022-12-09 13:38:44 +09:00

226 lines
7.0 KiB
YAML

# Global Druid runtime properties; the chart renders each entry as an
# environment variable consumed by every Druid process.
configVars:
  druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service", "prometheus-emitter","druid-s3-extensions"]'
  druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid
  # integration druid exporter configuration
  druid_emitter: prometheus
  druid_emitter_prometheus_strategy: exporter
  druid_emitter_prometheus_port: "9000"
  druid_monitoring_monitors: '["org.apache.druid.java.util.metrics.JvmMonitor", "org.apache.druid.java.util.metrics.JvmThreadsMonitor"]'
  # Folder creation behaves oddly; re-check this option later.
  druid_storage_type: s3
  druid_storage_bucket: druid.dev.datasaker.io
  druid_storage_baseKey: druid-data/segments
  # Credentials are intentionally blank — presumably injected out of band
  # (IAM role or secret); NOTE(review): confirm before relying on them.
  druid_s3_accessKey:
  druid_s3_secretKey:
  AWS_REGION: "ap-northeast-2"
  druid_s3_forceGlobalBucketAccessEnabled: "false"
  druid_storage_disableAcl: "true"
  druid_indexer_logs_type: s3
  druid_indexer_logs_s3Bucket: druid.dev.datasaker.io
  druid_indexer_logs_s3Prefix: druid-data/logs
  druid_indexer_logs_disableAcl: "true"
  druid_s3_endpoint_signingRegion: "ap-northeast-2"
  druid_s3_endpoint_url: "https://s3.ap-northeast-2.amazonaws.com/druid.dev.datasaker.io/druid-data"
  druid_s3_protocol: "https"
  druid_s3_enablePathStyleAccess: "true"
# Broker tier: heavy query fan-out node, pinned to mid-sized Druid nodes.
broker:
  config:
    DRUID_XMX: 8g
    DRUID_XMS: 8g
    DRUID_MAXDIRECTMEMORYSIZE: 12g
    # NOTE(review): heap-dump path names "historical.hprof" on the broker —
    # looks copy-pasted from the historical section; confirm intended name.
    JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof"
    druid_server_http_maxSubqueryRows: "1000000"
    druid_server_http_numThreads: 60
    druid_broker_http_numConnections: 50
    druid_broker_http_maxQueuedBytes: '10MiB'
    druid_processing_numMergeBuffers: 6
    druid_processing_buffer_sizeBytes: '500MiB'
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/druid-size
                operator: In
                values:
                  - middle
# Coordinator: segment balancing/management; mid-sized node placement.
coordinator:
  config:
    DRUID_XMX: 8g
    DRUID_XMS: 8g
    # NOTE(review): heap-dump file name "historical.hprof" appears
    # copy-pasted across components; confirm intended name.
    JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof"
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/druid-size
                operator: In
                values:
                  - middle
# Overlord: ingestion-task management; mid-sized node placement.
overlord:
  javaOpts: "-Xms4G -Xmx4G"
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/druid-size
                operator: In
                values:
                  - middle
# Historical tier: serves immutable segments from local cache; placed on the
# large node pool. Segment cache lives on ephemeral disk (persistence off).
historical:
  config:
    DRUID_XMX: 8g
    DRUID_XMS: 8g
    DRUID_MAXDIRECTMEMORYSIZE: 12g
    JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof"
    druid_server_http_numThreads: 60
    druid_processing_numThreads: 16
    druid_processing_numMergeBuffers: 4
    druid_processing_buffer_sizeBytes: '500MiB'
    druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"300g"}]'
    druid_server_maxSize: '800g'
    # Quoted to match the string form used by every other env-style flag in
    # this file (e.g. druid_storage_disableAcl: "true"); these are rendered
    # into environment variables, which are always strings.
    druid_historical_cache_useCache: "true"
    druid_historical_cache_populateCache: "true"
    druid_cache_type: 'caffeine'
    druid_cache_sizeInBytes: '256MiB'
  # Chart-level toggle (real boolean, evaluated by Helm templates).
  persistence:
    enabled: false
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/druid-size
                operator: In
                values:
                  - large
# MiddleManager: spawns ingestion-task peons (small JVM itself; peons sized
# via druid_indexer_runner_javaOptsArray). Placed on the large node pool.
middleManager:
  config:
    DRUID_XMX: 128m
    DRUID_XMS: 128m
    JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof"
    # Fixed two broken JVM flags that contained stray spaces:
    # "-Duser. timezone=UTC"  -> "-Duser.timezone=UTC"
    # "...log4j.jul. LogManager" -> "...log4j.jul.LogManager"
    # (a space inside a -D property or class name makes the flag invalid).
    druid_indexer_runner_javaOptsArray: '["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=4g", "-XX:+UseG1GC", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]'
    # NOTE(review): relative path — resolved against the process working
    # directory; confirm it should not be /opt/druid/var/druid/task.
    druid_indexer_task_baseTaskDir: var/druid/task
    druid_worker_capacity: 20
    druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '500MiB'
    druid_indexer_fork_property_druid_processing_numThreads: 4
    druid_indexer_fork_property_druid_processing_numMergeBuffers: 2
  persistence:
    enabled: false
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/druid-size
                operator: In
                values:
                  - large
# Router: lightweight query gateway, exposed via NodePort on small nodes.
router:
  config:
    DRUID_XMX: 1g
    DRUID_XMS: 1g
    DRUID_MAXDIRECTMEMORYSIZE: 3g
    JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof"
  serviceType: NodePort
  # nodePort attribute added in templates/router/service.yaml
  nodePort: 30888
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/druid-size
                operator: In
                values:
                  - small
# ------------------------------------------------------------------------------
# Zookeeper:
# ------------------------------------------------------------------------------
zookeeper:
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid
# ------------------------------------------------------------------------------
# postgres:
# ------------------------------------------------------------------------------
postgresql:
  master:
    tolerations:
      - key: "dev/data-druid"
        operator: "Exists"
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: datasaker/group
                  operator: In
                  values:
                    - data-druid
  slave:
    tolerations:
      - key: "dev/data-druid"
        operator: "Exists"
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: datasaker/group
                  operator: In
                  values:
                    - data-druid