configVars:
  druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service"]'
  druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid
  # integration druid exporter configuration
  druid_emitter: http
  druid_emitter_http_recipientBaseUrl: http://prometheus-druid-exporter:8080/druid

broker:
  config:
    DRUID_XMX: 4g
    DRUID_XMS: 4g
    DRUID_MAXDIRECTMEMORYSIZE: 1g
    druid_server_http_maxSubqueryRows: "1000000"
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid

coordinator:
  config:
    DRUID_XMX: 4g
    DRUID_XMS: 4g
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid

overlord:
  javaOpts: "-Xms4G -Xmx4G"
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid

historical:
  config:
    DRUID_XMX: 10g
    DRUID_XMS: 10g
    druid_processing_numThreads: 3
    druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"500g"}]'
    druid_server_maxSize: '500g'
  persistence:
    size: "500Gi"
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid

middleManager:
  config:
    DRUID_XMX: 1g
    DRUID_XMS: 1g
    druid_indexer_runner_javaOptsArray: '["-server", "-Xms4g", "-Xmx4g", "-XX:MaxDirectMemorySize=6g", "-XX:+UseStringDeduplication", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]'
    druid_worker_capacity: 8
    druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '330MiB'
    druid_indexer_fork_property_druid_processing_numThreads: 1
    druid_indexer_fork_property_druid_processing_numMergeBuffers: 2
  persistence:
    size: "500Gi"
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid

router:
  serviceType: NodePort
  # the nodePort field is added in templates/router/service.yaml
  nodePort: 30888
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid

# ------------------------------------------------------------------------------
# Zookeeper:
# ------------------------------------------------------------------------------
zookeeper:
  tolerations:
    - key: "dev/data-druid"
      operator: "Exists"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: datasaker/group
                operator: In
                values:
                  - data-druid

# ------------------------------------------------------------------------------
# postgres:
# ------------------------------------------------------------------------------
postgresql:
  master:
    tolerations:
      - key: "dev/data-druid"
        operator: "Exists"
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: datasaker/group
                  operator: In
                  values:
                    - data-druid
  slave:
    tolerations:
      - key: "dev/data-druid"
        operator: "Exists"
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: datasaker/group
                  operator: In
                  values:
                    - data-druid