# dsk-iac/packer/ansible/roles/helm_install/files/kafka/test
---
# Source: kafkaset/charts/akhq/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: test-akhq-secrets
  labels:
    app.kubernetes.io/name: akhq
    helm.sh/chart: akhq-0.2.7
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
type: Opaque
data:
  application-secrets.yml: "YWtocToKICBjb25uZWN0aW9uczoKICAgIG15LWNsdXN0ZXItcGxhaW4tdGV4dDoKICAgICAgcHJvcGVydGllczoKICAgICAgICBib290c3RyYXAuc2VydmVyczoga2Fma2E6OTA5Mg=="
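# Decoded, application-secrets.yml defines an AKHQ connection named
# my-cluster-plain-text whose bootstrap.servers is kafka:9092, i.e. the
# in-cluster "kafka" Service defined further down in this manifest.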
---
# Source: kafkaset/charts/akhq/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-akhq
  labels:
    app.kubernetes.io/name: akhq
    helm.sh/chart: akhq-0.2.7
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
data:
  application.yml: |
    akhq:
      server:
        access-log:
          enabled: false
          name: org.akhq.log.access
---
# Source: kafkaset/charts/zookeeper/templates/0.config.yaml
kind: ConfigMap
metadata:
  name: zookeeper-config
  namespace: dsk-middle
apiVersion: v1
data:
  init.sh: |-
    #!/bin/bash
    set -e
    set -x
    [ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data
    [ -z "$ID_OFFSET" ] && ID_OFFSET=1
    export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET))
    echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid
    cp -Lur /etc/kafka-configmap/* /etc/kafka/
    sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties
  zookeeper.properties: |-
    tickTime=2000
    dataDir=/var/lib/zookeeper/data
    dataLogDir=/var/lib/zookeeper/log
    clientPort=2181
    maxClientCnxns=0
    initLimit=5
    syncLimit=2
    server.1=zookeeper-0.zookeeper-headless.dsk-middle.svc.cluster.local:2888:3888:participant
    server.2=zookeeper-1.zookeeper-headless.dsk-middle.svc.cluster.local:2888:3888:participant
    server.3=zookeeper-2.zookeeper-headless.dsk-middle.svc.cluster.local:2888:3888:participant
  log4j.properties: |-
    log4j.rootLogger=INFO, stdout
    log4j.appender.stdout=org.apache.log4j.ConsoleAppender
    log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
    log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
    # Suppress connection log messages, three lines per livenessProbe execution
    log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN
    log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN
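# Note on init.sh above: it derives ZOOKEEPER_SERVER_ID from the StatefulSet
# pod ordinal (the numeric suffix of the hostname) plus ID_OFFSET, writes that
# id to the myid file, and rewrites this server's own server.N entry in
# zookeeper.properties to 0.0.0.0 so the local process binds on all interfaces.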
---
# Source: kafkaset/templates/role.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: test-cluster-admin-clusterrolebinding
subjects:
  - kind: ServiceAccount
    name: default
    namespace: dsk-middle
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
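# Note: this binds the cluster-admin ClusterRole to the default ServiceAccount
# in dsk-middle, so every pod in that namespace running under the default
# ServiceAccount (including the AKHQ Deployment below) gets full cluster access.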
---
# Source: kafkaset/charts/akhq/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: test-akhq
  labels:
    app.kubernetes.io/name: akhq
    helm.sh/chart: akhq-0.2.7
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
  annotations:
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: akhq
    app.kubernetes.io/instance: test
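# targetPort "http" resolves to the container port named http (8080) on the
# akhq container in the test-akhq Deployment below.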
---
# Source: kafkaset/charts/kafka/templates/2.dns.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-headless
  namespace: dsk-middle
spec:
  ports:
    - port: 9092
  clusterIP: None
  selector:
    app: kafka
---
# Source: kafkaset/charts/kafka/templates/3.bootstrap-service.yaml
apiVersion: v1
kind: Service
metadata:
  # name: bootstrap
  name: kafka
  namespace: dsk-middle
spec:
  ports:
    - port: 9092
  selector:
    app: kafka
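# The six Services that follow expose each broker individually on fixed
# NodePorts: kafka-outside-{0,1,2} map container port 9094 to NodePorts
# 32400-32402, and kafka-global-{0,1,2} map container port 9095 to NodePorts
# 32500-32502, selecting pods by their kafka-broker-id label. Presumably the
# brokers advertise these per-broker addresses on their external listeners,
# but that wiring lives in the broker-config ConfigMap, which is referenced by
# the kafka StatefulSet below and is not rendered in this file.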
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
  name: kafka-outside-0
  namespace: dsk-middle
spec:
  selector:
    app: kafka
    kafka-broker-id: "0"
  ports:
    - protocol: TCP
      targetPort: 9094
      port: 32400
      nodePort: 32400
  type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
  name: kafka-outside-1
  namespace: dsk-middle
spec:
  selector:
    app: kafka
    kafka-broker-id: "1"
  ports:
    - protocol: TCP
      targetPort: 9094
      port: 32401
      nodePort: 32401
  type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
  name: kafka-outside-2
  namespace: dsk-middle
spec:
  selector:
    app: kafka
    kafka-broker-id: "2"
  ports:
    - protocol: TCP
      targetPort: 9094
      port: 32402
      nodePort: 32402
  type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
  name: kafka-global-0
  namespace: dsk-middle
spec:
  selector:
    app: kafka
    kafka-broker-id: "0"
  ports:
    - protocol: TCP
      targetPort: 9095
      port: 32500
      nodePort: 32500
  type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
  name: kafka-global-1
  namespace: dsk-middle
spec:
  selector:
    app: kafka
    kafka-broker-id: "1"
  ports:
    - protocol: TCP
      targetPort: 9095
      port: 32501
      nodePort: 32501
  type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
  name: kafka-global-2
  namespace: dsk-middle
spec:
  selector:
    app: kafka
    kafka-broker-id: "2"
  ports:
    - protocol: TCP
      targetPort: 9095
      port: 32502
      nodePort: 32502
  type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-broker
  namespace: dsk-middle
spec:
  type: NodePort
  ports:
    - port: 9094
      name: kafka
      protocol: TCP
      targetPort: 9094
      nodePort: 30094
  selector:
    app: kafka
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-broker-global
  namespace: dsk-middle
spec:
  type: NodePort
  ports:
    - port: 9095
      name: kafka
      protocol: TCP
      targetPort: 9095
      nodePort: 30095
  selector:
    app: kafka
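# The two ZooKeeper Services below split duties: the headless Service exposes
# the peer (2888) and leader-election (3888) ports that the server.N entries in
# zookeeper.properties point at, while the plain "zookeeper" Service fronts the
# client port (2181).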
---
# Source: kafkaset/charts/zookeeper/templates/1.service-leader-election.yaml
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-headless
  namespace: dsk-middle
spec:
  ports:
    - port: 2888
      name: peer
    - port: 3888
      name: leader-election
  clusterIP: None
  selector:
    app: zookeeper
    storage: persistent
---
# Source: kafkaset/charts/zookeeper/templates/2.service-client.yaml
# the headless service is for StatefulSet DNS, this one is for clients
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: dsk-middle
spec:
  ports:
    - port: 2181
      name: client
  selector:
    app: zookeeper
---
# Source: kafkaset/charts/akhq/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-akhq
  labels:
    app.kubernetes.io/name: akhq
    helm.sh/chart: akhq-0.2.7
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: akhq
      app.kubernetes.io/instance: test
  template:
    metadata:
      annotations:
        checksum/config: 00490bc3c20c1a8c6ab1b49540d63065ad39aae5b19766fc0a884db2c0b5ecbf
        checksum/secrets: 235bfd9fa6c8713d840dc969c1c05fd1b82c200a02bd4187955d14a983effe58
      labels:
        app.kubernetes.io/name: akhq
        app.kubernetes.io/instance: test
    spec:
      serviceAccountName: default
      containers:
        - name: akhq
          image: "tchiotludo/akhq:0.20.0"
          imagePullPolicy: Always
          env:
            - name: MICRONAUT_ENVIRONMENTS
              value: secrets
            - name: MICRONAUT_CONFIG_FILES
              value: /app/application.yml,/app/application-secrets.yml
          volumeMounts:
            - name: config
              mountPath: /app/application.yml
              subPath: application.yml
            - name: secrets
              mountPath: /app/application-secrets.yml
              subPath: application-secrets.yml
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
            - name: management
              containerPort: 28081
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: management
          readinessProbe:
            httpGet:
              path: /health
              port: management
          resources:
            {}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: datasaker/group
                    operator: In
                    values:
                      - data-kafka
      tolerations:
        - key: dev/data-kafka
          operator: Exists
      volumes:
        - name: config
          configMap:
            name: test-akhq
        - name: secrets
          secret:
            secretName: test-akhq-secrets
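# The checksum/config and checksum/secrets annotations on the pod template are
# the usual Helm pattern for forcing a rolling restart of this Deployment
# whenever the rendered ConfigMap or Secret content changes.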
---
# Source: kafkaset/charts/kafka/templates/5.kafka.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: dsk-middle
spec:
  selector:
    matchLabels:
      app: kafka
  serviceName: "kafka-headless"
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: kafka
    spec:
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - preference:
                matchExpressions:
                  - key: datasaker/group
                    operator: In
                    values:
                      - data-kafka
              weight: 100
        podAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - zookeeper
                topologyKey: kubernetes.io/hostname
              weight: 50
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - kafka
                topologyKey: kubernetes.io/hostname
              weight: 50
      tolerations:
        - key: dev/data-kafka
          operator: Exists
      terminationGracePeriodSeconds: 30
      initContainers:
        - name: init-config
          image: datasaker/kafka-initutils:v1.0.0
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          command: ['/bin/bash', '/etc/kafka-configmap/init.sh']
          volumeMounts:
            - name: configmap
              mountPath: /etc/kafka-configmap
            - name: config
              mountPath: /etc/kafka
            - name: extensions
              mountPath: /opt/kafka/libs/extensions
      containers:
        - name: broker
          image: datasaker/kafka:v1.0.1
          env:
            - name: CLASSPATH
              value: /opt/kafka/libs/extensions/*
            - name: KAFKA_LOG4J_OPTS
              value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties
            - name: JMX_PORT
              value: "5555"
            - name: KAFKA_OPTS
              value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml
          ports:
            - name: inside
              containerPort: 9092
            - name: outside
              containerPort: 9094
            - name: global
              containerPort: 9095
            - name: jmx
              containerPort: 9010
          command:
            - ./bin/kafka-server-start.sh
            - /etc/kafka/server.properties
          lifecycle:
            preStop:
              exec:
                command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"]
          resources:
            requests:
              cpu: 500m
              memory: 6000Mi
            limits:
              # This limit was intentionally set low as a reminder that
              # the entire Yolean/kubernetes-kafka is meant to be tweaked
              # before you run production workloads
              cpu: 1000m
              memory: 10000Mi
          readinessProbe:
            tcpSocket:
              port: 9092
            timeoutSeconds: 1
          volumeMounts:
            - name: config
              mountPath: /etc/kafka
            - name: data
              mountPath: /var/lib/kafka/data
            - name: extensions
              mountPath: /opt/kafka/libs/extensions
      volumes:
        - name: configmap
          configMap:
            name: broker-config
        - name: config
          emptyDir: {}
        - name: extensions
          emptyDir: {}
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName:
        resources:
          requests:
            storage: 50Gi
---
# Source: kafkaset/charts/zookeeper/templates/4.statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
  namespace: dsk-middle
spec:
  selector:
    matchLabels:
      app: zookeeper
      storage: persistent
  serviceName: "zookeeper-headless"
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zookeeper
        storage: persistent
      annotations:
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: datasaker/group
                    operator: In
                    values:
                      - data-kafka
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - zookeeper
                topologyKey: kubernetes.io/hostname
              weight: 50
      tolerations:
        - key: dev/data-kafka
          operator: Exists
      terminationGracePeriodSeconds: 10
      initContainers:
        - name: init-config
          image: datasaker/kafka-initutils:v1.0.0
          command: ['/bin/bash', '/etc/kafka-configmap/init.sh']
          volumeMounts:
            - name: configmap
              mountPath: /etc/kafka-configmap
            - name: config
              mountPath: /etc/kafka
            - name: data
              mountPath: /var/lib/zookeeper
      containers:
        - name: zookeeper
          image: datasaker/kafka:v1.0.0
          env:
            - name: KAFKA_LOG4J_OPTS
              value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties
          command:
            - ./bin/zookeeper-server-start.sh
            - /etc/kafka/zookeeper.properties
          lifecycle:
            preStop:
              exec:
                command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"]
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: peer
            - containerPort: 3888
              name: leader-election
          resources:
            requests:
              cpu: 100m
              memory: 512Mi
            limits:
              cpu: 200m
              memory: 1000Mi
          readinessProbe:
            exec:
              command: ['/bin/bash', '-c', 'echo "ruok" | nc -w 2 localhost 2181 | grep imok']
          volumeMounts:
            - name: config
              mountPath: /etc/kafka
            - name: data
              mountPath: /var/lib/zookeeper
      volumes:
        - name: configmap
          configMap:
            name: zookeeper-config
        - name: config
          emptyDir: {}
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName:
        resources:
          requests:
            storage: 30Gi
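# Both StatefulSets above leave storageClassName empty in their
# volumeClaimTemplates, so the data PVCs (50Gi for Kafka, 30Gi for ZooKeeper)
# fall back to the cluster's default StorageClass.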
---
# Source: kafkaset/charts/kafka/templates/2.dns.yaml
# A headless service to create DNS records