This commit is contained in:
havelight-ee
2023-05-11 13:55:28 +09:00
parent 55d4828037
commit 2d70373907
1390 changed files with 0 additions and 1398 deletions

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,5 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
name: kafkaset
version: 0.1.0

View File

@@ -0,0 +1,3 @@
포트 바꾸려면 values.yaml 수정과
broker-config yaml 수정이 동시에 이루어져야 합니다

View File

@@ -0,0 +1,19 @@
apiVersion: v1
appVersion: 0.20.0
description: Kafka GUI for Apache Kafka to manage topics, topics data, consumers group,
schema registry, connect and more...
home: https://akhq.io
icon: https://raw.githubusercontent.com/tchiotludo/akhq/master/client/src/images/logo_black.png
keywords:
- kafka
- confluent
- gui
- schema-registry
- kafka-connect
maintainers:
- email: tchiot.ludo@gmail.com
name: tchiotludo
name: akhq
sources:
- https://github.com/tchiotludo/akhq
version: 0.2.7

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,124 @@
# AKHQ (previously known as KafkaHQ)
![Last Version](https://img.shields.io/github/tag-pre/tchiotludo/akhq.svg)
![License](https://img.shields.io/github/license/tchiotludo/akhq)
![Docker Pull](https://img.shields.io/docker/pulls/tchiotludo/akhq.svg)
![Github Downloads](https://img.shields.io/github/downloads/tchiotludo/akhq/total)
![Github Stars](https://img.shields.io/github/stars/tchiotludo/akhq.svg)
![Main](https://github.com/tchiotludo/akhq/workflows/Main/badge.svg)
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/akhq)](https://artifacthub.io/packages/search?repo=akhq)
> Kafka GUI for [Apache Kafka](http://kafka.apache.org/) to manage topics, topics data, consumers group, schema registry, connect and more...
<p align="center">
<img width="460" src="client/src/images/logo_black.svg" alt="AKHQ for Kafka logo" /><br /><br />
<img width="720" src="docs/.vuepress/public/assets/images/video.gif" alt="AKHQ for Kafka preview" />
</p>
## Documentation
* The official AKHQ documentation can be found under: [akhq.io](https://akhq.io/docs)
## Sponsors
<table>
<tr>
<td>
<img width="1000" height="0">
<a href="https://upstash.com/?utm_source=AKHQ" >
<img src="https://raw.githubusercontent.com/upstash/sponsorship/master/kafka.png" alt="Upstash" width="260" align="right">
</a>
<h3>Upstash: Serverless Kafka</h3>
<ul>
<li>True Serverless Kafka with per-request-pricing</li>
<li>Managed Apache Kafka, works with all Kafka clients</li>
<li>Built-in REST API designed for serverless and edge functions</li>
</ul>
[Start for free in 30 seconds!](https://upstash.com/?utm_source=AKHQ)
</td>
</tr>
</table>
<table>
<tr>
<td>
<img width="1000" height="0">
<a href="https://redpanda.com/?utm_source=AKHQ" >
<img style="background-color: #FFF;padding:10px" src="https://user-images.githubusercontent.com/2064609/164302508-76ac1dfb-6c50-43d4-a092-ff275b25a032.svg" alt="Redpanda" width="260" align="right">
</a>
<h3>Redpanda</h3>
<ul>
<li>Redpanda is a streaming data platform for developers.</li>
<li>Kafka API compatible.</li>
<li>10x faster. No ZooKeeper. No JVM!</li>
</ul>
[redpanda.com](https://redpanda.com/?utm_source=AKHQ)
</td>
</tr>
</table>
## From AKHQ project creator
<table>
<tr>
<td>
<img width="1000" height="0">
<a href="https://github.com/kestra-io/kestra?utm_source=AKHQ" >
<img src="https://kestra.io/video.gif" alt="Kestra" width="320" align="right">
</a>
<h3>Kestra: Open source data orchestration and scheduling platform</h3>
<p>
Kestra is an infinitely scalable orchestration and scheduling platform, creating, running, scheduling, and monitoring millions of complex pipelines.
</p>
[Discover the project!](https://github.com/kestra-io/kestra?utm_source=AKHQ)
</td>
</tr>
</table>
## Who's using AKHQ
* [Adeo](https://www.adeo.com/)
* [Avlino](https://avlino.com/)
* [Auchan Retail](https://www.auchan-retail.com/)
* [BARMER](https://www.barmer.de/)
* [Bell](https://www.bell.ca)
* [Best buy](https://www.bestbuy.com)
* [BMW Group](https://www.bmwgroup.com)
* [Boulanger](https://www.boulanger.com/)
* [BPCE-IT](https://www.bpce-it.fr/)
* [Decathlon](https://www.decathlon.fr/)
* [Depop](https://www.depop.com)
* [Galeries Lafayette](https://www.galerieslafayette.com/)
* [GetYourGuide](https://www.getyourguide.com)
* [Kitopi](https://kitopi.com)
* [Klarna](https://www.klarna.com)
* [La Redoute](https://laredoute.io/)
* [Leroy Merlin](https://www.leroymerlin.fr/)
* [NEXT Technologies](https://www.nextapp.co/)
* [Nuxeo](https://www.nuxeo.com/)
* [Pipedrive](https://www.pipedrive.com)
* [TVG](https://www.tvg.com)
* [Vodeno](https://www.vodeno.com/)
## Credits
Many thanks to:
* [JetBrains](https://www.jetbrains.com/?from=AKHQ) for their free OpenSource license.
* Apache, Apache Kafka, Kafka, and associated open source project names are trademarks of the Apache Software Foundation. AKHQ is not affiliated with, endorsed by, or otherwise associated with the Apache Software.
[![Jetbrains](https://user-images.githubusercontent.com/2064609/55432917-6df7fc00-5594-11e9-90c4-5133fbb6d4da.png)](https://www.jetbrains.com/?from=AKHQ)
## License
Apache 2.0 © [tchiotludo](https://github.com/tchiotludo)

View File

@@ -0,0 +1,21 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range $.Values.ingress.paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "akhq.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get svc -w {{ include "akhq.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "akhq.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "akhq.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:{{ .Values.service.port }}
{{- end }}

View File

@@ -0,0 +1,56 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "akhq.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "akhq.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "akhq.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "akhq.serviceAccountName" -}}
{{- /* Name of the ServiceAccount the pod runs as. In both branches a
non-empty .Values.serviceAccountName (top-level key, default null in
values.yaml) wins; otherwise fall back to the chart fullname when this
chart creates the account, or to the namespace "default" account when
it does not. */}}
{{- if .Values.serviceAccount.create }}
{{- default (include "akhq.fullname" .) .Values.serviceAccountName }}
{{- else }}
{{- default "default" .Values.serviceAccountName }}
{{- end }}
{{- end }}
{{/*
Return the appropriate apiVersion for Ingress
*/}}
{{- define "akhq.ingress.apiVersion" -}}
{{- /* Pick the Ingress apiVersion supported by the target cluster:
extensions/v1beta1 before 1.14, networking.k8s.io/v1beta1 for
1.14-1.18, and the GA networking.k8s.io/v1 from 1.19 on. The "-0"
pre-release suffix in the constraints keeps semverCompare tolerant of
vendor-suffixed versions such as "v1.19.0-gke.1" (standard Helm
practice — see Helm's Capabilities docs). */}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.Version -}}
{{- print "extensions/v1beta1" -}}
{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.Version -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,14 @@
{{- if .Values.configuration }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "akhq.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "akhq.name" . }}
helm.sh/chart: {{ include "akhq.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
application.yml: |
{{ toYaml .Values.configuration | indent 4}}
{{- end }}

View File

@@ -0,0 +1,129 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "akhq.fullname" . }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "akhq.name" . }}
helm.sh/chart: {{ include "akhq.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount | default 1 }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "akhq.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
annotations:
{{- if .Values.configuration }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- end }}
{{- if and (not .Values.existingSecrets) (.Values.secrets) }}
checksum/secrets: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- end }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "akhq.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
{{- if .Values.securityContext }}
securityContext:
{{ toYaml .Values.securityContext | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "akhq.serviceAccountName" . }}
{{- if .Values.initContainers }}
initContainers:
{{- range $key, $value := .Values.initContainers }}
- name: {{ $key }}
{{ toYaml $value | indent 10 }}
{{- end }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy | default "Always" }}
{{- if .Values.containerSecurityContext }}
securityContext:
{{ toYaml .Values.containerSecurityContext | nindent 12 }}
{{- end }}
env:
{{- if .Values.extraEnv }}{{ toYaml .Values.extraEnv | trim | nindent 12 }}{{ end }}
{{- if or (.Values.existingSecrets) (.Values.secrets) }}
- name: MICRONAUT_ENVIRONMENTS
value: secrets
- name: MICRONAUT_CONFIG_FILES
value: /app/application.yml,/app/application-secrets.yml
{{- end }}
volumeMounts:
{{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 12 }}{{ end }}
{{- if .Values.configuration }}
- name: config
mountPath: /app/application.yml
subPath: application.yml
{{- end }}
{{- if or (.Values.existingSecrets) (.Values.secrets) }}
- name: secrets
mountPath: /app/application-secrets.yml
subPath: application-secrets.yml
{{- end }}
ports:
- name: http
containerPort: 8080
protocol: TCP
- name: management
containerPort: 28081
protocol: TCP
livenessProbe:
tcpSocket:
port: management
readinessProbe:
httpGet:
path: {{ .Values.readinessProbe.prefix | default "" }}/health
port: management
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
{{- if .Values.extraVolumes }}{{ toYaml .Values.extraVolumes | trim | nindent 6 }}{{ end }}
{{- if .Values.configuration }}
- name: config
configMap:
name: {{ template "akhq.fullname" . }}
{{- end }}
{{- if or (.Values.existingSecrets) (.Values.secrets) }}
- name: secrets
secret:
{{- if .Values.existingSecrets }}
secretName: {{ .Values.existingSecrets }}
{{- else }}
secretName: {{ template "akhq.fullname" . }}-secrets
{{- end }}
{{- end }}

View File

@@ -0,0 +1,53 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "akhq.fullname" . -}}
{{- $ingressPaths := .Values.ingress.paths -}}
apiVersion: {{ include "akhq.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app.kubernetes.io/name: {{ include "akhq.name" . }}
helm.sh/chart: {{ include "akhq.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
annotations:
{{- tpl (toYaml .) $ | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.ingressClassName (eq (include "akhq.ingress.apiVersion" $) "networking.k8s.io/v1") }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
{{- range $ingressPaths }}
- path: {{ . }}
{{- if eq (include "akhq.ingress.apiVersion" $) "networking.k8s.io/v1" }}
pathType: "ImplementationSpecific"
{{- end }}
backend:
{{- if eq (include "akhq.ingress.apiVersion" $) "networking.k8s.io/v1" }}
service:
name: {{ $fullName }}
port:
name: http
{{ else }}
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,19 @@
{{- if and ( not .Values.existingSecrets) (.Values.secrets) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "akhq.fullname" . }}-secrets
labels:
app.kubernetes.io/name: {{ include "akhq.name" . }}
helm.sh/chart: {{ include "akhq.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
type: Opaque
data:
application-secrets.yml: {{ toYaml .Values.secrets | b64enc | quote }}
{{- if .Values.kafkaSecrets }}
{{- range $key, $value := .Values.kafkaSecrets }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,31 @@
{{- if .Values.service.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "akhq.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "akhq.name" . }}
helm.sh/chart: {{ include "akhq.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $key, $value := .Values.service.labels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
annotations:
{{- range $key, $value := .Values.service.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if and (eq "NodePort" .Values.service.type) .Values.service.httpNodePort }}
nodePort: {{ .Values.service.httpNodePort }}
{{- end }}
selector:
app.kubernetes.io/name: {{ include "akhq.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@@ -0,0 +1,15 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: {{ include "akhq.name" . }}
helm.sh/chart: {{ include "akhq.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ include "akhq.serviceAccountName" . }}
{{- end }}

View File

@@ -0,0 +1,145 @@
# imagePullSecrets:
# - name: my-repository-secret
image:
repository: tchiotludo/akhq
tag: "" # uses Chart.AppVersion by default
# custom annotations (example: for prometheus)
annotations: {}
#prometheus.io/scrape: 'true'
#prometheus.io/port: '8080'
#prometheus.io/path: '/prometheus'
podAnnotations: {}
# custom labels
labels: {}
# custom.label: 'true'
podLabels: {}
## You can put directly your configuration here... or add java opts or any other env vars
extraEnv: []
# - name: AKHQ_CONFIGURATION
# value: |
# akhq:
# secrets:
# docker-kafka-server:
# properties:
# bootstrap.servers: "kafka:9092"
# - name: JAVA_OPTS
# value: "-Djavax.net.ssl.trustStore=/usr/local/openjdk-11/lib/security/cacerts -Djavax.net.ssl.trustStorePassword=password"
# - name: CLASSPATH
# value: "/any/additional/jars/desired.jar:/go/here.jar"
## Or you can also use configmap for the configuration...
configuration:
akhq:
server:
access-log:
enabled: false
name: org.akhq.log.access
##... and secret for connection information
existingSecrets: ""
# name of the existingSecret
secrets:
akhq:
connections:
my-cluster-plain-text:
properties:
bootstrap.servers: "kafka:9092"
# schema-registry:
# url: "http://schema-registry:8085"
# type: "confluent"
# basic-auth-username: basic-auth-user
# basic-auth-password: basic-auth-pass
# connect:
# - name: "my-connect"
# url: "http://connect:8083"
# basic-auth-username: basic-auth-user
# basic-auth-password: basic-auth-pass
kafkaSecrets: {}  # map of name -> base64 value; templates/secret.yaml ranges over key/value pairs
#Provide extra base64 encoded kubernetes secrets (keystore/truststore)
# Any extra volumes to define for the pod (like keystore/truststore)
extraVolumes: []
# Any extra volume mounts to define for the akhq container
extraVolumeMounts: []
# Specify ServiceAccount for pod
serviceAccountName: null
serviceAccount:
create: false
#annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
# Add your own init container or uncomment and modify the example.
initContainers: {}
# create-keystore:
# image: "openjdk:11-slim"
# command: ['sh', '-c', 'keytool']
# volumeMounts:
# - mountPath: /tmp
# name: certs
# Configure the Pod Security Context
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext: {}
# runAsNonRoot: true
# runAsUser: 1000
# Configure the Container Security Context
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext: {}
# allowPrivilegeEscalation: false
# privileged: false
# capabilities:
# drop:
# - ALL
# runAsNonRoot: true
# runAsUser: 1001
# readOnlyRootFilesystem: true
service:
enabled: true
type: NodePort
port: 80
httpNodePort: 32551  # templates/service.yaml reads service.httpNodePort; the former 'nodePort' key was never applied
labels: {}
annotations: {}
# cloud.google.com/load-balancer-type: "Internal"
ingress:
enabled: false
ingressClassName: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
paths:
- /
hosts:
- akhq.demo.com
tls: []
# - secretName: akhq-tls
# hosts:
# - akhq.demo.com
readinessProbe:
prefix: "" # set same as `micronaut.server.context-path`
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -0,0 +1,25 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
example/
README.md

View File

@@ -0,0 +1,7 @@
apiVersion: v2
appVersion: v0.4.0
description: A Helm chart for kafka-UI
icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png
name: kafka-ui
type: application
version: v0.4.1

View File

@@ -0,0 +1,3 @@
apiVersion: v1
entries: {}
generated: "2021-11-11T12:26:08.479581+03:00"

View File

@@ -0,0 +1,21 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-ui.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-ui.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
{{- end }}

View File

@@ -0,0 +1,79 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kafka-ui.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kafka-ui.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kafka-ui.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kafka-ui.labels" -}}
helm.sh/chart: {{ include "kafka-ui.chart" . }}
{{ include "kafka-ui.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kafka-ui.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kafka-ui.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "kafka-ui.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "kafka-ui.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
This allows us to check if the registry of the image is specified or not.
*/}}
{{- define "kafka-ui.imageName" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repository := .Values.image.repository -}}
{{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
{{- if $registryName }}
{{- printf "%s/%s:%s" $registryName $repository $tag -}}
{{- else }}
{{- printf "%s:%s" $repository $tag -}}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,10 @@
{{- if .Values.envs.config -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
data:
{{- toYaml .Values.envs.config | nindent 2 }}
{{- end -}}

View File

@@ -0,0 +1,11 @@
{{- if .Values.yamlApplicationConfig -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kafka-ui.fullname" . }}-fromvalues
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
data:
config.yml: |-
{{- toYaml .Values.yamlApplicationConfig | nindent 4}}
{{ end }}

View File

@@ -0,0 +1,139 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
labels:
{{- include "kafka-ui.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.initContainers }}
initContainers:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "kafka-ui.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: {{ include "kafka-ui.imageName" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if or .Values.env .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
env:
{{- with .Values.env }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if or .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
- name: SPRING_CONFIG_LOCATION
{{- if .Values.yamlApplicationConfig }}
value: /kafka-ui/config.yml
{{- else if .Values.yamlApplicationConfigConfigMap }}
value: /kafka-ui/{{ .Values.yamlApplicationConfigConfigMap.keyName | default "config.yml" }}
{{- end }}
{{- end }}
{{- end }}
envFrom:
{{- if .Values.existingConfigMap }}
- configMapRef:
name: {{ .Values.existingConfigMap }}
{{- end }}
{{- if .Values.envs.config }}
- configMapRef:
name: {{ include "kafka-ui.fullname" . }}
{{- end }}
{{- if .Values.existingSecret }}
- secretRef:
name: {{ .Values.existingSecret }}
{{- end }}
{{- if .Values.envs.secret}}
- secretRef:
name: {{ include "kafka-ui.fullname" . }}
{{- end}}
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
{{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
path: {{ get $contextPath "path" }}
port: http
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
readinessProbe:
httpGet:
{{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
path: {{ get $contextPath "path" }}
port: http
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if or .Values.yamlApplicationConfig .Values.volumeMounts .Values.yamlApplicationConfigConfigMap}}
volumeMounts:
{{- with .Values.volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.yamlApplicationConfig }}
- name: kafka-ui-yaml-conf
mountPath: /kafka-ui/
{{- end }}
{{- if .Values.yamlApplicationConfigConfigMap}}
- name: kafka-ui-yaml-conf-configmap
mountPath: /kafka-ui/
{{- end }}
{{- end }}
{{- if or .Values.yamlApplicationConfig .Values.volumes .Values.yamlApplicationConfigConfigMap}}
volumes:
{{- with .Values.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.yamlApplicationConfig }}
- name: kafka-ui-yaml-conf
configMap:
name: {{ include "kafka-ui.fullname" . }}-fromvalues
{{- end }}
{{- if .Values.yamlApplicationConfigConfigMap}}
- name: kafka-ui-yaml-conf-configmap
configMap:
name: {{ .Values.yamlApplicationConfigConfigMap.name }}
{{- end }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,28 @@
{{- if .Values.autoscaling.enabled }}
{{- /*
HorizontalPodAutoscaler for the kafka-ui Deployment.
BUGFIX: autoscaling/v2beta1 was removed in Kubernetes 1.25, so this
template failed on modern clusters. Prefer autoscaling/v2 (GA since
1.23) when the API server advertises it, falling back to v2beta1 for
older clusters — same Capabilities-based branching the chart's
ingress.yaml already uses. The two APIs spell resource-metric targets
differently (target.averageUtilization vs targetAverageUtilization),
so the metrics entries branch as well.
*/}}
{{- $useV2 := $.Capabilities.APIVersions.Has "autoscaling/v2" }}
{{- if $useV2 }}
apiVersion: autoscaling/v2
{{- else }}
apiVersion: autoscaling/v2beta1
{{- end }}
kind: HorizontalPodAutoscaler
metadata:
  name: {{ include "kafka-ui.fullname" . }}
  labels:
    {{- include "kafka-ui.labels" . | nindent 4 }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ include "kafka-ui.fullname" . }}
  minReplicas: {{ .Values.autoscaling.minReplicas }}
  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
  metrics:
    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
    - type: Resource
      resource:
        name: cpu
        {{- if $useV2 }}
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
        {{- else }}
        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
        {{- end }}
    {{- end }}
    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
    - type: Resource
      resource:
        name: memory
        {{- if $useV2 }}
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
        {{- else }}
        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
        {{- end }}
    {{- end }}
{{- end }}

View File

@@ -0,0 +1,87 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "kafka-ui.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (trimPrefix "v" .Capabilities.KubeVersion.Version | semverCompare ">= 1.19" ) -}}
apiVersion: networking.k8s.io/v1
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls.enabled }}
tls:
- hosts:
- {{ tpl .Values.ingress.host . }}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end }}
{{- if .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
rules:
- http:
paths:
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (trimPrefix "v" .Capabilities.KubeVersion.Version | semverCompare ">= 1.19" ) -}}
{{- range .Values.ingress.precedingPaths }}
- path: {{ .path }}
pathType: Prefix
backend:
service:
name: {{ .serviceName }}
port:
number: {{ .servicePort }}
{{- end }}
- backend:
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
pathType: Prefix
{{- if .Values.ingress.path }}
path: {{ .Values.ingress.path }}
{{- end }}
{{- range .Values.ingress.succeedingPaths }}
- path: {{ .path }}
pathType: Prefix
backend:
service:
name: {{ .serviceName }}
port:
number: {{ .servicePort }}
{{- end }}
{{- if tpl .Values.ingress.host . }}
host: {{tpl .Values.ingress.host . }}
{{- end }}
{{- else -}}
{{- range .Values.ingress.precedingPaths }}
- path: {{ .path }}
backend:
serviceName: {{ .serviceName }}
servicePort: {{ .servicePort }}
{{- end }}
- backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- if .Values.ingress.path }}
path: {{ .Values.ingress.path }}
{{- end }}
{{- range .Values.ingress.succeedingPaths }}
- path: {{ .path }}
backend:
serviceName: {{ .serviceName }}
servicePort: {{ .servicePort }}
{{- end }}
{{- if tpl .Values.ingress.host . }}
host: {{ tpl .Values.ingress.host . }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,18 @@
{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ printf "%s-egress" (include "kafka-ui.fullname" .) }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
policyTypes:
- Egress
egress:
{{- if .Values.networkPolicy.egressRules.customRules }}
{{- toYaml .Values.networkPolicy.egressRules.customRules | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,18 @@
{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.ingressRules.customRules }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ printf "%s-ingress" (include "kafka-ui.fullname" .) }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
policyTypes:
- Ingress
ingress:
{{- if .Values.networkPolicy.ingressRules.customRules }}
{{- toYaml .Values.networkPolicy.ingressRules.customRules | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
type: Opaque
data:
{{- toYaml .Values.envs.secret | nindent 2 }}

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- if .Values.service.annotations }}
annotations:
{{ toYaml .Values.service.annotations | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
{{- include "kafka-ui.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "kafka-ui.serviceAccountName" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,151 @@
replicaCount: 1
image:
registry: docker.io
repository: provectuslabs/kafka-ui
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
existingConfigMap: ""
yamlApplicationConfig:
{}
# kafka:
# clusters:
# - name: yaml
# bootstrapServers: kafka-service:9092
# spring:
# security:
# oauth2:
# auth:
# type: disabled
# management:
# health:
# ldap:
# enabled: false
yamlApplicationConfigConfigMap:
{}
# keyName: config.yml
# name: configMapName
existingSecret: ""
envs:
secret: {}
config:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
networkPolicy:
enabled: false
egressRules:
## Additional custom egress rules
## e.g:
## customRules:
## - to:
## - namespaceSelector:
## matchLabels:
## label: example
customRules: []
ingressRules:
## Additional custom ingress rules
## e.g:
## customRules:
## - from:
## - namespaceSelector:
## matchLabels:
## label: example
customRules: []
podAnnotations: {}
podLabels: {}
podSecurityContext:
{}
# fsGroup: 2000
securityContext:
{}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: NodePort
port: 80
# if you want to force a specific nodePort. Must be use with service.type=NodePort
# nodePort:
# Ingress configuration
ingress:
# Enable ingress resource
enabled: false
# Annotations for the Ingress
annotations: {}
# ingressClassName for the Ingress
ingressClassName: ""
# The path for the Ingress
path: ""
# The hostname for the Ingress
host: ""
# configs for Ingress TLS
tls:
# Enable TLS termination for the Ingress
enabled: false
# the name of a pre-created Secret containing a TLS private key and certificate
secretName: ""
# HTTP paths to add to the Ingress before the default path
precedingPaths: []
# Http paths to add to the Ingress after the default path
succeedingPaths: []
resources:
{}
# limits:
# cpu: 200m
# memory: 512Mi
# requests:
# cpu: 200m
# memory: 256Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
# BUGFIX: these four values are rendered as Kubernetes *lists* by
# deployment.yaml (`toYaml` under `env:`, `initContainers:`,
# `volumeMounts:`, `volumes:`), so the empty default must be a
# sequence, not a map. Both `{}` and `[]` are falsy under the
# template's `with`/`or` guards, so this is behavior-compatible for
# the default case while giving users the correct type to extend.
env: []
initContainers: []
volumeMounts: []
volumes: []

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,171 @@
kind: ConfigMap
metadata:
name: broker-config
apiVersion: v1
data:
init.sh: |-
#!/bin/bash
set -e
set -x
cp /etc/kafka-configmap/log4j.properties /etc/kafka/
KAFKA_BROKER_ID=${HOSTNAME##*-}
SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/")
LABELS="kafka-broker-id=$KAFKA_BROKER_ID"
ANNOTATIONS=""
hash kubectl 2>/dev/null || {
SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/")
} && {
ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}')
if [ $? -ne 0 ]; then
SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/")
elif [ "x$ZONE" == "x<no value>" ]; then
SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/")
else
SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/")
LABELS="$LABELS kafka-broker-rack=$ZONE"
fi
OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
if [ $? -ne 0 ]; then
echo "Outside (i.e. cluster-external access) host lookup command failed"
else
OUTSIDE_PORT=3240${KAFKA_BROKER_ID}
GLOBAL_PORT=3250${KAFKA_BROKER_ID}
SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${OUTSIDE_HOST}:${GLOBAL_PORT}|")
ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT"
fi
if [ ! -z "$LABELS" ]; then
kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?"
fi
if [ ! -z "$ANNOTATIONS" ]; then
kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?"
fi
}
printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp
[ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties
server.properties: |-
# init부 수정 금지 ( init.sh에 디펜던시)
#init#broker.id=#init#
#init#broker.rack=#init#
#init#advertised.listeners=OUTSIDE://#init#,PLAINTEXT://:9092
########################################################################
##### Broker, Zookeeper
log.dirs=/var/lib/kafka/data/topics
zookeeper.connect=zookeeper:2181
zookeeper.session.timeout.ms=18000
controller.quorum.election.backoff.max.ms=1000
controller.quorum.election.timeout.ms=1000
########################################################################
##### Listener
listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095
listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT
inter.broker.listener.name=PLAINTEXT
########################################################################
###### Thread
background.threads=10
num.recovery.threads.per.data.dir=1
num.io.threads=10
num.network.threads=4
num.replica.fetchers=4
log.cleaner.threads=1
########################################################################
###### Topic, Partition
replica.fetch.min.bytes=1
replica.lag.time.max.ms=30000
auto.create.topics.enable=true
default.replication.factor=1
min.insync.replicas=1
delete.topic.enable=true
num.partitions=12
auto.leader.rebalance.enable=true
leader.imbalance.check.interval.seconds=120
########################################################################
##### Log, Message
log.cleaner.enable=true
log.cleanup.policy=delete
log.segment.delete.delay.ms=60000
log.flush.interval.messages=1000000
log.flush.interval.ms=60000
log.flush.scheduler.interval.ms=2000
log.flush.offset.checkpoint.interval.ms=60000
########################################################################
##### Offset, Commit
offsets.retention.minutes=1440
offsets.topic.replication.factor=1
########################################################################
##### MessageSize, Socket
message.max.bytes=1048576
max.message.bytes=1048576
replica.fetch.max.bytes=1048576
socket.receive.buffer.bytes=1048576
socket.send.buffer.bytes=1048576
replica.socket.receive.buffer.bytes=65536
socket.request.max.bytes=104857600
########################################################################
##### Retention
log.retention.minutes=5
log.retention.bytes=1073741824
log.retention.check.interval.ms=60000
log.segment.bytes=536870912
log4j.properties: |-
# Unspecified loggers and loggers with additivity=true output to server.log and stdout
# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
# Change the two lines below to adjust ZK client logging
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
log4j.logger.org.apache.zookeeper=INFO
# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
log4j.logger.kafka=INFO
log4j.logger.org.apache.kafka=INFO
# Change to DEBUG or TRACE to enable request logging
log4j.logger.kafka.request.logger=WARN, requestAppender
log4j.additivity.kafka.request.logger=false
# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
# related to the handling of requests
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
log4j.additivity.kafka.network.RequestChannel$=false
log4j.logger.kafka.controller=TRACE, controllerAppender
log4j.additivity.kafka.controller=false
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
log4j.additivity.kafka.log.LogCleaner=false
log4j.logger.state.change.logger=TRACE, stateChangeAppender
log4j.additivity.state.change.logger=false
# Change to DEBUG to enable audit log for the authorizer
log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender
log4j.additivity.kafka.authorizer.logger=false

View File

@@ -0,0 +1,5 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
name: kafka
version: 0.1.0

View File

@@ -0,0 +1,14 @@
# A headless service to create DNS records
---
apiVersion: v1
kind: Service
metadata:
name: kafka-headless
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: 9092
clusterIP: None
selector:
app: kafka
---

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
# name: bootstrap
name: kafka
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: 9092
selector:
app: kafka

View File

@@ -0,0 +1,124 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: kafka
namespace: {{ .Release.Namespace }}
spec:
selector:
matchLabels:
app: kafka
serviceName: "kafka-headless"
replicas: 3
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
template:
metadata:
labels:
app: kafka
spec:
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
terminationGracePeriodSeconds: 30
initContainers:
- name: init-config
image: {{ .Values.initContainers.image.repository }}:{{ .Values.initContainers.image.tag }}
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command: ['/bin/bash', '/etc/kafka-configmap/init.sh']
volumeMounts:
- name: configmap
mountPath: /etc/kafka-configmap
- name: config
mountPath: /etc/kafka
- name: extensions
mountPath: /opt/kafka/libs/extensions
containers:
- name: broker
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
env:
- name: CLASSPATH
value: /opt/kafka/libs/extensions/*
- name: KAFKA_LOG4J_OPTS
value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties
- name: JMX_PORT
value: "5555"
- name: KAFKA_OPTS
value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml
ports:
- name: inside
containerPort: 9092
- name: outside
containerPort: 9094
- name: global
containerPort: 9095
- name: jmx
containerPort: 9010
command:
- ./bin/kafka-server-start.sh
- /etc/kafka/server.properties
lifecycle:
preStop:
exec:
command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"]
resources:
requests:
cpu: 500m
memory: 6000Mi
limits:
# This limit was intentionally set low as a reminder that
# the entire Yolean/kubernetes-kafka is meant to be tweaked
# before you run production workloads
cpu: 1000m
memory: 10000Mi
readinessProbe:
tcpSocket:
port: 9092
timeoutSeconds: 1
volumeMounts:
- name: config
mountPath: /etc/kafka
- name: data
mountPath: /var/lib/kafka/data
- name: extensions
mountPath: /opt/kafka/libs/extensions
volumes:
- name: configmap
configMap:
name: broker-config
- name: config
emptyDir: {}
- name: extensions
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: {{ .Values.global.StorageClassName }}
resources:
requests:
storage: 50Gi

View File

@@ -0,0 +1,127 @@
kind: Service
apiVersion: v1
metadata:
name: kafka-outside-0
namespace: {{ .Release.Namespace }}
spec:
selector:
app: kafka
kafka-broker-id: "0"
ports:
- protocol: TCP
targetPort: 9094
port: 32400
nodePort: {{ .Values.service.kafka_outside_0 }}
type: NodePort
---
kind: Service
apiVersion: v1
metadata:
name: kafka-outside-1
namespace: {{ .Release.Namespace }}
spec:
selector:
app: kafka
kafka-broker-id: "1"
ports:
- protocol: TCP
targetPort: 9094
port: 32401
nodePort: {{ .Values.service.kafka_outside_1 }}
type: NodePort
---
kind: Service
apiVersion: v1
metadata:
name: kafka-outside-2
namespace: {{ .Release.Namespace }}
spec:
selector:
app: kafka
kafka-broker-id: "2"
ports:
- protocol: TCP
targetPort: 9094
port: 32402
nodePort: {{ .Values.service.kafka_outside_2 }}
type: NodePort
---
kind: Service
apiVersion: v1
metadata:
name: kafka-global-0
namespace: {{ .Release.Namespace }}
spec:
selector:
app: kafka
kafka-broker-id: "0"
ports:
- protocol: TCP
targetPort: 9095
port: 32500
nodePort: {{ .Values.service.kafka_global_0 }}
type: NodePort
---
kind: Service
apiVersion: v1
metadata:
name: kafka-global-1
namespace: {{ .Release.Namespace }}
spec:
selector:
app: kafka
kafka-broker-id: "1"
ports:
- protocol: TCP
targetPort: 9095
port: 32501
nodePort: {{ .Values.service.kafka_global_1 }}
type: NodePort
---
kind: Service
apiVersion: v1
metadata:
name: kafka-global-2
namespace: {{ .Release.Namespace }}
spec:
selector:
app: kafka
kafka-broker-id: "2"
ports:
- protocol: TCP
targetPort: 9095
port: 32502
nodePort: {{ .Values.service.kafka_global_2 }}
type: NodePort
---
apiVersion: v1
kind: Service
metadata:
name: kafka-broker
namespace: {{ .Release.Namespace }}
spec:
type: NodePort
ports:
- port: 9094
name: kafka
protocol: TCP
targetPort: 9094
nodePort: {{ .Values.service.kafka_broker }}
selector:
app: kafka
---
apiVersion: v1
kind: Service
metadata:
name: kafka-broker-global
namespace: {{ .Release.Namespace }}
spec:
type: NodePort
ports:
- port: 9095
name: kafka
protocol: TCP
targetPort: 9095
nodePort: {{ .Values.service.kafka_broker_global }}
selector:
app: kafka

View File

@@ -0,0 +1,73 @@
# Default values for kafka.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
tag: stable
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
service:
  # NodePorts for the per-broker "outside" listeners. Values must match
  # the 3240${BROKER_ID} scheme hard-coded in broker-config init.sh and
  # the `port:` of each kafka-outside-N Service.
  kafka_outside_0: "32400"
  kafka_outside_1: "32401"
  # BUGFIX: kafka-outside-2 / kafka-global-2 Services reference these
  # two values (the StatefulSet runs 3 replicas) but they were missing,
  # rendering an empty `nodePort:` and an invalid manifest.
  kafka_outside_2: "32402"
  # NodePorts for the per-broker "global" listeners (3250${BROKER_ID}).
  kafka_global_0: "32500"
  kafka_global_1: "32501"
  kafka_global_2: "32502"
  # NOTE(review): 9094/9095 fall outside the default NodePort range
  # (30000-32767) — confirm the cluster's service-node-port-range
  # before relying on these.
  kafka_broker: "9094"
  kafka_broker_global: "9095"

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,5 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
name: zookeeper
version: 0.1.0

View File

@@ -0,0 +1,35 @@
kind: ConfigMap
metadata:
name: zookeeper-config
namespace: {{ .Release.Namespace }}
apiVersion: v1
data:
init.sh: |-
#!/bin/bash
set -e
set -x
[ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data
[ -z "$ID_OFFSET" ] && ID_OFFSET=1
export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET))
echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid
cp -Lur /etc/kafka-configmap/* /etc/kafka/
sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties
zookeeper.properties: |-
tickTime=2000
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/log
clientPort=2181
maxClientCnxns=0
initLimit=5
syncLimit=2
server.1=zookeeper-0.zookeeper-headless.{{ .Release.Namespace }}.svc.cluster.local:2888:3888:participant
server.2=zookeeper-1.zookeeper-headless.{{ .Release.Namespace }}.svc.cluster.local:2888:3888:participant
server.3=zookeeper-2.zookeeper-headless.{{ .Release.Namespace }}.svc.cluster.local:2888:3888:participant
log4j.properties: |-
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
# Suppress connection log messages, three lines per livenessProbe execution
log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN
log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: zookeeper-headless
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: 2888
name: peer
- port: 3888
name: leader-election
clusterIP: None
selector:
app: zookeeper
storage: persistent

View File

@@ -0,0 +1,12 @@
# the headless service provides per-pod DNS for the StatefulSet (formerly "PetSet"); this one is for clients
apiVersion: v1
kind: Service
metadata:
name: zookeeper
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: 2181
name: client
selector:
app: zookeeper

View File

@@ -0,0 +1,97 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: zookeeper
namespace: {{ .Release.Namespace }}
spec:
selector:
matchLabels:
app: zookeeper
storage: persistent
serviceName: "zookeeper-headless"
replicas: 3
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
template:
metadata:
labels:
app: zookeeper
storage: persistent
annotations:
spec:
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
terminationGracePeriodSeconds: 10
initContainers:
- name: init-config
image: {{ .Values.initContainers.image.repository }}:{{ .Values.initContainers.image.tag }}
command: ['/bin/bash', '/etc/kafka-configmap/init.sh']
volumeMounts:
- name: configmap
mountPath: /etc/kafka-configmap
- name: config
mountPath: /etc/kafka
- name: data
mountPath: /var/lib/zookeeper
containers:
- name: zookeeper
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
env:
- name: KAFKA_LOG4J_OPTS
value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties
command:
- ./bin/zookeeper-server-start.sh
- /etc/kafka/zookeeper.properties
lifecycle:
preStop:
exec:
command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"]
ports:
- containerPort: 2181
name: client
- containerPort: 2888
name: peer
- containerPort: 3888
name: leader-election
resources:
requests:
cpu: 100m
memory: 512Mi
limits:
cpu: 200m
memory: 1000Mi
readinessProbe:
exec:
command: ['/bin/bash', '-c', 'echo "ruok" | nc -w 2 localhost 2181 | grep imok']
volumeMounts:
- name: config
mountPath: /etc/kafka
- name: data
mountPath: /var/lib/zookeeper
volumes:
- name: configmap
configMap:
name: zookeeper-config
- name: config
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: {{ .Values.global.StorageClassName }}
resources:
requests:
storage: 30Gi

View File

@@ -0,0 +1,68 @@
# Default values for zookeeper.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
tag: stable
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -0,0 +1,3 @@
apiVersion: v1
entries: {}
generated: "2019-11-05T09:47:03.285264152+09:00"

View File

@@ -0,0 +1,16 @@
kind: ClusterRoleBinding
{{- if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
metadata:
name: {{ .Release.Name }}-cluster-admin-clusterrolebinding
subjects:
- kind: ServiceAccount
name: default
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin

View File

@@ -0,0 +1,637 @@
---
# Source: kafkaset/charts/akhq/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: test-akhq-secrets
labels:
app.kubernetes.io/name: akhq
helm.sh/chart: akhq-0.2.7
app.kubernetes.io/instance: test
app.kubernetes.io/managed-by: Helm
type: Opaque
data:
application-secrets.yml: "YWtocToKICBjb25uZWN0aW9uczoKICAgIG15LWNsdXN0ZXItcGxhaW4tdGV4dDoKICAgICAgcHJvcGVydGllczoKICAgICAgICBib290c3RyYXAuc2VydmVyczoga2Fma2E6OTA5Mg=="
---
# Source: kafkaset/charts/akhq/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: test-akhq
labels:
app.kubernetes.io/name: akhq
helm.sh/chart: akhq-0.2.7
app.kubernetes.io/instance: test
app.kubernetes.io/managed-by: Helm
data:
application.yml: |
akhq:
server:
access-log:
enabled: false
name: org.akhq.log.access
---
# Source: kafkaset/charts/zookeeper/templates/0.config.yaml
kind: ConfigMap
metadata:
name: zookeeper-config
namespace: dsk-middle
apiVersion: v1
data:
init.sh: |-
#!/bin/bash
set -e
set -x
[ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data
[ -z "$ID_OFFSET" ] && ID_OFFSET=1
export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET))
echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid
cp -Lur /etc/kafka-configmap/* /etc/kafka/
sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties
zookeeper.properties: |-
tickTime=2000
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/log
clientPort=2181
maxClientCnxns=0
initLimit=5
syncLimit=2
server.1=zookeeper-0.zookeeper-headless.dsk-middle.svc.cluster.local:2888:3888:participant
server.2=zookeeper-1.zookeeper-headless.dsk-middle.svc.cluster.local:2888:3888:participant
server.3=zookeeper-2.zookeeper-headless.dsk-middle.svc.cluster.local:2888:3888:participant
log4j.properties: |-
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
# Suppress connection log messages, three lines per livenessProbe execution
log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN
log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN
---
# Source: kafkaset/templates/role.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: test-cluster-admin-clusterrolebinding
subjects:
- kind: ServiceAccount
name: default
namespace: dsk-middle
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
---
# Source: kafkaset/charts/akhq/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: test-akhq
labels:
app.kubernetes.io/name: akhq
helm.sh/chart: akhq-0.2.7
app.kubernetes.io/instance: test
app.kubernetes.io/managed-by: Helm
annotations:
spec:
type: NodePort
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: akhq
app.kubernetes.io/instance: test
---
# Source: kafkaset/charts/kafka/templates/2.dns.yaml
apiVersion: v1
kind: Service
metadata:
name: kafka-headless
namespace: dsk-middle
spec:
ports:
- port: 9092
clusterIP: None
selector:
app: kafka
---
# Source: kafkaset/charts/kafka/templates/3.bootstrap-service.yaml
apiVersion: v1
kind: Service
metadata:
# name: bootstrap
name: kafka
namespace: dsk-middle
spec:
ports:
- port: 9092
selector:
app: kafka
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
name: kafka-outside-0
namespace: dsk-middle
spec:
selector:
app: kafka
kafka-broker-id: "0"
ports:
- protocol: TCP
targetPort: 9094
port: 32400
nodePort: 32400
type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
name: kafka-outside-1
namespace: dsk-middle
spec:
selector:
app: kafka
kafka-broker-id: "1"
ports:
- protocol: TCP
targetPort: 9094
port: 32401
nodePort: 32401
type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
name: kafka-outside-2
namespace: dsk-middle
spec:
selector:
app: kafka
kafka-broker-id: "2"
ports:
- protocol: TCP
targetPort: 9094
port: 32402
nodePort: 32402
type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
name: kafka-global-0
namespace: dsk-middle
spec:
selector:
app: kafka
kafka-broker-id: "0"
ports:
- protocol: TCP
targetPort: 9095
port: 32500
nodePort: 32500
type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
name: kafka-global-1
namespace: dsk-middle
spec:
selector:
app: kafka
kafka-broker-id: "1"
ports:
- protocol: TCP
targetPort: 9095
port: 32501
nodePort: 32501
type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
kind: Service
apiVersion: v1
metadata:
name: kafka-global-2
namespace: dsk-middle
spec:
selector:
app: kafka
kafka-broker-id: "2"
ports:
- protocol: TCP
targetPort: 9095
port: 32502
nodePort: 32502
type: NodePort
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
apiVersion: v1
kind: Service
metadata:
name: kafka-broker
namespace: dsk-middle
spec:
type: NodePort
ports:
- port: 9094
name: kafka
protocol: TCP
targetPort: 9094
nodePort: 30094
selector:
app: kafka
---
# Source: kafkaset/charts/kafka/templates/6.outside.yaml
apiVersion: v1
kind: Service
metadata:
name: kafka-broker-global
namespace: dsk-middle
spec:
type: NodePort
ports:
- port: 9095
name: kafka
protocol: TCP
targetPort: 9095
nodePort: 30095
selector:
app: kafka
---
# Source: kafkaset/charts/zookeeper/templates/1.service-leader-election.yaml
apiVersion: v1
kind: Service
metadata:
name: zookeeper-headless
namespace: dsk-middle
spec:
ports:
- port: 2888
name: peer
- port: 3888
name: leader-election
clusterIP: None
selector:
app: zookeeper
storage: persistent
---
# Source: kafkaset/charts/zookeeper/templates/2.service-client.yaml
# the headless service provides per-pod DNS for the StatefulSet (formerly "PetSet"); this one is for clients
apiVersion: v1
kind: Service
metadata:
name: zookeeper
namespace: dsk-middle
spec:
ports:
- port: 2181
name: client
selector:
app: zookeeper
---
# Source: kafkaset/charts/akhq/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: test-akhq
labels:
app.kubernetes.io/name: akhq
helm.sh/chart: akhq-0.2.7
app.kubernetes.io/instance: test
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: akhq
app.kubernetes.io/instance: test
template:
metadata:
annotations:
checksum/config: 00490bc3c20c1a8c6ab1b49540d63065ad39aae5b19766fc0a884db2c0b5ecbf
checksum/secrets: 235bfd9fa6c8713d840dc969c1c05fd1b82c200a02bd4187955d14a983effe58
labels:
app.kubernetes.io/name: akhq
app.kubernetes.io/instance: test
spec:
serviceAccountName: default
containers:
- name: akhq
image: "tchiotludo/akhq:0.20.0"
imagePullPolicy: Always
env:
- name: MICRONAUT_ENVIRONMENTS
value: secrets
- name: MICRONAUT_CONFIG_FILES
value: /app/application.yml,/app/application-secrets.yml
volumeMounts:
- name: config
mountPath: /app/application.yml
subPath: application.yml
- name: secrets
mountPath: /app/application-secrets.yml
subPath: application-secrets.yml
ports:
- name: http
containerPort: 8080
protocol: TCP
- name: management
containerPort: 28081
protocol: TCP
livenessProbe:
tcpSocket:
port: management
readinessProbe:
httpGet:
path: /health
port: management
resources:
{}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-kafka
tolerations:
- key: dev/data-kafka
operator: Exists
volumes:
- name: config
configMap:
name: test-akhq
- name: secrets
secret:
secretName: test-akhq-secrets
---
# Source: kafkaset/charts/kafka/templates/5.kafka.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: kafka
namespace: dsk-middle
spec:
selector:
matchLabels:
app: kafka
serviceName: "kafka-headless"
replicas: 3
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
template:
metadata:
labels:
app: kafka
spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: datasaker/group
operator: In
values:
- data-kafka
weight: 100
podAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- zookeeper
topologyKey: kubernetes.io/hostname
weight: 50
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- kafka
topologyKey: kubernetes.io/hostname
weight: 50
tolerations:
- key: dev/data-kafka
operator: Exists
terminationGracePeriodSeconds: 30
initContainers:
- name: init-config
image: datasaker/kafka-initutils:v1.0.0
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command: ['/bin/bash', '/etc/kafka-configmap/init.sh']
volumeMounts:
- name: configmap
mountPath: /etc/kafka-configmap
- name: config
mountPath: /etc/kafka
- name: extensions
mountPath: /opt/kafka/libs/extensions
containers:
- name: broker
image: datasaker/kafka:v1.0.1
env:
- name: CLASSPATH
value: /opt/kafka/libs/extensions/*
- name: KAFKA_LOG4J_OPTS
value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties
- name: JMX_PORT
value: "5555"
- name: KAFKA_OPTS
value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml
ports:
- name: inside
containerPort: 9092
- name: outside
containerPort: 9094
- name: global
containerPort: 9095
- name: jmx
containerPort: 9010
command:
- ./bin/kafka-server-start.sh
- /etc/kafka/server.properties
lifecycle:
preStop:
exec:
command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"]
resources:
requests:
cpu: 500m
memory: 6000Mi
limits:
# This limit was intentionally set low as a reminder that
# the entire Yolean/kubernetes-kafka is meant to be tweaked
# before you run production workloads
cpu: 1000m
memory: 10000Mi
readinessProbe:
tcpSocket:
port: 9092
timeoutSeconds: 1
volumeMounts:
- name: config
mountPath: /etc/kafka
- name: data
mountPath: /var/lib/kafka/data
- name: extensions
mountPath: /opt/kafka/libs/extensions
volumes:
- name: configmap
configMap:
name: broker-config
- name: config
emptyDir: {}
- name: extensions
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName:
resources:
requests:
storage: 50Gi
---
# Source: kafkaset/charts/zookeeper/templates/4.statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: zookeeper
namespace: dsk-middle
spec:
selector:
matchLabels:
app: zookeeper
storage: persistent
serviceName: "zookeeper-headless"
replicas: 3
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
template:
metadata:
labels:
app: zookeeper
storage: persistent
annotations:
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-kafka
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- zookeeper
topologyKey: kubernetes.io/hostname
weight: 50
tolerations:
- key: dev/data-kafka
operator: Exists
terminationGracePeriodSeconds: 10
initContainers:
- name: init-config
image: datasaker/kafka-initutils:v1.0.0
command: ['/bin/bash', '/etc/kafka-configmap/init.sh']
volumeMounts:
- name: configmap
mountPath: /etc/kafka-configmap
- name: config
mountPath: /etc/kafka
- name: data
mountPath: /var/lib/zookeeper
containers:
- name: zookeeper
image: datasaker/kafka:v1.0.0
env:
- name: KAFKA_LOG4J_OPTS
value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties
command:
- ./bin/zookeeper-server-start.sh
- /etc/kafka/zookeeper.properties
lifecycle:
preStop:
exec:
command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"]
ports:
- containerPort: 2181
name: client
- containerPort: 2888
name: peer
- containerPort: 3888
name: leader-election
resources:
requests:
cpu: 100m
memory: 512Mi
limits:
cpu: 200m
memory: 1000Mi
readinessProbe:
exec:
command: ['/bin/bash', '-c', 'echo "ruok" | nc -w 2 localhost 2181 | grep imok']
volumeMounts:
- name: config
mountPath: /etc/kafka
- name: data
mountPath: /var/lib/zookeeper
volumes:
- name: configmap
configMap:
name: zookeeper-config
- name: config
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName:
resources:
requests:
storage: 30Gi
---
# Source: kafkaset/charts/kafka/templates/2.dns.yaml
# A headless service to create DNS records

View File

@@ -0,0 +1,199 @@
# Default values for sample.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
tag: stable
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
#service:
# type: ClusterIP
# port: 80
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {"datasaker/group": "data"}
tolerations: []
affinity: {}
global:
KAFKA_BROKER_CONFIG: "{{index .metadata.labels \"failure-domain.beta.kubernetes.io/zone\"}}"
# KAFK_INITUTILS_VERSION: v1.0.0
# KAFKA_VERSION: v1.0.1
# 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것)
# IMXC_REGISTRY: icn.ocir.io/cntxl7bbdp4p
# StorageClassName: openebs-hostpath
  # kafka의 노드 포트들을 명시
kafka:
image:
repository: datasaker/kafka
tag: v1.0.1
initContainers:
image:
repository: datasaker/kafka-initutils
tag: v1.0.0
service:
kafka_outside_0: "32400"
kafka_outside_1: "32401"
kafka_outside_2: "32402"
kafka_global_0: "32500"
kafka_global_1: "32501"
kafka_global_2: "32502"
kafka_broker: "30094"
kafka_broker_global: "30095"
tolerations:
- key: "dev/data-kafka"
operator: "Exists"
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: "datasaker/group"
operator: In
values:
- "data-kafka"
weight: 100
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- kafka
topologyKey: "kubernetes.io/hostname"
podAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- zookeeper
topologyKey: "kubernetes.io/hostname"
zookeeper:
image:
repository: datasaker/kafka
tag: v1.0.0
initContainers:
image:
repository: datasaker/kafka-initutils
tag: v1.0.0
tolerations:
- key: "dev/data-kafka"
operator: "Exists"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- zookeeper
topologyKey: "kubernetes.io/hostname"
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-kafka
akhq:
service:
enabled: true
type: NodePort
port: 80
nodePort: 32551
secrets:
akhq:
connections:
my-cluster-plain-text:
properties:
bootstrap.servers: "kafka:9092"
tolerations:
- key: "dev/data-kafka"
operator: "Exists"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: datasaker/group
operator: In
values:
- data-kafka
# nodeSelector: {"datasaker/group": "data-kafka"}