update
@@ -0,0 +1,21 @@
default: test

include ../../../helpers/examples.mk

RELEASE := helm-es-config
TIMEOUT := 1200s

install:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../

secrets:
	kubectl delete secret elastic-config-credentials elastic-config-secret elastic-config-slack elastic-config-custom-path || true
	kubectl create secret generic elastic-config-credentials --from-literal=password=changeme --from-literal=username=elastic
	kubectl create secret generic elastic-config-slack --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
	kubectl create secret generic elastic-config-secret --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
	kubectl create secret generic elastic-config-custom-path --from-literal=slack_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' --from-literal=thing_i_don_tcare_about=test

test: secrets install goss

purge:
	helm del $(RELEASE)
@@ -0,0 +1,27 @@
# Config

This example deploys a single-node Elasticsearch 8.4.1 cluster with
authentication and custom [values][].


## Usage

* Create the required secrets: `make secrets`

* Deploy Elasticsearch chart with the default values: `make install`

* You can now set up a port forward to query the Elasticsearch API:

```
kubectl port-forward svc/config-master 9200
curl -u elastic:changeme http://localhost:9200/_cat/indices
```
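
To double-check that the keystore secrets created by `make secrets` were
actually picked up, you can list the keystore inside a pod and read back the
stored password. This is only a sketch: the pod name `config-master-0` assumes
the chart's usual `<clusterName>-<nodeGroup>` naming used elsewhere in this
example.

```
# List the keystore entries loaded from the Kubernetes secrets
kubectl exec config-master-0 -- elasticsearch-keystore list

# Read back the password stored in the elastic-config-credentials secret
kubectl get secret elastic-config-credentials -o jsonpath='{.data.password}' | base64 -d
```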

## Testing

You can also run the [goss integration tests][] using `make test`.


[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/config/test/goss.yaml
[values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/config/values.yaml
@@ -0,0 +1,31 @@
http:
  https://localhost:9200/_cluster/health:
    status: 200
    timeout: 2000
    allow-insecure: true
    username: elastic
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    body:
      - "green"
      - '"number_of_nodes":1'
      - '"number_of_data_nodes":1'

  https://localhost:9200:
    status: 200
    timeout: 2000
    username: elastic
    allow-insecure: true
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    body:
      - '"cluster_name" : "config"'
      - "You Know, for Search"

command:
  "elasticsearch-keystore list":
    exit-status: 0
    stdout:
      - keystore.seed
      - bootstrap.password
      - xpack.notification.slack.account.monitoring.secure_url
      - xpack.notification.slack.account.otheraccount.secure_url
      - xpack.watcher.encryption_key
@@ -0,0 +1,29 @@
---
clusterName: "config"
replicas: 1

extraEnvs:
  - name: ELASTIC_PASSWORD
    valueFrom:
      secretKeyRef:
        name: elastic-config-credentials
        key: password

# This is just a dummy file to make sure that
# the keystore can be mounted at the same time
# as a custom elasticsearch.yml
esConfig:
  elasticsearch.yml: |
    xpack.security.enabled: true
    path.data: /usr/share/elasticsearch/data

keystore:
  - secretName: elastic-config-secret
  - secretName: elastic-config-slack
  - secretName: elastic-config-custom-path
    items:
      - key: slack_url
        path: xpack.notification.slack.account.otheraccount.secure_url

secret:
  enabled: false
@@ -0,0 +1 @@
supersecret
@@ -0,0 +1,14 @@
default: test

include ../../../helpers/examples.mk

RELEASE := helm-es-default
TIMEOUT := 1200s

install:
	helm upgrade --wait --timeout=$(TIMEOUT) --install $(RELEASE) ../../

test: install goss

purge:
	helm del $(RELEASE)
@@ -0,0 +1,25 @@
# Default

This example deploys a 3-node Elasticsearch 8.4.1 cluster using
[default values][].


## Usage

* Deploy Elasticsearch chart with the default values: `make install`

* You can now set up a port forward to query the Elasticsearch API:

```
kubectl port-forward svc/elasticsearch-master 9200
curl localhost:9200/_cat/indices
```
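
To confirm that all three nodes formed a single healthy cluster (which is what
the goss tests assert), you can also query the cluster health endpoint through
the same port forward:

```
curl 'localhost:9200/_cluster/health?pretty'
```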

## Testing

You can also run the [goss integration tests][] using `make test`.


[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/default/test/goss.yaml
[default values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/values.yaml
@@ -0,0 +1,19 @@
#!/usr/bin/env bash
#
# Poll Elasticsearch through `kubectl proxy` while a rolling upgrade
# (driven by `make`) is running, and fail if the cluster stops responding.
set -x

# Expose the Kubernetes API locally (ignore the error if a proxy is already running).
kubectl proxy || true &

# Start the upgrade in the background and remember its PID.
make &
PROC_ID=$!

# As long as the upgrade is still running, query Elasticsearch through the
# API proxy and abort as soon as a request fails.
while kill -0 "$PROC_ID" >/dev/null 2>&1; do
    echo "PROCESS IS RUNNING"
    if curl --fail 'http://localhost:8001/api/v1/proxy/namespaces/default/services/elasticsearch-master:9200/_search' ; then
        echo "cluster is healthy"
    else
        echo "cluster not healthy!"
        exit 1
    fi
    sleep 1
done
echo "PROCESS TERMINATED"
exit 0
@@ -0,0 +1,44 @@
kernel-param:
  vm.max_map_count:
    value: "262144"

http:
  https://elasticsearch-master:9200/_cluster/health:
    status: 200
    timeout: 2000
    username: elastic
    allow-insecure: true
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    body:
      - "green"
      - '"number_of_nodes":3'
      - '"number_of_data_nodes":3'

  https://localhost:9200:
    status: 200
    timeout: 2000
    allow-insecure: true
    username: elastic
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    body:
      - '"number" : "8.4.1"'
      - '"cluster_name" : "elasticsearch"'
      - "You Know, for Search"

file:
  /usr/share/elasticsearch/data:
    exists: true
    mode: "2775"
    owner: root
    group: elasticsearch
    filetype: directory

mount:
  /usr/share/elasticsearch/data:
    exists: true

user:
  elasticsearch:
    exists: true
    uid: 1000
    gid: 1000
@@ -0,0 +1,13 @@
default: test

RELEASE := helm-es-docker-for-mac
TIMEOUT := 1200s

install:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../

test: install
	helm test $(RELEASE)

purge:
	helm del $(RELEASE)
@@ -0,0 +1,23 @@
# Docker for Mac

This example deploys a 3-node Elasticsearch 8.4.1 cluster on [Docker for Mac][]
using [custom values][].

Note that this configuration should be used for testing only and isn't
recommended for production.


## Usage

* Deploy Elasticsearch chart with the default values: `make install`

* You can now set up a port forward to query the Elasticsearch API:

```
kubectl port-forward svc/elasticsearch-master 9200
curl localhost:9200/_cat/indices
```


[custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/docker-for-mac/values.yaml
[docker for mac]: https://docs.docker.com/docker-for-mac/kubernetes/
@@ -0,0 +1,23 @@
---
# Permit co-located instances for solitary minikube virtual machines.
antiAffinity: "soft"

# Shrink default JVM heap.
esJavaOpts: "-Xmx128m -Xms128m"

# Allocate smaller chunks of memory per pod.
resources:
  requests:
    cpu: "100m"
    memory: "512M"
  limits:
    cpu: "1000m"
    memory: "512M"

# Request smaller persistent volumes.
volumeClaimTemplate:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: "hostpath"
  resources:
    requests:
      storage: 100M
@@ -0,0 +1,17 @@
default: test

RELEASE := helm-es-kind
TIMEOUT := 1200s

install:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../

install-local-path:
	kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values-local-path.yaml $(RELEASE) ../../

test: install
	helm test $(RELEASE)

purge:
	helm del $(RELEASE)
@@ -0,0 +1,36 @@
# KIND

This example deploys a 3-node Elasticsearch 8.4.1 cluster on [Kind][]
using [custom values][].

Note that this configuration should be used for testing only and isn't
recommended for production.

Note that Kind versions < 0.7.0 are affected by a [kind issue][] where mount
points created from PVCs are not writable by non-root users.
[kubernetes-sigs/kind#1157][] fixed this in Kind 0.7.0.

The workaround for Kind < 0.7.0 is to manually install the
[Rancher Local Path Provisioner][] and use the `local-path` storage class for
Elasticsearch volumes (see the [Makefile][] instructions).
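
A minimal sketch of that manual installation (the manifest URL is the same one
the Makefile's `install-local-path` target applies; the `kubectl get
storageclass` call is only a sanity check):

```
# Install the Rancher Local Path Provisioner
kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml

# The storage class referenced in values-local-path.yaml should now exist
kubectl get storageclass local-path
```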

## Usage

* For Kind >= 0.7.0: Deploy Elasticsearch chart with the default values: `make install`
* For Kind < 0.7.0: Deploy Elasticsearch chart with the `local-path` storage class: `make install-local-path`

* You can now set up a port forward to query the Elasticsearch API:

```
kubectl port-forward svc/elasticsearch-master 9200
curl localhost:9200/_cat/indices
```


[custom values]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/kubernetes-kind/values.yaml
[kind]: https://kind.sigs.k8s.io/
[kind issue]: https://github.com/kubernetes-sigs/kind/issues/830
[kubernetes-sigs/kind#1157]: https://github.com/kubernetes-sigs/kind/pull/1157
[rancher local path provisioner]: https://github.com/rancher/local-path-provisioner
[Makefile]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/kubernetes-kind/Makefile
@@ -0,0 +1,23 @@
---
# Permit co-located instances for solitary minikube virtual machines.
antiAffinity: "soft"

# Shrink default JVM heap.
esJavaOpts: "-Xmx128m -Xms128m"

# Allocate smaller chunks of memory per pod.
resources:
  requests:
    cpu: "100m"
    memory: "512M"
  limits:
    cpu: "1000m"
    memory: "512M"

# Request smaller persistent volumes.
volumeClaimTemplate:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: "local-path"
  resources:
    requests:
      storage: 100M
@@ -0,0 +1,23 @@
---
# Permit co-located instances for solitary minikube virtual machines.
antiAffinity: "soft"

# Shrink default JVM heap.
esJavaOpts: "-Xmx128m -Xms128m"

# Allocate smaller chunks of memory per pod.
resources:
  requests:
    cpu: "100m"
    memory: "512M"
  limits:
    cpu: "1000m"
    memory: "512M"

# Request smaller persistent volumes.
volumeClaimTemplate:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: "local-path"
  resources:
    requests:
      storage: 100M
@@ -0,0 +1,13 @@
default: test

RELEASE := helm-es-microk8s
TIMEOUT := 1200s

install:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../

test: install
	helm test $(RELEASE)

purge:
	helm del $(RELEASE)
@@ -0,0 +1,32 @@
# MicroK8S

This example deploys a 3-node Elasticsearch 8.4.1 cluster on [MicroK8S][]
using [custom values][].

Note that this configuration should be used for testing only and isn't
recommended for production.


## Requirements

The following MicroK8S [addons][] need to be enabled:

- `dns`
- `helm`
- `storage`
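
Assuming the `microk8s` command is available on the host, they can be enabled
in one go (a sketch; addon names as listed above):

```
microk8s enable dns helm storage
```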

## Usage

* Deploy Elasticsearch chart with the default values: `make install`

* You can now set up a port forward to query the Elasticsearch API:

```
kubectl port-forward svc/elasticsearch-master 9200
curl localhost:9200/_cat/indices
```


[addons]: https://microk8s.io/docs/addons
[custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/microk8s/values.yaml
[MicroK8S]: https://microk8s.io
@@ -0,0 +1,32 @@
---
# Disable privileged init Container creation.
sysctlInitContainer:
  enabled: false

# Restrict the use of the memory-mapping when sysctlInitContainer is disabled.
esConfig:
  elasticsearch.yml: |
    node.store.allow_mmap: false

# Permit co-located instances for solitary minikube virtual machines.
antiAffinity: "soft"

# Shrink default JVM heap.
esJavaOpts: "-Xmx128m -Xms128m"

# Allocate smaller chunks of memory per pod.
resources:
  requests:
    cpu: "100m"
    memory: "512M"
  limits:
    cpu: "1000m"
    memory: "512M"

# Request smaller persistent volumes.
volumeClaimTemplate:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: "microk8s-hostpath"
  resources:
    requests:
      storage: 100M
@@ -0,0 +1,10 @@
PREFIX := helm-es-migration

data:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../

master:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../

client:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../
@@ -0,0 +1,167 @@
# Migration Guide from helm/charts

There are two viable options for migrating from the community Elasticsearch
Helm chart in the [helm/charts][] repo:

1. Restoring from Snapshot to a fresh cluster.
2. Live migration by joining a new cluster to the existing cluster.

## Restoring from Snapshot

This is the recommended and preferred option. The downside is that it will
involve a period of write downtime during the migration. If you have a way to
temporarily stop writes to your cluster then this is the way to go. This is also
a lot simpler as it just involves launching a fresh cluster and restoring a
snapshot following the [restoring to a different cluster guide][].

## Live migration

If restoring from a snapshot is not possible due to the write downtime then a
live migration is also possible. It is very important to first test this in a
testing environment to make sure you are comfortable with the process and fully
understand what is happening.

This process will involve joining a new set of master, data and client nodes to
an existing cluster that has been deployed using the [helm/charts][] community
chart. Nodes will then be replaced one by one in a controlled fashion to
decommission the old cluster.

This example will be using the default values for the existing helm/charts
release and for the Elastic helm-charts release. If you have changed any of the
default values then you will need to first make sure that your values are
configured in a compatible way before starting the migration.

The process will involve a re-sync and a rolling restart of all of your data
nodes. Therefore it is important to disable shard allocation and perform a
synced flush like you normally would during any other rolling upgrade. See the
[rolling upgrades guide][] for more information.
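
A sketch of those two preparation steps, using the Elasticsearch 6.x APIs
described in the rolling upgrades guide (run against the existing cluster
before deploying anything new):

```
# Disable shard allocation (re-enable it once the migration is done)
curl -X PUT localhost:9200/_cluster/settings -H 'Content-Type: application/json' -d '
{
  "persistent": {
    "cluster.routing.allocation.enable": "primaries"
  }
}'

# Perform a synced flush (6.x-only API)
curl -X POST localhost:9200/_flush/synced
```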

* The default image for this chart is
`docker.elastic.co/elasticsearch/elasticsearch` which contains the default
distribution of Elasticsearch with a [basic license][]. Make sure to update the
`image` and `imageTag` values to the correct Docker image and Elasticsearch
version that you currently have deployed, for example:
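
(A values snippet for illustration: the image name is the chart default
mentioned above, while the tag is only a placeholder for whatever version your
existing cluster is actually running.)

```
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "6.8.23"  # replace with your currently deployed version
```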

* Convert your current helm/charts configuration into something that is
compatible with this chart.

* Take a fresh snapshot of your cluster. If something goes wrong you want to be
able to restore your data no matter what.

* Check that your cluster's health is green. If not, abort and make sure your
cluster is healthy before continuing:

```
curl localhost:9200/_cluster/health
```

* Deploy new data nodes which will join the existing cluster. Take a look at the
configuration in [data.yaml][]:

```
make data
```

* Check that the new nodes have joined the cluster (run this and any other curl
commands from within one of your pods):

```
curl localhost:9200/_cat/nodes
```

* Check that your cluster is still green. If so, we can now start to scale down
the existing data nodes. Assuming you have the default number of data nodes (2),
we now want to scale it down to 1:

```
kubectl scale statefulsets my-release-elasticsearch-data --replicas=1
```

* Wait for your cluster to become green again:

```
watch 'curl -s localhost:9200/_cluster/health'
```

* Once the cluster is green we can scale down again:

```
kubectl scale statefulsets my-release-elasticsearch-data --replicas=0
```

* Wait for the cluster to be green again.
* OK. We now have all data nodes running in the new cluster. Time to replace the
masters by first scaling down the masters from 3 to 2. Between each step make
sure to wait for the cluster to become green again, and check with
`curl localhost:9200/_cat/nodes` that you see the correct number of master
nodes. During this process we will always make sure to keep at least 2 master
nodes so as not to lose quorum:

```
kubectl scale statefulsets my-release-elasticsearch-master --replicas=2
```

* Now deploy a single new master so that we have 3 masters again. See
[master.yaml][] for the configuration:

```
make master
```

* Scale down the old masters to 1:

```
kubectl scale statefulsets my-release-elasticsearch-master --replicas=1
```

* Edit the master replicas in [master.yaml][] to 2 and redeploy:

```
make master
```

* Scale down the old masters to 0:

```
kubectl scale statefulsets my-release-elasticsearch-master --replicas=0
```

* Edit [master.yaml][] to have 3 replicas and remove the
`discovery.zen.ping.unicast.hosts` entry from `extraEnvs`, then redeploy the
masters. This will make sure all 3 masters are running in the new cluster and
are pointing at each other for discovery:

```
make master
```

* Remove the `discovery.zen.ping.unicast.hosts` entry from `extraEnvs`, then
redeploy the data nodes to make sure they are pointing at the new masters:

```
make data
```

* Deploy the client nodes:

```
make client
```

* Update any processes that are talking to the existing client nodes and point
them to the new client nodes. Once this is done you can scale down the old
client nodes:

```
kubectl scale deployment my-release-elasticsearch-client --replicas=0
```

* The migration should now be complete. After verifying that everything is
working correctly you can clean up leftover resources from your old cluster.

[basic license]: https://www.elastic.co/subscriptions
[data.yaml]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/migration/data.yaml
[helm/charts]: https://github.com/helm/charts/tree/master/stable/elasticsearch
[master.yaml]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/migration/master.yaml
[restoring to a different cluster guide]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/modules-snapshots.html#_restoring_to_a_different_cluster
[rolling upgrades guide]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/rolling-upgrades.html
@@ -0,0 +1,19 @@
---
replicas: 2

clusterName: "elasticsearch"
nodeGroup: "client"

esMajorVersion: 6

roles: []

volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  storageClassName: "standard"
  resources:
    requests:
      storage: 1Gi # Currently needed till pvcs are made optional

persistence:
  enabled: false
@@ -0,0 +1,14 @@
---
replicas: 2

esMajorVersion: 6

extraEnvs:
  - name: discovery.zen.ping.unicast.hosts
    value: "my-release-elasticsearch-discovery"

clusterName: "elasticsearch"
nodeGroup: "data"

roles:
  - data
@@ -0,0 +1,23 @@
---
# Temporarily set to 3 so we can scale up/down the old and new cluster
# one at a time whilst always keeping 3 masters running
replicas: 1

esMajorVersion: 6

extraEnvs:
  - name: discovery.zen.ping.unicast.hosts
    value: "my-release-elasticsearch-discovery"

clusterName: "elasticsearch"
nodeGroup: "master"

roles:
  - master

volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  storageClassName: "standard"
  resources:
    requests:
      storage: 4Gi
@@ -0,0 +1,13 @@
default: test

RELEASE := helm-es-minikube
TIMEOUT := 1200s

install:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../

test: install
	helm test $(RELEASE)

purge:
	helm del $(RELEASE)
@@ -0,0 +1,38 @@
# Minikube

This example deploys a 3-node Elasticsearch 8.4.1 cluster on [Minikube][]
using [custom values][].

If helm or kubectl timeouts occur, consider creating a minikube VM with more
CPU cores or memory allocated.

Note that this configuration should be used for testing only and isn't
recommended for production.


## Requirements

In order to properly support the required persistent volume claims for the
Elasticsearch StatefulSet, the `default-storageclass` and `storage-provisioner`
minikube addons must be enabled:

```
minikube addons enable default-storageclass
minikube addons enable storage-provisioner
```


## Usage

* Deploy Elasticsearch chart with the default values: `make install`

* You can now set up a port forward to query the Elasticsearch API:

```
kubectl port-forward svc/elasticsearch-master 9200
curl localhost:9200/_cat/indices
```


[custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/minikube/values.yaml
[minikube]: https://minikube.sigs.k8s.io/docs/
@@ -0,0 +1,23 @@
---
# Permit co-located instances for solitary minikube virtual machines.
antiAffinity: "soft"

# Shrink default JVM heap.
esJavaOpts: "-Xmx128m -Xms128m"

# Allocate smaller chunks of memory per pod.
resources:
  requests:
    cpu: "100m"
    memory: "512M"
  limits:
    cpu: "1000m"
    memory: "512M"

# Request smaller persistent volumes.
volumeClaimTemplate:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: "standard"
  resources:
    requests:
      storage: 100M
@@ -0,0 +1,19 @@
default: test

include ../../../helpers/examples.mk

PREFIX := helm-es-multi
RELEASE := helm-es-multi-master
TIMEOUT := 1200s

install:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../

test: install goss

purge:
	helm del $(PREFIX)-master
	helm del $(PREFIX)-data
	helm del $(PREFIX)-client
@@ -0,0 +1,29 @@
# Multi

This example deploys an Elasticsearch 8.4.1 cluster composed of 3 different Helm
releases:

- `helm-es-multi-master` for the 3 master nodes using [master values][]
- `helm-es-multi-data` for the 3 data nodes using [data values][]
- `helm-es-multi-client` for the 3 client nodes using [client values][]

## Usage

* Deploy the 3 Elasticsearch releases: `make install`

* You can now set up a port forward to query the Elasticsearch API:

```
kubectl port-forward svc/multi-master 9200
curl -u elastic:changeme http://localhost:9200/_cat/indices
```
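
To confirm that the three releases joined a single cluster, you can list the
nodes through the same port forward; the goss test for this example expects 9
nodes in total, 3 of them data nodes:

```
curl -u elastic:changeme http://localhost:9200/_cat/nodes
```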

## Testing

You can also run the [goss integration tests][] using `make test`.


[client values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/client.yaml
[data values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/data.yaml
[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/test/goss.yaml
[master values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/master.yaml
@@ -0,0 +1,50 @@
---
clusterName: "multi"
nodeGroup: "client"

extraEnvs:
  - name: ELASTIC_PASSWORD
    valueFrom:
      secretKeyRef:
        name: multi-master-credentials
        key: password
  - name: xpack.security.enabled
    value: "true"
  - name: xpack.security.transport.ssl.enabled
    value: "true"
  - name: xpack.security.http.ssl.enabled
    value: "true"
  - name: xpack.security.transport.ssl.verification_mode
    value: "certificate"
  - name: xpack.security.transport.ssl.key
    value: "/usr/share/elasticsearch/config/certs/tls.key"
  - name: xpack.security.transport.ssl.certificate
    value: "/usr/share/elasticsearch/config/certs/tls.crt"
  - name: xpack.security.transport.ssl.certificate_authorities
    value: "/usr/share/elasticsearch/config/certs/ca.crt"
  - name: xpack.security.http.ssl.key
    value: "/usr/share/elasticsearch/config/certs/tls.key"
  - name: xpack.security.http.ssl.certificate
    value: "/usr/share/elasticsearch/config/certs/tls.crt"
  - name: xpack.security.http.ssl.certificate_authorities
    value: "/usr/share/elasticsearch/config/certs/ca.crt"

roles: []

persistence:
  enabled: false

# For client nodes, we also need to add an empty node.roles in elasticsearch.yml
# This is due to https://github.com/elastic/helm-charts/pull/1186#discussion_r631225687
esConfig:
  elasticsearch.yml: |
    node.roles: []

secret:
  enabled: false

createCert: false
secretMounts:
  - name: elastic-certificates
    secretName: multi-master-certs
    path: /usr/share/elasticsearch/config/certs
@@ -0,0 +1,48 @@
---
clusterName: "multi"
nodeGroup: "data"

extraEnvs:
  - name: ELASTIC_PASSWORD
    valueFrom:
      secretKeyRef:
        name: multi-master-credentials
        key: password
  - name: xpack.security.enabled
    value: "true"
  - name: xpack.security.transport.ssl.enabled
    value: "true"
  - name: xpack.security.http.ssl.enabled
    value: "true"
  - name: xpack.security.transport.ssl.verification_mode
    value: "certificate"
  - name: xpack.security.transport.ssl.key
    value: "/usr/share/elasticsearch/config/certs/tls.key"
  - name: xpack.security.transport.ssl.certificate
    value: "/usr/share/elasticsearch/config/certs/tls.crt"
  - name: xpack.security.transport.ssl.certificate_authorities
    value: "/usr/share/elasticsearch/config/certs/ca.crt"
  - name: xpack.security.http.ssl.key
    value: "/usr/share/elasticsearch/config/certs/tls.key"
  - name: xpack.security.http.ssl.certificate
    value: "/usr/share/elasticsearch/config/certs/tls.crt"
  - name: xpack.security.http.ssl.certificate_authorities
    value: "/usr/share/elasticsearch/config/certs/ca.crt"

roles:
  - data
  - data_content
  - data_hot
  - data_warm
  - data_cold
  - data_frozen
  - ingest

secret:
  enabled: false

createCert: false
secretMounts:
  - name: elastic-certificates
    secretName: multi-master-certs
    path: /usr/share/elasticsearch/config/certs
@@ -0,0 +1,6 @@
---
clusterName: "multi"
nodeGroup: "master"

roles:
  - master
@@ -0,0 +1,12 @@
http:
  https://localhost:9200/_cluster/health:
    status: 200
    timeout: 2000
    allow-insecure: true
    username: elastic
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    body:
      - "green"
      - '"cluster_name":"multi"'
      - '"number_of_nodes":9'
      - '"number_of_data_nodes":3'
@@ -0,0 +1,14 @@
default: test

include ../../../helpers/examples.mk

RELEASE := helm-es-networkpolicy
TIMEOUT := 1200s

install:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../

test: install goss

purge:
	helm del $(RELEASE)
@@ -0,0 +1,37 @@
networkPolicy:
  http:
    enabled: true
    explicitNamespacesSelector:
      # Accept from namespaces with all those different rules (from whitelisted Pods)
      matchLabels:
        role: frontend-http
      matchExpressions:
        - {key: role, operator: In, values: [frontend-http]}
    additionalRules:
      - podSelector:
          matchLabels:
            role: frontend-http
      - podSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - frontend-http
  transport:
    enabled: true
    allowExternal: true
    explicitNamespacesSelector:
      matchLabels:
        role: frontend-transport
      matchExpressions:
        - {key: role, operator: In, values: [frontend-transport]}
    additionalRules:
      - podSelector:
          matchLabels:
            role: frontend-transport
      - podSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - frontend-transport
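
Not part of the chart values, but a quick way to exercise the policy above is
to start one pod carrying a whitelisted label and one without, and try to reach
Elasticsearch from each. This is only a sketch: the `elasticsearch-master`
service name is the chart default and `curlimages/curl` is just an example
client image.

```
# Expect an HTTP response (an authentication error without credentials is fine):
# the pod matches the role=frontend-http selector
kubectl run allowed --rm -it --restart=Never --labels="role=frontend-http" \
  --image=curlimages/curl --command -- curl -sk --max-time 5 https://elasticsearch-master:9200

# Expect a timeout: an unlabelled pod is not whitelisted by the policy
kubectl run blocked --rm -it --restart=Never \
  --image=curlimages/curl --command -- curl -sk --max-time 5 https://elasticsearch-master:9200
```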
@@ -0,0 +1,13 @@
default: test

include ../../../helpers/examples.mk

RELEASE := elasticsearch

install:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../

test: install goss

purge:
	helm del $(RELEASE)
@@ -0,0 +1,24 @@
# OpenShift

This example deploys a 3-node Elasticsearch 8.4.1 cluster on [OpenShift][]
using [custom values][].

## Usage

* Deploy Elasticsearch chart with the default values: `make install`

* You can now set up a port forward to query the Elasticsearch API:

```
kubectl port-forward svc/elasticsearch-master 9200
curl localhost:9200/_cat/indices
```

## Testing

You can also run the [goss integration tests][] using `make test`.


[custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift/values.yaml
[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift/test/goss.yaml
[openshift]: https://www.openshift.com/
@@ -0,0 +1,20 @@
http:
  https://localhost:9200/_cluster/health:
    status: 200
    timeout: 2000
    username: elastic
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    body:
      - "green"
      - '"number_of_nodes":3'
      - '"number_of_data_nodes":3'

  https://localhost:9200:
    status: 200
    timeout: 2000
    username: elastic
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    body:
      - '"number" : "8.4.1"'
      - '"cluster_name" : "elasticsearch"'
      - "You Know, for Search"
@@ -0,0 +1,11 @@
---

securityContext:
  runAsUser: null

podSecurityContext:
  fsGroup: null
  runAsUser: null

sysctlInitContainer:
  enabled: false
@@ -0,0 +1,36 @@
default: test

include ../../../helpers/examples.mk

RELEASE := helm-es-security
ELASTICSEARCH_IMAGE := docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION)
TIMEOUT := 1200s

install:
	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../

test: secrets install goss

purge:
	kubectl delete secrets elastic-certificates elastic-certificate-pem elastic-certificate-crt || true
	helm del $(RELEASE)

pull-elasticsearch-image:
	docker pull $(ELASTICSEARCH_IMAGE)

secrets:
	docker rm -f elastic-helm-charts-certs || true
	rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12 || true
	docker run --name elastic-helm-charts-certs -i -w /tmp \
		$(ELASTICSEARCH_IMAGE) \
		/bin/sh -c " \
			elasticsearch-certutil ca --out /tmp/elastic-stack-ca.p12 --pass '' && \
			elasticsearch-certutil cert --name security-master --dns security-master --ca /tmp/elastic-stack-ca.p12 --pass '' --ca-pass '' --out /tmp/elastic-certificates.p12" && \
	docker cp elastic-helm-charts-certs:/tmp/elastic-certificates.p12 ./ && \
	docker rm -f elastic-helm-charts-certs && \
	openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem && \
	openssl x509 -outform der -in elastic-certificate.pem -out elastic-certificate.crt && \
	kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12 && \
	kubectl create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem && \
	kubectl create secret generic elastic-certificate-crt --from-file=elastic-certificate.crt && \
	rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12
@@ -0,0 +1,29 @@
# Security

This example deploys a 3-node Elasticsearch 8.4.1 cluster with authentication
and autogenerated certificates for TLS (see [values][]).

Note that this configuration should be used for testing only. For a production
deployment you should generate SSL certificates following the [official docs][].

## Usage

* Create the required secrets: `make secrets`

* Deploy Elasticsearch chart with the default values: `make install`

* You can now set up a port forward to query the Elasticsearch API:

```
kubectl port-forward svc/security-master 9200
curl -u elastic:changeme https://localhost:9200/_cat/indices
```
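
As an extra sanity check of the TLS setup, the license endpoint can be queried
over the same port forward; the goss tests for this example expect an active
`basic` license. `-k` skips certificate verification since the certificates are
self-signed:

```
curl -k -u elastic:changeme https://localhost:9200/_license
```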

## Testing

You can also run the [goss integration tests][] using `make test`.


[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/security/test/goss.yaml
[official docs]: https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-tls.html#node-certificates
[values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/security/values.yaml
@@ -0,0 +1,44 @@
http:
  https://security-master:9200/_cluster/health:
    status: 200
    timeout: 2000
    allow-insecure: true
    username: elastic
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    body:
      - "green"
      - '"number_of_nodes":3'
      - '"number_of_data_nodes":3'

  https://localhost:9200/:
    status: 200
    timeout: 2000
    allow-insecure: true
    username: elastic
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    body:
      - '"cluster_name" : "security"'
      - "You Know, for Search"

  https://localhost:9200/_license:
    status: 200
    timeout: 2000
    allow-insecure: true
    username: elastic
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    body:
      - "active"
      - "basic"

file:
  /usr/share/elasticsearch/config/elasticsearch.yml:
    exists: true
    contains:
      - "xpack.security.enabled: true"
      - "xpack.security.transport.ssl.enabled: true"
      - "xpack.security.transport.ssl.verification_mode: certificate"
      - "xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
      - "xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
      - "xpack.security.http.ssl.enabled: true"
      - "xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
      - "xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
@@ -0,0 +1,28 @@
---
clusterName: "security"
nodeGroup: "master"

createCert: false

roles:
  - master
  - ingest
  - data

protocol: https

esConfig:
  elasticsearch.yml: |
    xpack.security.enabled: true
    xpack.security.transport.ssl.enabled: true
    xpack.security.transport.ssl.verification_mode: certificate
    xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
    xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
    xpack.security.http.ssl.enabled: true
    xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
    xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12

secretMounts:
  - name: elastic-certificates
    secretName: elastic-certificates
    path: /usr/share/elasticsearch/config/certs
@@ -0,0 +1,19 @@
default: test

include ../../../helpers/examples.mk

CHART := elasticsearch
RELEASE := helm-es-upgrade
FROM := 7.17.1 # upgrade from versions before 7.17.1 isn't compatible with 8.x

install:
	../../../helpers/upgrade.sh --chart $(CHART) --release $(RELEASE) --from $(FROM)
	# Rolling upgrade doesn't work when upgrading from clusters with security disabled.
	# This is because nodes with security enabled can't join a cluster with security disabled.
	# Every node needs to be recreated at the same time so they can form a new cluster with security enabled.
	kubectl delete pod --selector=app=upgrade-master

test: install goss

purge:
	helm del $(RELEASE)
@@ -0,0 +1,17 @@
# Upgrade

This example deploys a 3-node Elasticsearch cluster using an older chart
version, then upgrades it to the current chart version.


## Usage

* Deploy and upgrade Elasticsearch chart with the default values: `make install`
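
Once `make install` has finished, a port forward plus a version query is a
quick way to confirm the upgrade; the goss tests expect version `8.4.1` and
cluster name `upgrade`. This sketch assumes the `upgrade-master` service name
(matching the `app=upgrade-master` selector used in the Makefile) and reuses
the `ELASTIC_PASSWORD` variable that the goss tests read:

```
kubectl port-forward svc/upgrade-master 9200 &
curl -k -u "elastic:$ELASTIC_PASSWORD" https://localhost:9200
```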

## Testing

You can also run the [goss integration tests][] using `make test`.


[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/upgrade/test/goss.yaml
@@ -0,0 +1,22 @@
http:
  https://localhost:9200/_cluster/health:
    status: 200
    username: elastic
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    allow-insecure: true
    timeout: 2000
    body:
      - "green"
      - '"number_of_nodes":3'
      - '"number_of_data_nodes":3'

  https://localhost:9200:
    status: 200
    username: elastic
    password: "{{ .Env.ELASTIC_PASSWORD }}"
    allow-insecure: true
    timeout: 2000
    body:
      - '"number" : "8.4.1"'
      - '"cluster_name" : "upgrade"'
      - "You Know, for Search"
@@ -0,0 +1,6 @@
---
clusterName: upgrade
# Rolling upgrade doesn't work when upgrading from clusters with security disabled.
# This is because nodes with security enabled can't join a cluster with security disabled.
# Every node needs to be recreated at the same time so they can form a new cluster with security enabled.
updateStrategy: OnDelete