update
This commit is contained in:
52
kubespray/tests/scripts/ansibl8s_test.sh
Normal file
52
kubespray/tests/scripts/ansibl8s_test.sh
Normal file
@@ -0,0 +1,52 @@
|
||||
#! /bin/bash

# Clone the setup-kubernetes playbooks and create the AWS test instances.
# All AWS_*/TEST_ID/KUBE_NETWORK_PLUGIN values come from the CI environment.
global_setup() {
  git clone https://github.com/ansibl8s/setup-kubernetes.git setup-kubernetes
  # NOTE: private_key is deliberately a global — the should_* test cases
  # defined below rely on it.
  private_key=""
  # Only forward a --private-key flag when a key file was supplied.
  # (The original unquoted '[ ! -z ${PRIVATE_KEY_FILE} ]' broke on paths
  # containing spaces.)
  if [ -n "${PRIVATE_KEY_FILE:-}" ]; then
    private_key="--private-key=${PRIVATE_KEY_FILE}"
  fi
  ansible-playbook create.yml -i hosts -u admin -s \
    -e test_id=${TEST_ID} \
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} \
    -e aws_access_key=${AWS_ACCESS_KEY} \
    -e aws_secret_key=${AWS_SECRET_KEY} \
    -e aws_ami_id=${AWS_AMI_ID} \
    -e aws_security_group=${AWS_SECURITY_GROUP} \
    -e key_name=${AWS_KEY_PAIR_NAME} \
    -e inventory_path=${PWD}/inventory.ini \
    -e aws_region=${AWS_REGION}
}
|
||||
|
||||
# Delete the cloud instances (when an inventory exists) and remove the
# checked-out playbooks.
global_teardown() {
  # Only tear down if global_setup actually produced an inventory.
  if [ -f inventory.ini ]; then
    ansible-playbook -i inventory.ini -u admin delete.yml
  fi
  # Quote the path and use '--' so an unusual $PWD cannot be misparsed.
  rm -rf -- "${PWD}/setup-kubernetes"
}
|
||||
|
||||
# Deploy the full cluster and assert that the playbook run succeeded.
should_deploy_cluster() {
  # ${private_key} stays unquoted on purpose: when empty it must contribute
  # no argument at all (quoting it would pass an empty positional arg).
  ansible-playbook -i inventory.ini -s ${private_key} \
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} \
    setup-kubernetes/cluster.yml

  assertion__status_code_is_success $?
}
|
||||
|
||||
# Probe the Kubernetes API server from the generated inventory.
should_api_server_respond() {
  # ${private_key} intentionally unquoted: empty means "no extra flag".
  ansible-playbook -i inventory.ini ${private_key} \
    testcases/010_check-apiserver.yml

  assertion__status_code_is_success $?
}
|
||||
|
||||
# Verify pod networking: created pods must land in the expected subnet.
should_pod_be_in_expected_subnet() {
  # ${private_key} intentionally unquoted: empty means "no extra flag".
  ansible-playbook -i inventory.ini -s ${private_key} \
    testcases/030_check-network.yml -vv

  assertion__status_code_is_success $?
}
|
||||
|
||||
# Run the advanced network checks (cluster DNS resolution among them).
should_resolve_cluster_dns() {
  # ${private_key} intentionally unquoted: empty means "no extra flag".
  ansible-playbook -i inventory.ini -s ${private_key} \
    testcases/040_check-network-adv.yml -vv

  assertion__status_code_is_success $?
}
|
||||
33
kubespray/tests/scripts/check_readme_versions.sh
Executable file
33
kubespray/tests/scripts/check_readme_versions.sh
Executable file
@@ -0,0 +1,33 @@
|
||||
#!/bin/bash
# Verify that the component versions pinned in the role defaults match the
# versions advertised in README.md, failing the build on any mismatch.
set -e

# Components whose versions must stay in sync with README.md.
TARGET_COMPONENTS="containerd calico cilium flannel kube-ovn kube-router weave cert-manager krew helm metallb registry cephfs-provisioner rbd-provisioner aws-ebs-csi-plugin azure-csi-plugin cinder-csi-plugin gcp-pd-csi-plugin local-path-provisioner local-volume-provisioner kube-vip ingress-nginx"

# cd to the root directory of kubespray
cd "$(dirname "$0")"/../../

echo checking kubernetes..
version_from_default=$(grep "^kube_version:" ./roles/kubespray-defaults/defaults/main.yaml | awk '{print $2}' | sed s/\"//g)
version_from_readme=$(grep " \[kubernetes\]" ./README.md | awk '{print $3}')
if [ "${version_from_default}" != "${version_from_readme}" ]; then
  echo "The version of kubernetes is different between main.yml(${version_from_default}) and README.md(${version_from_readme})."
  echo "If the pull request updates kubernetes version, please update README.md also."
  exit 1
fi

# Word-splitting of TARGET_COMPONENTS is intentional here.
for component in ${TARGET_COMPONENTS}; do
  echo checking ${component}..
  # Variable names use '_' where component names use '-'; bash parameter
  # expansion replaces the former '$(echo ... | sed s/-/_/g)' subshells.
  component_var="$(echo ${component} | sed s/"-"/"_"/g)_version:"
  version_from_default=$(grep "^${component_var}" ./roles/download/defaults/main.yml | awk '{print $2}' | sed s/\"//g | sed s/^v//)
  if [ "${version_from_default}" = "" ]; then
    # Fall back to the node role defaults when the component is not listed
    # in the download role (e.g. kube-vip).
    version_from_default=$(grep "^${component_var}" ./roles/kubernetes/node/defaults/main.yml | awk '{print $2}' | sed s/\"//g | sed s/^v//)
  fi
  version_from_readme=$(grep "\[${component}\]" ./README.md | grep "https" | awk '{print $3}' | sed s/^v//)
  if [ "${version_from_default}" != "${version_from_readme}" ]; then
    echo "The version of ${component} is different between main.yml(${version_from_default}) and README.md(${version_from_readme})."
    echo "If the pull request updates ${component} version, please update README.md also."
    exit 1
  fi
done

echo "Succeeded to check all components."
exit 0
|
||||
12
kubespray/tests/scripts/check_typo.sh
Executable file
12
kubespray/tests/scripts/check_typo.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
# Download the misspell tool and run it over every tracked file, failing
# the build on any detected typo.

# cd to the root directory of kubespray
cd "$(dirname "$0")"/../../

# Clean up artifacts from a previous run; -f keeps this from failing (and
# from printing an error) when nothing matches — the bare 'rm ./misspell*'
# complained on a clean tree.
rm -f ./misspell*

set -e
wget https://github.com/client9/misspell/releases/download/v0.3.4/misspell_0.3.4_linux_64bit.tar.gz
tar -zxvf ./misspell_0.3.4_linux_64bit.tar.gz
chmod 755 ./misspell
git ls-files | xargs ./misspell -error
|
||||
5
kubespray/tests/scripts/create-tf.sh
Executable file
5
kubespray/tests/scripts/create-tf.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/bash
# Create CI infrastructure with Terraform for the provider selected via
# $PROVIDER. -parallelism=1 keeps cloud API usage gentle.
set -euxo pipefail

# Run from the repository root so the relative -chdir path resolves.
cd ..
terraform -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1
|
||||
5
kubespray/tests/scripts/delete-tf.sh
Executable file
5
kubespray/tests/scripts/delete-tf.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/bash
# Destroy the CI infrastructure previously created by create-tf.sh for the
# provider selected via $PROVIDER.
set -euxo pipefail

# Run from the repository root so the relative -chdir path resolves.
cd ..
terraform -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve
|
||||
96
kubespray/tests/scripts/md-table/main.py
Executable file
96
kubespray/tests/scripts/md-table/main.py
Executable file
@@ -0,0 +1,96 @@
|
||||
#!/usr/bin/env python
"""Generate a Markdown table representing the CI test coverage."""
import argparse
import sys          # duplicate 'import sys' removed
import glob
from pathlib import Path
import yaml
from pydblite import Base
import re
import jinja2

from pprint import pprint


parser = argparse.ArgumentParser(description='Generate a Markdown table representing the CI test coverage')
parser.add_argument('--dir', default='tests/files/', help='folder with test yml files')


args = parser.parse_args()
p = Path(args.dir)

# Templates (table.md.j2) live next to this script, hence sys.path[0].
env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=sys.path[0]))
|
||||
|
||||
# Data represents CI coverage data matrix
class Data:
    """In-memory matrix of (container_manager, network_plugin,
    operating_system) combinations exercised by CI."""

    def __init__(self):
        # Backed by an in-memory pydblite table.
        self.db = Base(':memory:')
        self.db.create('container_manager', 'network_plugin', 'operating_system')

    def set(self, container_manager, network_plugin, operating_system):
        """Record one covered combination."""
        self.db.insert(container_manager=container_manager,
                       network_plugin=network_plugin,
                       operating_system=operating_system)
        self.db.commit()

    def exists(self, container_manager, network_plugin, operating_system):
        """Return True when the given combination has CI coverage."""
        matches = ((self.db("container_manager") == container_manager)
                   & (self.db("network_plugin") == network_plugin)
                   & (self.db("operating_system") == operating_system))
        return len(matches) > 0

    def jinja(self):
        """Render the coverage matrix through the table.md.j2 template."""
        template = env.get_template('table.md.j2')
        container_engines = sorted(self.db.get_unique_ids('container_manager'))
        network_plugins = sorted(self.db.get_unique_ids('network_plugin'))
        operating_systems = sorted(self.db.get_unique_ids('operating_system'))

        return template.render(
            container_engines=container_engines,
            network_plugins=network_plugins,
            operating_systems=operating_systems,
            exists=self.exists
        )

    def markdown(self):
        """Hand-rolled Markdown rendering (debug helper; jinja() is the one
        actually printed by the script)."""
        out = ''
        for container_manager in self.db.get_unique_ids('container_manager'):
            # One header + table per container engine.
            out += "# " + container_manager + "\n"
            headers = '|OS / CNI| '
            underline = '|----|'
            for network_plugin in self.db.get_unique_ids("network_plugin"):
                headers += network_plugin + ' | '
                underline += '----|'
            out += headers + "\n" + underline + "\n"
            for operating_system in self.db.get_unique_ids("operating_system"):
                out += '| ' + operating_system + ' | '
                for network_plugin in self.db.get_unique_ids("network_plugin"):
                    if self.exists(container_manager, network_plugin, operating_system):
                        emoji = ':white_check_mark:'
                    else:
                        emoji = ':x:'
                    out += emoji + ' | '
                out += "\n"

        pprint(self.db.get_unique_ids('operating_system'))
        pprint(self.db.get_unique_ids('network_plugin'))
        return out
|
||||
|
||||
|
||||
|
||||
if not p.is_dir():
    print("Path is not a directory")
    sys.exit(2)

data = Data()
files = p.glob('*.yml')
for f in files:
    # Close each scenario file promptly instead of leaking the handle
    # from 'yaml.load(f.open(), ...)'.
    with f.open() as fh:
        y = yaml.load(fh, Loader=yaml.FullLoader)

    container_manager = y.get('container_manager', 'containerd')
    network_plugin = y.get('kube_network_plugin', 'calico')
    # File names look like "<provider>_<os>....yml"; group 1 is the OS token.
    x = re.match(r"^[a-z-]+_([a-z0-9]+).*", f.name)
    if x is None:
        # Skip files that do not follow the naming convention instead of
        # crashing on x.group(1) with AttributeError.
        continue
    operating_system = x.group(1)
    data.set(container_manager=container_manager, network_plugin=network_plugin, operating_system=operating_system)

#print(data.markdown())
print(data.jinja())
|
||||
4
kubespray/tests/scripts/md-table/requirements.txt
Normal file
4
kubespray/tests/scripts/md-table/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
pyaml
|
||||
jinja2
|
||||
pathlib ; python_version < '3.10'
|
||||
pydblite
|
||||
15
kubespray/tests/scripts/md-table/table.md.j2
Normal file
15
kubespray/tests/scripts/md-table/table.md.j2
Normal file
@@ -0,0 +1,15 @@
|
||||
{#- Rendered by tests/scripts/md-table/main.py. Whitespace is significant:
    the output is compared byte-for-byte against docs/ci.md with cmp, hence
    the '-' trim markers on this comment and the for-tags below. -#}
# CI test coverage

To generate this Matrix run `./tests/scripts/md-table/main.py`

{%- for container_engine in container_engines %}

## {{ container_engine }}

| OS / CNI |{% for cni in network_plugins %} {{ cni }} |{% endfor %}
|---|{% for cni in network_plugins %} --- |{% endfor %}
{%- for os in operating_systems %}
{{ os }} | {% for cni in network_plugins %} {{ ':white_check_mark:' if exists(container_engine, cni, os) else ':x:' }} |{% endfor %}
{%- endfor %}

{%- endfor %}
|
||||
11
kubespray/tests/scripts/md-table/test.sh
Executable file
11
kubespray/tests/scripts/md-table/test.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
# Regenerate the CI coverage table and verify docs/ci.md is up to date.
set -euxo pipefail

echo "Install requirements..."
pip install -r ./tests/scripts/md-table/requirements.txt

echo "Generate current file..."
./tests/scripts/md-table/main.py > tmp.md

echo "Compare docs/ci.md with actual tests in tests/files/*.yml ..."
# cmp exits non-zero on any difference, failing the job under 'set -e'.
cmp docs/ci.md tmp.md
|
||||
9
kubespray/tests/scripts/molecule_logs.sh
Executable file
9
kubespray/tests/scripts/molecule_logs.sh
Executable file
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash
# Collect molecule stdout/stderr logs into a compressed tarball.

# Ensure a clean environment
rm -fr molecule_logs
mkdir -p molecule_logs

# Collect and archive the logs.
# The -name alternatives must be parenthesised: without \( \) the expression
# parsed as "-name '*.out' OR (-name '*.err' AND -type f)", so -type f never
# applied to the '.out' branch. NUL-delimiting protects paths with spaces.
find ~/.cache/molecule/ \( -name '*.out' -o -name '*.err' \) -type f -print0 \
  | xargs -0 tar -uf molecule_logs/molecule.tar
gzip molecule_logs/molecule.tar
|
||||
34
kubespray/tests/scripts/molecule_run.sh
Executable file
34
kubespray/tests/scripts/molecule_run.sh
Executable file
@@ -0,0 +1,34 @@
|
||||
#!/bin/bash
# Run 'molecule test --all' for every role that ships a molecule/ directory.
# Usage: molecule_run.sh [-e|--exclude ROLE] [-i|--include ROLE] [-h|--help]
set -euxo pipefail -o noglob

export LC_ALL=C.UTF-8
export LANG=C.UTF-8

_PATH='roles'
_EXCLUDE=""

while [[ $# -gt 0 ]] ; do
  case $1 in
    -e|--exclude)
      # Accumulate '-not -path' filters for the find below.
      _EXCLUDE="${_EXCLUDE} -not -path ${_PATH}/$2/*"
      shift
      shift
      ;;
    -i|--include)
      _PATH="${_PATH}/$2"
      shift
      shift
      ;;
    -h|--help)
      echo "Usage: molecule_run.sh [-h|--help] [-e|--exclude] [-i|--include]"
      exit 0
      ;;
    *)
      # Previously an unrecognized argument was never consumed, which left
      # this loop spinning forever. Fail fast instead.
      echo "Unknown argument: $1" >&2
      echo "Usage: molecule_run.sh [-h|--help] [-e|--exclude] [-i|--include]"
      exit 1
      ;;
  esac
done

# ${_PATH} and ${_EXCLUDE} are intentionally unquoted: find needs the word
# splitting, and 'set -o noglob' above keeps the literal '*' intact.
for d in $(find ${_PATH} ${_EXCLUDE} -name molecule -type d)
do
  pushd $(dirname $d)
  molecule test --all
  popd
done
|
||||
15
kubespray/tests/scripts/rebase.sh
Executable file
15
kubespray/tests/scripts/rebase.sh
Executable file
@@ -0,0 +1,15 @@
|
||||
#!/bin/bash
# Rebase CI pull-request branches onto master (or the matching release
# branch) so jobs test against the latest changes.
set -euxo pipefail

# Next minor release, e.g. KUBESPRAY_VERSION=v2.19.x -> 2.20
KUBESPRAY_NEXT_VERSION=2.$(( ${KUBESPRAY_VERSION:3:2} + 1 ))

# Rebase PRs on master (or release branch) to get latest changes
if [[ $CI_COMMIT_REF_NAME == pr-* ]]; then
  git config user.email "ci@kubespray.io"
  git config user.name "CI"
  # $( ) replaces the legacy backticks; quoting keeps the (possibly empty)
  # result as a single word.
  if [[ -z "$(git branch -a --list "origin/release-$KUBESPRAY_NEXT_VERSION")" ]]; then
    git pull --rebase origin master
  else
    git pull --rebase origin "release-$KUBESPRAY_NEXT_VERSION"
  fi
fi
|
||||
6
kubespray/tests/scripts/terraform_install.sh
Executable file
6
kubespray/tests/scripts/terraform_install.sh
Executable file
@@ -0,0 +1,6 @@
|
||||
#!/bin/bash
# Install the Terraform release pinned by $TF_VERSION.
set -euxo pipefail

apt-get install -y unzip
# -f makes curl fail on HTTP errors: without it an error page was written to
# terraform.zip and the script only broke later at unzip. -sSL: quiet but
# still show errors, follow redirects.
curl -fsSL "https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip" -o /tmp/terraform.zip
unzip /tmp/terraform.zip && mv ./terraform /usr/local/bin/ && terraform --version
|
||||
9
kubespray/tests/scripts/testcases_cleanup.sh
Executable file
9
kubespray/tests/scripts/testcases_cleanup.sh
Executable file
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash
# Tear down the CI platform resources and archive ARA reports.
set -euxo pipefail

cd tests && make delete-${CI_PLATFORM} -s ; cd -

# Archive ARA (Ansible run analysis) data into the cluster dump, then drop it.
if [ -d ~/.ara ] ; then
  # Quote the destination: CI_PROJECT_DIR may contain unusual characters.
  tar czvf "${CI_PROJECT_DIR}/cluster-dump/ara.tgz" ~/.ara
  rm -fr ~/.ara
fi
|
||||
18
kubespray/tests/scripts/testcases_prepare.sh
Executable file
18
kubespray/tests/scripts/testcases_prepare.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/bin/bash
# Prepare the CI container for running test cases: pin the requested Ansible
# major version and set up SSH/dump directories.
set -euxo pipefail

# Default Ansible major version unless the CI job overrides it; quoted so
# the expansion is a single word under 'set -u'.
: "${ANSIBLE_MAJOR_VERSION:=2.12}"

# Reinstall ansible at the requested major version from a clean slate.
/usr/bin/python -m pip uninstall -y ansible ansible-base ansible-core
/usr/bin/python -m pip install -r "tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt"
mkdir -p /.ssh
mkdir -p cluster-dump
mkdir -p "$HOME/.ssh"
ansible-playbook --version

# in some cases we may need to bring in collections or roles from ansible-galaxy
# to compensate for missing functionality in older ansible versions
if [ -f "requirements-${ANSIBLE_MAJOR_VERSION}.yml" ] ; then
  ansible-galaxy role install -r "requirements-${ANSIBLE_MAJOR_VERSION}.yml"
  ansible-galaxy collection install -r "requirements-${ANSIBLE_MAJOR_VERSION}.yml"
fi
|
||||
129
kubespray/tests/scripts/testcases_run.sh
Executable file
129
kubespray/tests/scripts/testcases_run.sh
Executable file
@@ -0,0 +1,129 @@
|
||||
#!/bin/bash
# Main CI test driver: deploys a cluster for the current job, optionally
# exercises upgrade / recovery / idempotency / node-removal paths, then runs
# the numbered test-case playbooks.
set -euxo pipefail

echo "CI_JOB_NAME is $CI_JOB_NAME"
CI_TEST_ADDITIONAL_VARS=""

# Sanity check: the job name and UPGRADE_TEST must agree.
if [[ "$CI_JOB_NAME" =~ "upgrade" ]]; then
  if [ "${UPGRADE_TEST}" == "false" ]; then
    echo "Job name contains 'upgrade', but UPGRADE_TEST='false'"
    exit 1
  fi
else
  if [ "${UPGRADE_TEST}" != "false" ]; then
    echo "UPGRADE_TEST!='false', but job names does not contain 'upgrade'"
    exit 1
  fi
fi

# needed for ara not to complain
export TZ=UTC

export ANSIBLE_REMOTE_USER=$SSH_USER
export ANSIBLE_BECOME=true
export ANSIBLE_BECOME_USER=root
export ANSIBLE_CALLBACK_PLUGINS="$(python -m ara.setup.callback_plugins)"

cd tests && make create-${CI_PLATFORM} -s ; cd -
ansible-playbook tests/cloud_playbooks/wait-for-ssh.yml

# Flatcar Container Linux needs auto update disabled
if [[ "$CI_JOB_NAME" =~ "coreos" ]]; then
  ansible all -m raw -a 'systemctl disable locksmithd'
  ansible all -m raw -a 'systemctl stop locksmithd'
  mkdir -p /opt/bin && ln -s /usr/bin/python /opt/bin/python
fi

if [[ "$CI_JOB_NAME" =~ "opensuse" ]]; then
  # OpenSUSE needs netconfig update to get correct resolv.conf
  # See https://goinggnu.wordpress.com/2013/10/14/how-to-fix-the-dns-in-opensuse-13-1/
  ansible all -m raw -a 'netconfig update -f'
  # Auto import repo keys
  ansible all -m raw -a 'zypper --gpg-auto-import-keys refresh'
fi

if [[ "$CI_JOB_NAME" =~ "ubuntu" ]]; then
  # We need to tell ansible that ubuntu hosts are python3 only
  CI_TEST_ADDITIONAL_VARS="-e ansible_python_interpreter=/usr/bin/python3"
fi

ENABLE_040_TEST="true"
if [[ "$CI_JOB_NAME" =~ "hardening" ]]; then
  # TODO: We need to remove this condition by finding alternative container
  # image instead of netchecker which doesn't work at hardening environments.
  ENABLE_040_TEST="false"
fi

# Check out latest tag if testing upgrade
test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout "$KUBESPRAY_VERSION"
# Checkout the CI vars file so it is available
test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_REGISTRY_MIRROR}
test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_SETTING}

# Create cluster
ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml

# Repeat deployment if testing upgrade
if [ "${UPGRADE_TEST}" != "false" ]; then
  # Default to a plain redeploy; previously PLAYBOOK stayed unset (and
  # aborted under 'set -u') when UPGRADE_TEST was neither basic nor graceful.
  PLAYBOOK="cluster.yml"
  test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"
  git checkout "${CI_BUILD_REF}"
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" $PLAYBOOK
fi

# Test control plane recovery
if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes reset.yml
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube_control_plane:!fake_hosts recover-control-plane.yml
fi

# Tests Cases
## Test Master API
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL

## Test that all nodes are Ready
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/015_check-nodes-ready.yml $ANSIBLE_LOG_LEVEL

## Test that all pods are Running
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/020_check-pods-running.yml $ANSIBLE_LOG_LEVEL

## Test pod creation and ping between them
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/030_check-network.yml $ANSIBLE_LOG_LEVEL

## Advanced DNS checks
if [ "${ENABLE_040_TEST}" = "true" ]; then
  ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL
fi

## Kubernetes conformance tests
ansible-playbook -i ${ANSIBLE_INVENTORY} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/100_check-k8s-conformance.yml $ANSIBLE_LOG_LEVEL

if [ "${IDEMPOT_CHECK}" = "true" ]; then
  ## Idempotency checks 1/5 (repeat deployment)
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} ${CI_TEST_ADDITIONAL_VARS} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml

  ## Idempotency checks 2/5 (Advanced DNS checks)
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml

  if [ "${RESET_CHECK}" = "true" ]; then
    ## Idempotency checks 3/5 (reset deployment)
    ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml

    ## Idempotency checks 4/5 (redeploy after reset)
    ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml

    ## Idempotency checks 5/5 (Advanced DNS checks)
    ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml
  fi
fi

# Test node removal procedure
if [ "${REMOVE_NODE_CHECK}" = "true" ]; then
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME} --limit "all:!fake_hosts" remove-node.yml
fi

# Clean up at the end, this is to allow stage1 tests to include cleanup test
if [ "${RESET_CHECK}" = "true" ]; then
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml
fi
|
||||
6
kubespray/tests/scripts/vagrant-validate.sh
Executable file
6
kubespray/tests/scripts/vagrant-validate.sh
Executable file
@@ -0,0 +1,6 @@
|
||||
#!/bin/bash
# Install the pinned Vagrant release and check that the repository
# Vagrantfile parses, without contacting any provider.
set -euxo pipefail

curl -sL "https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}_x86_64.deb" -o "/tmp/vagrant_${VAGRANT_VERSION}_x86_64.deb"
dpkg -i "/tmp/vagrant_${VAGRANT_VERSION}_x86_64.deb"
vagrant validate --ignore-provider
|
||||
19
kubespray/tests/scripts/vagrant_clean.sh
Executable file
19
kubespray/tests/scripts/vagrant_clean.sh
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
set -euxo pipefail

# Cleanup vagrant VMs to avoid name conflicts

apt-get install -y libvirt-clients

# Stop and unregister every libvirt domain left over from earlier runs.
for domain in $(virsh list --name)
do
  virsh destroy "$domain"
  virsh undefine "$domain"
done


# Cleanup domain volumes (box base images contain VAGRANTSLASH and are kept).
for volume in $(virsh vol-list default|grep \.img |grep -v VAGRANTSLASH | cut -f 2 -d ' ')
do
  virsh vol-delete "$volume" --pool default
done
|
||||
Reference in New Issue
Block a user