Move kubespray to dsk-dev

ByeonJungHun
2023-12-19 14:31:22 +09:00
parent a35325e16b
commit 5671a92148
2568 changed files with 0 additions and 0 deletions

View File

@@ -0,0 +1,82 @@
INVENTORY=$(PWD)/../inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini

$(HOME)/.ssh/id_rsa:
	mkdir -p $(HOME)/.ssh
	echo $(PRIVATE_KEY) | base64 -d > $(HOME)/.ssh/id_rsa
	chmod 400 $(HOME)/.ssh/id_rsa

init-gce: $(HOME)/.ssh/id_rsa
	# echo $(GCE_PEM_FILE) | base64 -d > $(HOME)/.ssh/gce
	echo "$(GCE_CREDENTIALS_B64)" | base64 -d > $(HOME)/.ssh/gce.json

init-do: $(HOME)/.ssh/id_rsa
	echo $(DO_PRIVATE_KEY) | base64 -d > $(HOME)/.ssh/id_rsa

init-packet:
	echo $(PACKET_VM_SSH_PRIVATE_KEY) | base64 -d > $(HOME)/.ssh/id_rsa
	chmod 400 $(HOME)/.ssh/id_rsa

create-tf:
	./scripts/create-tf.sh

delete-tf:
	./scripts/delete-tf.sh

create-gce: init-gce
	ansible-playbook cloud_playbooks/create-gce.yml -i local_inventory/hosts.cfg -c local \
		$(ANSIBLE_LOG_LEVEL) \
		-e @"files/${CI_JOB_NAME}.yml" \
		-e gce_credentials_file=$(HOME)/.ssh/gce.json \
		-e gce_project_id=$(GCE_PROJECT_ID) \
		-e gce_service_account_email=$(GCE_ACCOUNT) \
		-e inventory_path=$(INVENTORY) \
		-e test_id=$(TEST_ID) \
		-e preemptible=$(GCE_PREEMPTIBLE)

delete-gce:
	ansible-playbook -i $(INVENTORY) cloud_playbooks/delete-gce.yml -c local \
		$(ANSIBLE_LOG_LEVEL) \
		-e @"files/${CI_JOB_NAME}.yml" \
		-e test_id=$(TEST_ID) \
		-e gce_project_id=$(GCE_PROJECT_ID) \
		-e gce_service_account_email=$(GCE_ACCOUNT) \
		-e gce_credentials_file=$(HOME)/.ssh/gce.json \
		-e inventory_path=$(INVENTORY)

create-do: init-do
	ansible-playbook cloud_playbooks/create-do.yml -i local_inventory/hosts.cfg -c local \
		${ANSIBLE_LOG_LEVEL} \
		-e @"files/${CI_JOB_NAME}.yml" \
		-e inventory_path=$(INVENTORY) \
		-e test_id=${TEST_ID}

delete-do:
	ansible-playbook -i $(INVENTORY) cloud_playbooks/create-do.yml -c local \
		$(ANSIBLE_LOG_LEVEL) \
		-e @"files/${CI_JOB_NAME}.yml" \
		-e state=absent \
		-e test_id=${TEST_ID} \
		-e inventory_path=$(INVENTORY)

create-packet: init-packet
	ansible-playbook cloud_playbooks/create-packet.yml -c local \
		$(ANSIBLE_LOG_LEVEL) \
		-e @"files/${CI_JOB_NAME}.yml" \
		-e test_id=$(TEST_ID) \
		-e inventory_path=$(INVENTORY)

delete-packet:
	ansible-playbook cloud_playbooks/delete-packet.yml -c local \
		$(ANSIBLE_LOG_LEVEL) \
		-e @"files/${CI_JOB_NAME}.yml" \
		-e test_id=$(TEST_ID) \
		-e inventory_path=$(INVENTORY)

create-vagrant:
	vagrant up
	find / -name vagrant_ansible_inventory
	cp /builds/kargo-ci/kubernetes-sigs-kubespray/inventory/sample/vagrant_ansible_inventory $(INVENTORY)

delete-vagrant:
	vagrant destroy -f
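For orientation, a typical local invocation of these targets might look like the sketch below. The variable names come from the Makefile above, but the job name and key path are placeholders — in CI these values are injected by the pipeline.

```bash
# Hypothetical local run of the Packet/Equinix flow; CI normally sets these.
export CI_JOB_NAME=packet_ubuntu20-calico-aio                 # must match a file under files/
export TEST_ID=$(date +%s)                                    # any unique id works
export PACKET_VM_SSH_PRIVATE_KEY=$(base64 -w0 ~/.ssh/id_rsa)  # decoded by init-packet
make create-packet   # provisions the test VMs and templates the inventory
make delete-packet   # tears everything down after the job
```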

View File

@@ -0,0 +1,40 @@
# Kubespray cloud deployment tests
## Amazon Web Service
| | Calico | Flannel | Weave |
------------- | ------------- | ------------- | ------------- |
Debian Jessie | [![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-jessie/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-jessie) | [![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-jessie/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-jessie/) | [![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-jessie/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-jessie/) |
Ubuntu Trusty |[![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-trusty/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-trusty/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-trusty/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-trusty/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-trusty/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-trusty)|
RHEL 7.2 |[![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-rhel72/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-rhel72/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-rhel72/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-rhel72/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-rhel72/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-rhel72/)|
CentOS 7 |[![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-centos7/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-centos7/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-centos7/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-centos7/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-centos7/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-centos7/)|
## Test environment variables
### Common
Variable | Description | Required | Default
--------------------- | -------------------------------------- | ---------- | --------
`TEST_ID` | A unique execution ID for this test | Yes |
`KUBE_NETWORK_PLUGIN` | The network plugin (calico or flannel) | Yes |
`PRIVATE_KEY_FILE` | The path to the SSH private key file | No |
### AWS Tests
Variable | Description | Required | Default
--------------------- | ----------------------------------------------- | ---------- | ---------
`AWS_ACCESS_KEY` | The Amazon Access Key ID | Yes |
`AWS_SECRET_KEY` | The Amazon Secret Access Key | Yes |
`AWS_AMI_ID` | The AMI ID to deploy | Yes |
`AWS_KEY_PAIR_NAME` | The name of the EC2 key pair to use | Yes |
`AWS_SECURITY_GROUP` | The EC2 Security Group to use | No | default
`AWS_REGION` | The EC2 region | No | eu-central-1
#### Using a private SSH key
##### Key
```bash
openssl pkcs12 -in gce-secure.p12 -passin pass:notasecret -nodes -nocerts | openssl rsa -out gce-secure.pem
cat gce-secure.pem | base64 -w0 > GCE_PEM_FILE
```
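The other secrets consumed by the Makefile follow the same pattern: they are stored base64-encoded in CI variables and decoded at job start. A minimal sketch, assuming GNU `base64` and placeholder file names:

```bash
# Encode an SSH private key for the PRIVATE_KEY variable used by the Makefile
base64 -w0 ~/.ssh/ci_id_rsa > PRIVATE_KEY.b64
# Encode a GCP service-account JSON for GCE_CREDENTIALS_B64 (decoded by init-gce)
base64 -w0 gce-service-account.json > GCE_CREDENTIALS_B64.b64
```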

View File

@@ -0,0 +1,14 @@
[ssh_connection]
pipelining=True
ansible_ssh_common_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
retries=2
[defaults]
forks = 20
host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
stdout_callback = skippy
library = ./library:../library
callbacks_enabled = profile_tasks
jinja2_extensions = jinja2.ext.do
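To verify which of these settings Ansible actually picks up (pipelining, fact caching, the skippy callback, and so on), `ansible-config` can dump the values that differ from the defaults — a quick sanity check, not part of the CI flow:

```bash
# Show only the settings this ansible.cfg overrides
ANSIBLE_CONFIG=./ansible.cfg ansible-config dump --only-changed
```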

View File

@@ -0,0 +1,25 @@
---
- hosts: localhost
  become: False
  gather_facts: False
  tasks:
    - name: Provision a set of instances
      ec2:
        key_name: "{{ aws.key_name }}"
        aws_access_key: "{{ aws.access_key }}"
        aws_secret_key: "{{ aws.secret_key }}"
        region: "{{ aws.region }}"
        group_id: "{{ aws.group }}"
        instance_type: "{{ aws.instance_type }}"
        image: "{{ aws.ami_id }}"
        wait: true
        count: "{{ aws.count }}"
        instance_tags: "{{ aws.tags }}"
      register: ec2
    - name: Template the inventory
      template:
        src: ../templates/inventory-aws.j2  # noqa 404 CI inventory templates are not in role_path
        dest: "{{ inventory_path }}"
        mode: 0644

View File

@@ -0,0 +1,91 @@
---
- hosts: localhost
  become: false
  gather_facts: no
  vars:
    state: "present"
    ssh_key_id: "6536865"
    cloud_machine_type: 2gb
    regions:
      - nyc1
      - sfo1
      - nyc2
      - ams2
      - sgp1
      - lon1
      - nyc3
      - ams3
      - fra1
      - tor1
      - sfo2
      - blr1
    cloud_images:
      - fedora-24-x64
      - centos-5-x64
      - centos-5-x32
      - fedora-25-x64
      - debian-7-x64
      - debian-7-x32
      - debian-8-x64
      - debian-8-x32
      - centos-6-x32
      - centos-6-x64
      - ubuntu-16-10-x32
      - ubuntu-16-10-x64
      - freebsd-11-0-x64-zfs
      - freebsd-10-3-x64-zfs
      - ubuntu-12-04-x32
      - ubuntu-12-04-x64
      - ubuntu-16-04-x64
      - ubuntu-16-04-x32
      - ubuntu-14-04-x64
      - ubuntu-14-04-x32
      - centos-7-x64
      - freebsd-11-0-x64
      - freebsd-10-3-x64
      - centos-7-3-1611-x64
    mode: default
  tasks:
    - name: replace_test_id
      set_fact:
        test_name: "{{ test_id | regex_replace('\\.', '-') }}"
    - name: show vars
      debug: msg="{{ cloud_region }}, {{ cloud_image }}"
    - name: set instance names
      set_fact:
        instance_names: >-
          {%- if mode in ['separate', 'ha'] -%}
          ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"]
          {%- else -%}
          ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2"]
          {%- endif -%}
    - name: Manage DO instances | {{ state }}
      digital_ocean:
        unique_name: yes
        api_token: "{{ lookup('env', 'DO_API_TOKEN') }}"
        command: "droplet"
        image_id: "{{ cloud_image }}"
        name: "{{ item }}"
        private_networking: no
        region_id: "{{ cloud_region }}"
        size_id: "{{ cloud_machine_type }}"
        ssh_key_ids: "{{ ssh_key_id }}"
        state: "{{ state }}"
        wait: yes
      register: droplets
      with_items: "{{ instance_names }}"
    - debug:  # noqa unnamed-task
        msg: "{{ droplets }}, {{ inventory_path }}"
      when: state == 'present'
    - name: Template the inventory
      template:
        src: ../templates/inventory-do.j2  # noqa 404 CI templates are not in role_path
        dest: "{{ inventory_path }}"
        mode: 0644
      when: state == 'present'
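Mirroring the `create-do` Make target, this playbook can also be run by hand; the token value below is a placeholder, read at runtime via the `lookup('env', 'DO_API_TOKEN')` call above:

```bash
export DO_API_TOKEN=xxxxxxxx   # placeholder; never commit a real token
ansible-playbook cloud_playbooks/create-do.yml -i local_inventory/hosts.cfg -c local \
  -e @"files/${CI_JOB_NAME}.yml" \
  -e inventory_path="${INVENTORY}" \
  -e test_id="${TEST_ID}"
```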

View File

@@ -0,0 +1,77 @@
---
- hosts: localhost
  become: false
  gather_facts: no
  vars:
    cloud_machine_type: g1-small
    mode: default
    preemptible: no
    ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}"
    delete_group_vars: no
  tasks:
    - name: include vars for test {{ ci_job_name }}
      include_vars: "../files/{{ ci_job_name }}.yml"
    - name: replace_test_id
      set_fact:
        test_name: "{{ test_id | regex_replace('\\.', '-') }}"
    - name: set instance names
      set_fact:
        instance_names: >-
          {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%}
          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
          {%- elif mode == 'aio' -%}
          k8s-{{ test_name }}-1
          {%- else -%}
          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
          {%- endif -%}
    - name: Create gce instances
      google.cloud.gcp_compute_instance:
        instance_names: "{{ instance_names }}"
        machine_type: "{{ cloud_machine_type }}"
        image: "{{ cloud_image | default(omit) }}"
        image_family: "{{ cloud_image_family | default(omit) }}"
        preemptible: "{{ preemptible }}"
        service_account_email: "{{ gce_service_account_email }}"
        pem_file: "{{ gce_pem_file | default(omit) }}"
        credentials_file: "{{ gce_credentials_file | default(omit) }}"
        project_id: "{{ gce_project_id }}"
        zone: "{{ cloud_region }}"
        metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}", "startup-script": "{{ startup_script|default("") }}"}'
        tags: "build-{{ test_name }},{{ kube_network_plugin }}"
        ip_forward: yes
        service_account_permissions: ['compute-rw']
      register: gce
    - name: Add instances to host group
      add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
      with_items: '{{ gce.instance_data }}'
    - name: Template the inventory  # noqa 404 CI inventory templates are not in role_path
      template:
        src: ../templates/inventory-gce.j2
        dest: "{{ inventory_path }}"
        mode: 0644
    - name: Make group_vars directory
      file:
        path: "{{ inventory_path | dirname }}/group_vars"
        state: directory
        mode: 0755
      when: mode in ['scale', 'separate-scale', 'ha-scale']
    - name: Template fake hosts group vars  # noqa 404 CI templates are not in role_path
      template:
        src: ../templates/fake_hosts.yml.j2
        dest: "{{ inventory_path | dirname }}/group_vars/fake_hosts.yml"
        mode: 0644
      when: mode in ['scale', 'separate-scale', 'ha-scale']
    - name: Delete group_vars directory
      file:
        path: "{{ inventory_path | dirname }}/group_vars"
        state: absent
        recurse: yes
      when: delete_group_vars

View File

@@ -0,0 +1,10 @@
---
- hosts: localhost
  gather_facts: no
  become: true
  vars:
    ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}"
    test_name: "{{ test_id | regex_replace('\\.', '-') }}"
  roles:
    - { role: packet-ci, vm_cleanup: false }

View File

@@ -0,0 +1,18 @@
---
- hosts: kube_node
  become: False
  tasks:
    - name: Gather EC2 facts
      action: ec2_facts
    - name: Terminate EC2 instances
      ec2:
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
        state: absent
        instance_ids: "{{ ansible_ec2_instance_id }}"
        region: "{{ ansible_ec2_placement_region }}"
        wait: True
      delegate_to: localhost
      connection: local

View File

@@ -0,0 +1,48 @@
---
- hosts: localhost
  become: false
  gather_facts: no
  vars:
    mode: default
  tasks:
    - name: replace_test_id
      set_fact:
        test_name: "{{ test_id | regex_replace('\\.', '-') }}"
    - name: set instance names
      set_fact:
        instance_names: >-
          {%- if mode in ['separate', 'ha'] -%}
          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
          {%- else -%}
          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
          {%- endif -%}
    - name: stop gce instances
      google.cloud.gcp_compute_instance:
        instance_names: "{{ instance_names }}"
        image: "{{ cloud_image | default(omit) }}"
        service_account_email: "{{ gce_service_account_email }}"
        pem_file: "{{ gce_pem_file | default(omit) }}"
        credentials_file: "{{ gce_credentials_file | default(omit) }}"
        project_id: "{{ gce_project_id }}"
        zone: "{{ cloud_region | default('europe-west1-b') }}"
        state: 'stopped'
      async: 120
      poll: 3
      register: gce
    - name: delete gce instances
      google.cloud.gcp_compute_instance:
        instance_names: "{{ instance_names }}"
        image: "{{ cloud_image | default(omit) }}"
        service_account_email: "{{ gce_service_account_email }}"
        pem_file: "{{ gce_pem_file | default(omit) }}"
        credentials_file: "{{ gce_credentials_file | default(omit) }}"
        project_id: "{{ gce_project_id }}"
        zone: "{{ cloud_region | default('europe-west1-b') }}"
        state: 'absent'
      async: 120
      poll: 3
      register: gce

View File

@@ -0,0 +1,10 @@
---
- hosts: localhost
  gather_facts: no
  become: true
  vars:
    ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}"
    test_name: "{{ test_id | regex_replace('\\.', '-') }}"
  roles:
    - { role: packet-ci, vm_cleanup: true }

View File

@@ -0,0 +1,43 @@
---
# VM sizing
vm_cpu_cores: 2
vm_cpu_sockets: 1
vm_cpu_threads: 2
vm_memory: 2048Mi
# Request/Limit allocation settings
cpu_allocation_ratio: 0.5
memory_allocation_ratio: 1
# Default path for inventory
inventory_path: "/tmp/{{ test_name }}/inventory"
# Deployment mode
mode: aio
# Cloud init config for each os type
# distro: fedora -> I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU=
# distro: rhel: -> I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo=
# distro: rhel (+ sudo and hostname packages): -> I2Nsb3VkLWNvbmZpZwpwYWNrYWdlczoKIC0gc3VkbwogLSBob3N0bmFtZQpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo=
# generic one -> I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1
cloud_init:
centos-7: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
centos-8: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
almalinux-8: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
rockylinux-8: "I2Nsb3VkLWNvbmZpZwpwYWNrYWdlczoKIC0gc3VkbwogLSBob3N0bmFtZQpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
rockylinux-9: "I2Nsb3VkLWNvbmZpZwpwYWNrYWdlczoKIC0gc3VkbwogLSBob3N0bmFtZQpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
debian-9: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
debian-10: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
debian-11: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
fedora-35: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU="
fedora-36: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU="
opensuse-leap-15: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
rhel-server-7: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
amazon-linux-2: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
ubuntu-1604: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
ubuntu-1804: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
ubuntu-2004: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
ubuntu-2204: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
oracle-7: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="

View File

@@ -0,0 +1,49 @@
---
- name: "Create CI namespace {{ test_name }} for test vms"
  command: "kubectl create namespace {{ test_name }}"
  changed_when: false

- name: "Create temp dir /tmp/{{ test_name }} for CI files"
  file:
    path: "/tmp/{{ test_name }}"
    state: directory
    mode: 0755

- name: Template vm files for CI job
  template:
    src: "vm.yml.j2"
    dest: "/tmp/{{ test_name }}/instance-{{ vm_id }}.yml"
    mode: 0644
  loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
  loop_control:
    index_var: vm_id

- name: Start vms for CI job
  command: "kubectl apply -f /tmp/{{ test_name }}/instance-{{ vm_id }}.yml"
  changed_when: false
  loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
  loop_control:
    index_var: vm_id

- name: Wait for vms to have ipaddress assigned
  shell: "set -o pipefail && kubectl get vmis -n {{ test_name }} instance-{{ vm_id }} -o json | jq '.status.interfaces[].ipAddress' | tr -d '\"'"
  args:
    executable: /bin/bash
  changed_when: false
  register: vm_ips
  loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
  loop_control:
    index_var: vm_id
  retries: 20
  delay: 15
  until:
    - vm_ips.stdout | ipaddr

- name: "Create inventory for CI test in file /tmp/{{ test_name }}/inventory"
  template:
    src: "inventory.j2"
    dest: "{{ inventory_path }}"
    mode: 0644
  vars:
    vms: "{{ vm_ips }}"
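When a job hangs in the wait task, the same kubectl/jq pipeline can be run by hand against the CI cluster; `jq -r` replaces the `tr -d '"'` used above, and the namespace and instance names are placeholders:

```bash
# Check whether a KubeVirt VMI has been assigned an IP yet
kubectl get vmis -n k8s-mytest-1234 instance-1 -o json \
  | jq -r '.status.interfaces[].ipAddress'
```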

View File

@@ -0,0 +1,30 @@
---
- name: Check if temp directory for {{ test_name }} exists
  stat:
    path: "/tmp/{{ test_name }}"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: temp_dir_details

- name: "Cleanup temp directory for {{ test_name }}"
  file:
    path: "/tmp/{{ test_name }}"
    state: absent

- name: "Cleanup namespace for {{ test_name }}"
  command: "kubectl delete namespace {{ test_name }}"
  changed_when: false

- name: Wait for namespace {{ test_name }} to be fully deleted
  command: kubectl get ns {{ test_name }}
  register: delete_namespace
  failed_when:
    - delete_namespace.rc == 0
  changed_when:
    - delete_namespace.rc == 0
  retries: 12
  delay: "10"
  until:
    - delete_namespace.rc != 0

View File

@@ -0,0 +1,16 @@
---
- name: "Include custom vars for ci job: {{ ci_job_name }}"
  include_vars: "../files/{{ ci_job_name }}.yml"

- name: Set VM count needed for CI test_id
  set_fact:
    vm_count: "{%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale', 'ha-recover', 'ha-recover-noquorum'] -%}{{ 3|int }}{%- elif mode == 'aio' -%}{{ 1|int }}{%- else -%}{{ 2|int }}{%- endif -%}"

- import_tasks: create-vms.yml
  when:
    - not vm_cleanup

- import_tasks: delete-vms.yml
  when:
    - vm_cleanup | default(false)

View File

@@ -0,0 +1,93 @@
[all]
{% for instance in vms.results %}
instance-{{ loop.index }} ansible_host={{ instance.stdout }}
{% endfor %}
{% if mode is defined and mode in ["separate", "separate-scale"] %}
[kube_control_plane]
instance-1
[kube_node]
instance-2
[etcd]
instance-3
{% elif mode is defined and mode in ["ha", "ha-scale"] %}
[kube_control_plane]
instance-1
instance-2
[kube_node]
instance-3
[etcd]
instance-1
instance-2
instance-3
{% elif mode == "default" %}
[kube_control_plane]
instance-1
[kube_node]
instance-2
[etcd]
instance-1
{% elif mode == "aio" %}
[kube_control_plane]
instance-1
[kube_node]
instance-1
[etcd]
instance-1
{% elif mode == "ha-recover" %}
[kube_control_plane]
instance-1
instance-2
[kube_node]
instance-3
[etcd]
instance-3
instance-1
instance-2
[broken_kube_control_plane]
instance-2
[broken_etcd]
instance-2 etcd_member_name=etcd3
{% elif mode == "ha-recover-noquorum" %}
[kube_control_plane]
instance-3
instance-1
instance-2
[kube_node]
instance-3
[etcd]
instance-3
instance-1
instance-2
[broken_kube_control_plane]
instance-1
instance-2
[broken_etcd]
instance-1 etcd_member_name=etcd2
instance-2 etcd_member_name=etcd3
{% endif %}
[k8s_cluster:children]
kube_node
kube_control_plane
calico_rr
[calico_rr]
[fake_hosts]
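A rendered inventory can be sanity-checked before the deployment consumes it; `ansible-inventory --graph` prints the group tree this template produces (the path matches the role's default `inventory_path` and is used here as an example):

```bash
ansible-inventory -i /tmp/<test_name>/inventory --graph
```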

View File

@@ -0,0 +1,52 @@
---
apiVersion: kubevirt.io/v1alpha3
kind: VirtualMachine
metadata:
  name: "instance-{{ vm_id }}"
  namespace: "{{ test_name }}"
  labels:
    kubevirt.io/os: {{ cloud_image }}
spec:
  running: true
  template:
    metadata:
      labels:
        kubevirt.io/size: small
        kubevirt.io/domain: "{{ test_name }}"
    spec:
      domain:
        devices:
          blockMultiQueue: true
          disks:
            - disk:
                bus: virtio
              name: containervolume
              cache: writethrough
            - disk:
                bus: virtio
              name: cloudinitvolume
          interfaces:
            - name: default
              bridge: {}
        cpu:
          cores: {{ vm_cpu_cores }}
          sockets: {{ vm_cpu_sockets }}
          threads: {{ vm_cpu_threads }}
        resources:
          requests:
            memory: {{ vm_memory * memory_allocation_ratio }}
            cpu: {{ vm_cpu_cores * cpu_allocation_ratio }}
          limits:
            memory: {{ vm_memory }}
            cpu: {{ vm_cpu_cores }}
      networks:
        - name: default
          pod: {}
      terminationGracePeriodSeconds: 0
      volumes:
        - name: containervolume
          containerDisk:
            image: quay.io/kubespray/vm-{{ cloud_image }}
        - name: cloudinitvolume
          cloudInitNoCloud:
            userDataBase64: {{ cloud_init[cloud_image] }}
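For debugging, a rendered manifest can be applied directly, and the cloud-init payload an image boots with can be recovered by reversing the base64 encoding used in the `cloud_init` map (file names below are examples):

```bash
kubectl apply -f /tmp/<test_name>/instance-1.yml
# Decode the user-data for a given image to inspect users/keys/packages
echo "<base64 value from cloud_init above>" | base64 -d
```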

View File

@@ -0,0 +1,11 @@
[Credentials]
gs_access_key_id = {{ gs_key }}
gs_secret_access_key = {{ gs_skey }}
[Boto]
https_validate_certificates = True
[GoogleCompute]
[GSUtil]
default_project_id = {{ gce_project_id }}
content_language = en
default_api_version = 2
[OAuth2]

View File

@@ -0,0 +1,9 @@
{
  "rule":
  [
    {
      "action": {"type": "Delete"},
      "condition": {"age": {{ expire_days }}}
    }
  ]
}

View File

@@ -0,0 +1,80 @@
---
- hosts: localhost
  become: false
  gather_facts: no
  vars:
    expire_days: 2
  tasks:
    - name: Generate unique bucket name prefix
      raw: date +%Y%m%d
      changed_when: false
      register: out
    - name: replace_test_id
      set_fact:
        test_name: "kargo-ci-{{ out.stdout_lines[0] }}"
    - name: Set file_name for logs
      set_fact:
        file_name: "{{ ostype }}-{{ kube_network_plugin }}-{{ commit }}-logs.tar.gz"
    - name: Create a bucket
      gc_storage:
        bucket: "{{ test_name }}"
        mode: create
        permission: public-read
        gs_access_key: "{{ gs_key }}"
        gs_secret_key: "{{ gs_skey }}"
      no_log: True
    - name: Create a lifecycle template for the bucket
      template:
        src: gcs_life.json.j2
        dest: "{{ dir }}/gcs_life.json"
        mode: 0644
    - name: Create a boto config to access GCS
      template:
        src: boto.j2
        dest: "{{ dir }}/.boto"
        mode: 0640
      no_log: True
    - name: Download gsutil cp installer
      get_url:
        url: https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash
        dest: "{{ dir }}/gcp-installer.sh"
    - name: Get gsutil tool
      script: "{{ dir }}/gcp-installer.sh"
      environment:
        CLOUDSDK_CORE_DISABLE_PROMPTS: 1
        CLOUDSDK_INSTALL_DIR: "{{ dir }}"
      no_log: True
      failed_when: false
    - name: Apply the lifecycle rules  # noqa 301
      command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
      changed_when: false
      environment:
        BOTO_CONFIG: "{{ dir }}/.boto"
      no_log: True
    - name: Upload collected diagnostic info
      gc_storage:
        bucket: "{{ test_name }}"
        mode: put
        permission: public-read
        object: "{{ file_name }}"
        src: "{{ dir }}/logs.tar.gz"
        headers: '{"Content-Encoding": "x-gzip"}'
        gs_access_key: "{{ gs_key }}"
        gs_secret_key: "{{ gs_skey }}"
        expiration: "{{ (expire_days * 36000) | int }}"
      failed_when: false
      no_log: True
    - debug:  # noqa unnamed-task
        msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"

View File

@@ -0,0 +1,12 @@
---
- hosts: all
  become: False
  gather_facts: False
  tasks:
    - name: Wait until SSH is available
      wait_for:
        host: "{{ ansible_host }}"
        port: 22
        timeout: 240
      delegate_to: localhost

View File

@@ -0,0 +1,34 @@
---
docker_registry_mirrors:
  - "https://mirror.gcr.io"
containerd_grpc_max_recv_message_size: 16777216
containerd_grpc_max_send_message_size: 16777216
containerd_registries:
  "docker.io":
    - "https://mirror.gcr.io"
    - "https://registry-1.docker.io"
containerd_max_container_log_line_size: -1
crio_registries:
  - prefix: docker.io
    insecure: false
    blocked: false
    unqualified: false
    location: registry-1.docker.io
    mirrors:
      - location: mirror.gcr.io
        insecure: false
netcheck_agent_image_repo: "{{ quay_image_repo }}/kubespray/k8s-netchecker-agent"
netcheck_server_image_repo: "{{ quay_image_repo }}/kubespray/k8s-netchecker-server"
nginx_image_repo: "{{ quay_image_repo }}/kubespray/nginx"
flannel_image_repo: "{{ quay_image_repo }}/kubespray/flannel"

# Kubespray settings for tests
deploy_netchecker: true
dns_min_replicas: 1

View File

@@ -0,0 +1,5 @@
---
# Kubespray settings for tests
deploy_netchecker: true
dns_min_replicas: 1
unsafe_show_logs: true

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: almalinux-8
mode: ha
vm_memory: 3072Mi
# Kubespray settings
calico_bpf_enabled: true
loadbalancer_apiserver_localhost: true
use_localhost_as_kubeapi_loadbalancer: true
auto_renew_certificates: true

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
# Kubespray settings
enable_nodelocaldns_secondary: true
loadbalancer_apiserver_type: haproxy

View File

@@ -0,0 +1,7 @@
---
# Instance settings
cloud_image: almalinux-8
mode: ha
# Kubespray settings
auto_renew_certificates: true

View File

@@ -0,0 +1,18 @@
---
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
# Kubespray settings
metrics_server_enabled: true
dashboard_namespace: "kube-dashboard"
dashboard_enabled: true
loadbalancer_apiserver_type: haproxy
# NTP management
ntp_enabled: true
ntp_timezone: Etc/UTC
ntp_manage_config: true
ntp_tinker_panic: true
ntp_force_sync_immediately: true

View File

@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: almalinux-8
mode: default
# Kubespray settings
container_manager: crio
auto_renew_certificates: true

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
# Kubespray settings
kube_network_plugin: kube-ovn

View File

@@ -0,0 +1,4 @@
---
# Instance settings
cloud_image: amazon-linux-2
mode: aio

View File

@@ -0,0 +1,18 @@
---
# Instance settings
cloud_image: centos-7
mode: ha
# Kubespray settings
download_localhost: true
download_run_once: true
typha_enabled: true
calico_apiserver_enabled: true
calico_backend: kdd
typha_secure: true
disable_ipv6_dns: true
auto_renew_certificates: true
# Docker settings
container_manager: docker
etcd_deployment_type: docker

View File

@@ -0,0 +1,13 @@
---
# Instance settings
cloud_image: centos-7
mode: ha
# Kubespray settings
download_localhost: false
download_run_once: true
typha_enabled: true
calico_apiserver_enabled: true
calico_backend: kdd
typha_secure: true
auto_renew_certificates: true

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: centos-7
mode: ha
# Kubespray settings
calico_datastore: etcd
kube_network_plugin: canal
auto_renew_certificates: true

View File

@@ -0,0 +1,50 @@
---
# Instance settings
cloud_image: centos-7
mode: ha
# Kubespray settings
kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085
kube_proxy_mode: iptables
kube_network_plugin: flannel
download_localhost: false
download_run_once: true
helm_enabled: true
krew_enabled: true
kubernetes_audit: true
etcd_events_cluster_enabled: true
local_volume_provisioner_enabled: true
kube_encrypt_secret_data: true
ingress_nginx_enabled: true
ingress_nginx_webhook_enabled: true
ingress_nginx_webhook_job_ttl: 30
cert_manager_enabled: true
# Disable as health checks are still unstable and slow to respond.
metrics_server_enabled: false
metrics_server_kubelet_insecure_tls: true
kube_token_auth: true
enable_nodelocaldns: false
kubelet_rotate_server_certificates: true
kube_oidc_url: https://accounts.google.com/.well-known/openid-configuration
kube_oidc_client_id: kubespray-example
tls_min_version: "VersionTLS12"
tls_cipher_suites:
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
# test etcd tls cipher suites
etcd_tls_cipher_suites:
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
# Containerd
containerd_storage_dir: /var/data/containerd
containerd_state_dir: /run/cri/containerd
containerd_oom_score: -999
# Kube-vip
kube_vip_enabled: true
kube_vip_arp_enabled: true
kube_vip_controlplane_enabled: true
kube_vip_address: 192.168.1.100

View File

@@ -0,0 +1,7 @@
---
# Instance settings
cloud_image: centos-7
mode: default
# Kubespray settings
kube_network_plugin_multus: true

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: centos-7
mode: ha
# Kubespray settings
kube_network_plugin: weave
kubernetes_audit: true
# Needed to upgrade from 1.16 to 1.17; otherwise the upgrade is only partial and bugs follow
upgrade_cluster_setup: true

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: debian-10
mode: default
# Kubespray settings
auto_renew_certificates: true
# plugins
helm_enabled: true
krew_enabled: true

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: debian-10
mode: ha
# Kubespray settings
kube_network_plugin: cilium
enable_network_policy: true
cilium_kube_proxy_replacement: strict

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: debian-10
mode: default
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1,16 @@
---
# Instance settings
cloud_image: debian-11
mode: default
# Kubespray settings
download_run_once: true
# Pin disabling ipip mode to ensure proper upgrade
ipip: false
calico_pool_blocksize: 26
calico_vxlan_mode: Always
calico_network_backend: bird
# Needed to bypass deprecation check
ignore_assert_errors: true

View File

@@ -0,0 +1,13 @@
---
# Instance settings
cloud_image: debian-11
mode: default
# Pin disabling ipip mode to ensure proper upgrade
ipip: false
calico_pool_blocksize: 26
calico_vxlan_mode: Always
calico_network_backend: bird
# Needed to bypass deprecation check
ignore_assert_errors: true

View File

@@ -0,0 +1,4 @@
---
# Instance settings
cloud_image: debian-11
mode: default

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: debian-11
mode: default
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: debian-9
mode: default
# Kubespray settings
kube_network_plugin: macvlan
enable_nodelocaldns: false
kube_proxy_masquerade_all: true
macvlan_interface: "eth0"
auto_renew_certificates: true

View File

@@ -0,0 +1,14 @@
---
# Instance settings
cloud_image: fedora-35
mode: default
# Kubespray settings
auto_renew_certificates: true
# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011
# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x; Calico v3.20.x/v3.21.x Pods show the following error:
# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace.
kube_proxy_mode: iptables
# Test with SELinux in enforcing mode
preinstall_selinux_state: enforcing

View File

@@ -0,0 +1,19 @@
---
# Instance settings
cloud_image: fedora-35
mode: default
# Kubespray settings
auto_renew_certificates: true
# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011
# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x; Calico v3.20.x/v3.21.x Pods show the following error:
# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace.
kube_proxy_mode: iptables
# Test with SELinux in enforcing mode
preinstall_selinux_state: enforcing
# Test Alpha swap feature by leveraging zswap default config in Fedora 35
kubelet_fail_swap_on: False
kube_feature_gates:
- "NodeSwap=True"

View File

@@ -0,0 +1,15 @@
---
# Instance settings
cloud_image: fedora-35
mode: default
# Kubespray settings
container_manager: crio
auto_renew_certificates: true
# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011
# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x; Calico v3.20.x/v3.21.x Pods show the following error:
# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace.
kube_proxy_mode: iptables
# Test with SELinux in enforcing mode
preinstall_selinux_state: enforcing

View File

@@ -0,0 +1,15 @@
---
# Instance settings
cloud_image: fedora-36
mode: default
# Kubespray settings
auto_renew_certificates: true
# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011
# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x; Calico v3.20.x/v3.21.x Pods show the following error:
# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace.
kube_proxy_mode: iptables
# Docker specific settings:
container_manager: docker
etcd_deployment_type: docker

View File

@@ -0,0 +1,12 @@
---
# Instance settings
cloud_image: fedora-36
mode: default
# Kubespray settings
kube_network_plugin: weave
# Docker specific settings:
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1,7 @@
---
# Instance settings
cloud_image: fedora-36
mode: default
# Kubespray settings
kube_network_plugin: kube-ovn

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: opensuse-leap-15
mode: default
# Kubespray settings
calico_datastore: etcd
kube_network_plugin: canal
auto_renew_certificates: true

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: opensuse-leap-15
mode: default
# Kubespray settings
kube_network_plugin: cilium
# Docker specific settings:
container_manager: docker
etcd_deployment_type: docker

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: rockylinux-8
mode: default
vm_memory: 3072Mi
# Kubespray settings
metrics_server_enabled: true
dashboard_namespace: "kube-dashboard"
dashboard_enabled: true
loadbalancer_apiserver_type: haproxy

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: rockylinux-9
mode: default
vm_memory: 3072Mi
# Kubespray settings
metrics_server_enabled: true
dashboard_namespace: "kube-dashboard"
dashboard_enabled: true
loadbalancer_apiserver_type: haproxy

View File

@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: ubuntu-1604
mode: ha
# Kubespray settings
calico_datastore: etcd
kube_network_plugin: canal

View File

@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: ubuntu-1604
mode: separate
# Kubespray settings
calico_datastore: etcd
kube_network_plugin: canal

View File

@@ -0,0 +1,16 @@
---
# Instance settings
cloud_image: ubuntu-1604
mode: separate
# Kubespray settings
kube_network_plugin: weave
auto_renew_certificates: true
# Docker specific settings:
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns
# Ubuntu 16 - the available docker containerd package stopped at 1.4.6
docker_containerd_version: latest

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: ubuntu-1604
mode: ha
# Kubespray settings
kube_network_plugin: flannel
etcd_deployment_type: kubeadm
kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085
skip_non_kubeadm_warning: true

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: ubuntu-1804
mode: aio
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1,4 @@
---
# Instance settings
cloud_image: ubuntu-1804
mode: aio

View File

@@ -0,0 +1,4 @@
---
# Instance settings
cloud_image: ubuntu-1804
mode: ha-recover-noquorum

View File

@@ -0,0 +1,4 @@
---
# Instance settings
cloud_image: ubuntu-1804
mode: ha-recover

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: ubuntu-1804
mode: separate
# Kubespray settings
kube_network_plugin: cilium
enable_network_policy: true
auto_renew_certificates: true

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: ubuntu-1804
mode: default
# Kubespray settings
container_manager: crio
download_localhost: false
download_run_once: true

View File

@@ -0,0 +1,22 @@
---
# Instance settings
cloud_image: ubuntu-1804
mode: ha
# Kubespray settings
kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085
kube_proxy_mode: iptables
kube_network_plugin: flannel
helm_enabled: true
krew_enabled: true
kubernetes_audit: true
etcd_events_cluster_enabled: true
local_volume_provisioner_enabled: true
kube_encrypt_secret_data: true
ingress_nginx_enabled: true
cert_manager_enabled: true
# Disable as health checks are still unstable and slow to respond.
metrics_server_enabled: false
metrics_server_kubelet_insecure_tls: true
kube_token_auth: true
enable_nodelocaldns: false

View File

@@ -0,0 +1,24 @@
---
# Instance settings
cloud_image: ubuntu-1804
mode: ha
# Kubespray settings
kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085
kube_proxy_mode: iptables
kube_network_plugin: flannel
helm_enabled: true
krew_enabled: true
kubernetes_audit: true
etcd_events_cluster_enabled: true
local_volume_provisioner_enabled: true
kube_encrypt_secret_data: true
ingress_nginx_enabled: true
ingress_nginx_webhook_enabled: true
ingress_nginx_webhook_job_ttl: 30
cert_manager_enabled: true
# Disable as health checks are still unstable and slow to respond.
metrics_server_enabled: false
metrics_server_kubelet_insecure_tls: true
kube_token_auth: true
enable_nodelocaldns: false

View File

@@ -0,0 +1,16 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: aio
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: aio
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False

View File

@@ -0,0 +1,107 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: aio
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False
# The following settings are for hardening
## kube-apiserver
authorization_modes: ['Node', 'RBAC']
# AppArmor-based OS
kube_apiserver_feature_gates: ['AppArmor=true']
kube_apiserver_request_timeout: 120s
kube_apiserver_service_account_lookup: true
# enable kubernetes audit
kubernetes_audit: true
audit_log_path: "/var/log/kube-apiserver-log.json"
audit_log_maxage: 30
audit_log_maxbackups: 10
audit_log_maxsize: 100
tls_min_version: VersionTLS12
tls_cipher_suites:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
# enable encryption at rest
kube_encrypt_secret_data: true
kube_encryption_resources: [secrets]
kube_encryption_algorithm: "secretbox"
kube_apiserver_enable_admission_plugins:
- EventRateLimit
- AlwaysPullImages
- ServiceAccount
- NamespaceLifecycle
- NodeRestriction
- LimitRanger
- ResourceQuota
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- PodNodeSelector
- PodSecurity
kube_apiserver_admission_control_config_file: true
# EventRateLimit plugin configuration
kube_apiserver_admission_event_rate_limits:
  limit_1:
    type: Namespace
    qps: 50
    burst: 100
    cache_size: 2000
  limit_2:
    type: User
    qps: 50
    burst: 100
kube_profiling: false
## kube-controller-manager
kube_controller_manager_bind_address: 127.0.0.1
kube_controller_terminated_pod_gc_threshold: 50
# AppArmor-based OS
kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"]
## kube-scheduler
kube_scheduler_bind_address: 127.0.0.1
kube_kubeadm_scheduler_extra_args:
  profiling: false
# AppArmor-based OS
kube_scheduler_feature_gates: ["AppArmor=true"]
## etcd
etcd_deployment_type: kubeadm
## kubelet
kubelet_authentication_token_webhook: true
kube_read_only_port: 0
kubelet_rotate_server_certificates: true
kubelet_protect_kernel_defaults: true
kubelet_event_record_qps: 1
kubelet_rotate_certificates: true
kubelet_streaming_connection_idle_timeout: "5m"
kubelet_make_iptables_util_chains: true
kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"]
kubelet_seccomp_default: true
kubelet_systemd_hardening: true
# In case you have multiple interfaces in your
# control plane nodes and you want to specify the right
# IP addresses, kubelet_secure_addresses allows you
# to specify the IP from which the kubelet
# will receive the packets.
# kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
# additional configurations
kube_owner: root
kube_cert_group: root
# create a default Pod Security Configuration and deny running of insecure pods
# kube-system namespace is exempted by default
kube_pod_security_use_default: true
kube_pod_security_default_enforce: restricted
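One way to check that these hardening options actually reached the control plane is to grep the kubeadm-generated static pod manifest on a control-plane node; the path below is the kubeadm default, and the flag names are the upstream kube-apiserver ones:

```bash
# Run on a control-plane node after deployment
grep -E 'audit-log-path|tls-min-version|enable-admission-plugins|encryption-provider-config' \
  /etc/kubernetes/manifests/kube-apiserver.yaml
```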

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: aio
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False

View File

@@ -0,0 +1,24 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: ha
# use the kubeadm etcd setting to test the upgrade
etcd_deployment_type: kubeadm
upgrade_cluster_setup: true
# Currently ipvs is not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: false
# Pin ipip to disabled to ensure a proper upgrade
ipip: false
calico_vxlan_mode: Always
calico_network_backend: bird
# Needed to bypass deprecation check
ignore_assert_errors: true
### FIXME FLORYUT Needed for upgrade job, will be removed when releasing kubespray 2.20
calico_pool_blocksize: 24
### /FIXME
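
With ipip disabled and calico_vxlan_mode pinned to Always, the upgrade should land on a VXLAN-only Calico pool. A sketch of the IPPool these settings translate to (the pool name and CIDR are assumptions; the fields are the projectcalico.org/v3 API):

    apiVersion: projectcalico.org/v3
    kind: IPPool
    metadata:
      name: default-pool        # assumed name
    spec:
      cidr: 10.233.64.0/18      # assumed pod CIDR
      blockSize: 24             # calico_pool_blocksize pinned above
      ipipMode: Never           # ipip: false
      vxlanMode: Always         # calico_vxlan_mode: Always
      natOutgoing: true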


@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: default
# use the kubeadm etcd setting to test the upgrade
etcd_deployment_type: kubeadm
# Currently ipvs is not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: false


@@ -0,0 +1,13 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: ha
# Kubespray settings
calico_wireguard_enabled: true
auto_renew_certificates: true
# Currently ipvs is not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
# The KVM kernel used by Packet instances is missing the dummy.ko kernel module, so nodelocaldns cannot be enabled
enable_nodelocaldns: false
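
calico_wireguard_enabled turns on node-to-node WireGuard encryption in Calico. Upstream this maps to a single Felix setting; a minimal sketch (standard projectcalico.org/v3 API; "default" is the usual cluster-wide object name):

    apiVersion: projectcalico.org/v3
    kind: FelixConfiguration
    metadata:
      name: default
    spec:
      wireguardEnabled: true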


@@ -0,0 +1,17 @@
---
# Instance settings
cloud_image: ubuntu-2204
mode: aio
vm_memory: 1600Mi
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs is not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: false
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns


@@ -0,0 +1,12 @@
---
# Instance settings
cloud_image: ubuntu-2204
mode: aio
vm_memory: 1600Mi
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs is not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: false


@@ -0,0 +1,5 @@
---
sonobuoy_enabled: true
# Ignore ping errors
ignore_assert_errors: true


@@ -0,0 +1,7 @@
---
sonobuoy_enabled: true
pkg_install_retries: 25
retry_stagger: 10
# Ignore ping errors
ignore_assert_errors: true


@@ -0,0 +1,15 @@
$num_instances = 2
$vm_memory ||= 2048
$os = "centos"
$kube_master_instances = 1
$etcd_instances = 1
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"


@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: centos-7
mode: default
# Kubespray settings
kube_network_plugin: kube-router
enable_network_policy: true


@@ -0,0 +1,15 @@
$num_instances = 2
$vm_memory ||= 2048
$os = "fedora35"
$kube_master_instances = 1
$etcd_instances = 1
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"


@@ -0,0 +1,7 @@
---
# Instance settings
cloud_image: fedora-35
mode: default
# Kubespray settings
kube_network_plugin: kube-router


@@ -0,0 +1,15 @@
$num_instances = 2
$vm_memory ||= 2048
$os = "ubuntu1604"
$kube_master_instances = 1
$etcd_instances = 1
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"


@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: ubuntu-1604
mode: separate
# Kubespray settings
bootstrap_os: ubuntu
kube_network_plugin: kube-router


@@ -0,0 +1,10 @@
$os = "ubuntu1604"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"


@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: ubuntu-1604
mode: separate
# Kubespray settings
bootstrap_os: ubuntu
kube_network_plugin: kube-router
kube_router_run_service_proxy: true


@@ -0,0 +1,7 @@
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "calico"


@@ -0,0 +1,3 @@
---
# Kubespray settings
enable_dual_stack_networks: true
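
enable_dual_stack_networks gives nodes, pods, and services both IPv4 and IPv6 addresses. A hedged sketch of companion settings such a job typically pairs with it (the variable names and CIDRs below are assumptions based on common Kubespray defaults, not part of this file):

    enable_dual_stack_networks: true
    kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116   # assumed service CIDR
    kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112       # assumed pod CIDR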


@@ -0,0 +1,7 @@
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$vm_cpus = 2


@@ -0,0 +1,3 @@
---
# Kubespray settings
kube_network_plugin: flannel


@@ -0,0 +1,7 @@
$num_instances = 16
$vm_memory ||= 2048
$os = "ubuntu1804"
$network_plugin = "weave"
$kube_master_instances = 1
$etcd_instances = 1
$playbook = "tests/cloud_playbooks/wait-for-ssh.yml"


@@ -0,0 +1,3 @@
---
# Kubespray settings
kube_network_plugin: weave


@@ -0,0 +1,9 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$vm_cpus = 2


@@ -0,0 +1,3 @@
---
# Kubespray settings
kube_network_plugin: flannel


@@ -0,0 +1,12 @@
aws:
  key_name: "{{ key_name | default('ansibl8s') }}"
  access_key: "{{ aws_access_key }}"
  secret_key: "{{ aws_secret_key }}"
  region: "{{ aws_region | default('eu-west-1') }}" # default to eu-west-1
  group: "{{ aws_security_group | default('default') }}"
  instance_type: t2.micro
  ami_id: "{{ aws_ami_id | default('ami-02724d1f') }}" # default to Debian Jessie
  count: 3
  tags:
    test_id: "{{ test_id }}"
    network_plugin: "{{ kube_network_plugin }}"
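
The create playbook consumes this dictionary when it launches the test instances. A minimal sketch of how such a task might be wired up (the task below is an assumption, not the actual playbook; the parameters are those of the classic Ansible ec2 module):

    - name: Launch test VMs
      ec2:
        key_name: "{{ aws.key_name }}"
        instance_type: "{{ aws.instance_type }}"
        image: "{{ aws.ami_id }}"
        region: "{{ aws.region }}"
        group: "{{ aws.group }}"
        count: "{{ aws.count }}"
        instance_tags: "{{ aws.tags }}"
        wait: true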


@@ -0,0 +1 @@
localhost ansible_connection=local


@@ -0,0 +1,11 @@
-r ../requirements-2.11.txt
yamllint==1.19.0
apache-libcloud==2.2.1
tox==3.11.1
dopy==0.3.7
ansible-lint==5.4.0
molecule==3.0.6
molecule-vagrant==0.3
testinfra==5.2.2
python-vagrant==0.5.15
ara[server]==1.5.7


@@ -0,0 +1,11 @@
-r ../requirements-2.12.txt
yamllint==1.19.0
apache-libcloud==2.2.1
tox==3.11.1
dopy==0.3.7
ansible-lint==5.4.0
molecule==3.0.6
molecule-vagrant==0.3
testinfra==5.2.2
python-vagrant==0.5.15
ara[server]==1.5.7


@@ -0,0 +1,11 @@
-r ../requirements-2.12.txt
yamllint==1.19.0
apache-libcloud==2.2.1
tox==3.11.1
dopy==0.3.7
ansible-lint==5.4.0
molecule==3.0.6
molecule-vagrant==0.3
testinfra==5.2.2
python-vagrant==0.5.15
ara[server]==1.5.7


@@ -0,0 +1,8 @@
#!/bin/bash
# curl -# -C - -o shebang-unit https://raw.github.com/arpinum-oss/shebang-unit/master/releases/shebang-unit
# chmod +x shebang-unit
now=$(date +"%Y%m%d%H%M%S")
mkdir -p ${PWD}/tests-results
./shebang-unit --reporters=simple,junit --output-file=${PWD}/tests-results/junit_report-${now}.xml tests


@@ -0,0 +1,52 @@
#!/bin/bash
global_setup() {
  git clone https://github.com/ansibl8s/setup-kubernetes.git setup-kubernetes
  private_key=""
  if [ -n "${PRIVATE_KEY_FILE}" ]
  then
    private_key="--private-key=${PRIVATE_KEY_FILE}"
  fi
  ansible-playbook create.yml -i hosts -u admin -s \
    -e test_id=${TEST_ID} \
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} \
    -e aws_access_key=${AWS_ACCESS_KEY} \
    -e aws_secret_key=${AWS_SECRET_KEY} \
    -e aws_ami_id=${AWS_AMI_ID} \
    -e aws_security_group=${AWS_SECURITY_GROUP} \
    -e key_name=${AWS_KEY_PAIR_NAME} \
    -e inventory_path=${PWD}/inventory.ini \
    -e aws_region=${AWS_REGION}
}
global_teardown() {
  if [ -f inventory.ini ]
  then
    ansible-playbook -i inventory.ini -u admin delete.yml
  fi
  rm -rf ${PWD}/setup-kubernetes
}
should_deploy_cluster() {
  ansible-playbook -i inventory.ini -s ${private_key} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml
  assertion__status_code_is_success $?
}
should_api_server_respond() {
  ansible-playbook -i inventory.ini ${private_key} testcases/010_check-apiserver.yml
  assertion__status_code_is_success $?
}
should_pod_be_in_expected_subnet() {
  ansible-playbook -i inventory.ini -s ${private_key} testcases/030_check-network.yml -vv
  assertion__status_code_is_success $?
}
should_resolve_cluster_dns() {
  ansible-playbook -i inventory.ini -s ${private_key} testcases/040_check-network-adv.yml -vv
  assertion__status_code_is_success $?
}
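
Each testcase invoked above is itself a small Ansible playbook. Purely as an illustration of the shape (the host pattern, port, and assertion below are assumptions, not the real 010_check-apiserver.yml):

    ---
    - hosts: kube-master[0]
      tasks:
        - name: Query the apiserver healthz endpoint
          uri:
            url: "https://localhost:6443/healthz"
            validate_certs: false
            return_content: true
          register: healthz
        - name: Assert the apiserver reports ok
          assert:
            that:
              - healthz.content == 'ok'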

Some files were not shown because too many files have changed in this diff.