Add kubespray 2.24

변정훈
2024-02-16 17:08:09 +09:00
parent 1fa9b0df4b
commit f69d904725
1423 changed files with 89069 additions and 2 deletions

View File

@@ -0,0 +1,11 @@
# Custom CNI manifest generation
As an example, we use Cilium for testing network_plugins/custom_cni.
To update the generated manifests to the latest version, do the following:
```sh
helm repo add cilium https://helm.cilium.io/
helm repo update
helm template cilium/cilium -n kube-system -f values.yaml > cilium.yaml
```
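
The generated `cilium.yaml` is then consumed through the `custom_cni` plugin settings. Below is a minimal sketch of the group vars that wire the manifest in, mirroring the debian-11 custom_cni test case later in this commit (the exact path is an assumption based on that layout):

```yaml
# Sketch: point the custom_cni plugin at the generated manifest
# (path mirrors the debian-11 custom_cni test case in this commit)
kube_network_plugin: custom_cni
custom_cni_manifests:
  - "{{ playbook_dir }}/../tests/files/custom_cni/cilium.yaml"
```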

File diff suppressed because it is too large

View File

@@ -0,0 +1,11 @@
---
# We disable hubble so that helm doesn't try to generate any certificate.
# This is not needed to test network_plugin/custom_cni anyway.
hubble:
  enabled: false
ipam:
  operator:
    # Set the appropriate pods subnet
    clusterPoolIPv4PodCIDR: "{{ kube_pods_subnet }}"

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: almalinux-8
mode: ha
vm_memory: 3072Mi
# Kubespray settings
calico_bpf_enabled: true
loadbalancer_apiserver_localhost: true
auto_renew_certificates: true

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
# Kubespray settings
enable_nodelocaldns_secondary: true
loadbalancer_apiserver_type: haproxy

View File

@@ -0,0 +1,7 @@
---
# Instance settings
cloud_image: almalinux-8
mode: ha
# Kubespray settings
auto_renew_certificates: true

View File

@@ -0,0 +1,19 @@
---
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
# Kubespray settings
metrics_server_enabled: true
dashboard_namespace: "kube-dashboard"
dashboard_enabled: true
loadbalancer_apiserver_type: haproxy
local_path_provisioner_enabled: true
# NTP management
ntp_enabled: true
ntp_timezone: Etc/UTC
ntp_manage_config: true
ntp_tinker_panic: true
ntp_force_sync_immediately: true

View File

@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: almalinux-8
mode: default
# Kubespray settings
container_manager: crio
auto_renew_certificates: true

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: almalinux-8
mode: default
vm_memory: 3072Mi
# Kubespray settings
kube_network_plugin: kube-ovn

View File

@@ -0,0 +1,4 @@
---
# Instance settings
cloud_image: amazon-linux-2
mode: all-in-one

View File

@@ -0,0 +1,18 @@
---
# Instance settings
cloud_image: centos-7
mode: ha
# Kubespray settings
download_localhost: true
download_run_once: true
typha_enabled: true
calico_apiserver_enabled: true
calico_backend: kdd
typha_secure: true
disable_ipv6_dns: true
auto_renew_certificates: true
# Docker settings
container_manager: docker
etcd_deployment_type: docker

View File

@@ -0,0 +1,13 @@
---
# Instance settings
cloud_image: centos-7
mode: ha
# Kubespray settings
download_localhost: false
download_run_once: true
typha_enabled: true
calico_apiserver_enabled: true
calico_backend: kdd
typha_secure: true
auto_renew_certificates: true

View File

@@ -0,0 +1,74 @@
---
# Instance settings
cloud_image: centos-7
mode: ha
# Kubespray settings
kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085
kube_proxy_mode: iptables
kube_network_plugin: flannel
download_localhost: false
download_run_once: true
helm_enabled: true
krew_enabled: true
kubernetes_audit: true
etcd_events_cluster_enabled: true
local_volume_provisioner_enabled: true
kube_encrypt_secret_data: true
ingress_nginx_enabled: true
ingress_nginx_webhook_enabled: true
ingress_nginx_webhook_job_ttl: 30
cert_manager_enabled: true
# Disable as health checks are still unstable and slow to respond.
metrics_server_enabled: false
metrics_server_kubelet_insecure_tls: true
kube_token_auth: true
enable_nodelocaldns: false
kubelet_rotate_server_certificates: true
kubelet_csr_approver_enabled: false
kube_oidc_url: https://accounts.google.com/.well-known/openid-configuration
kube_oidc_client_id: kubespray-example
tls_min_version: "VersionTLS12"
tls_cipher_suites:
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
# test etcd tls cipher suites
etcd_tls_cipher_suites:
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
# Containerd
containerd_storage_dir: /var/data/containerd
containerd_state_dir: /run/cri/containerd
containerd_oom_score: -999
# Kube-vip
kube_vip_enabled: true
kube_vip_arp_enabled: true
kube_vip_controlplane_enabled: true
kube_vip_address: 192.168.1.100
# MetalLB
metallb_enabled: true
metallb_speaker_enabled: true
metallb_config:
  address_pools:
    primary:
      ip_range:
        - 192.0.1.0-192.0.1.254
      auto_assign: true
    pool1:
      ip_range:
        - 192.0.2.1-192.0.2.1
      auto_assign: false
    pool2:
      ip_range:
        - 192.0.2.2-192.0.2.2
      auto_assign: false
  layer2:
    - primary
    - pool1
    - pool2

View File

@@ -0,0 +1,7 @@
---
# Instance settings
cloud_image: centos-7
mode: default
# Kubespray settings
kube_network_plugin_multus: true

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: centos-7
mode: ha
# Kubespray settings
kube_network_plugin: weave
kubernetes_audit: true
# Needed to upgrade from 1.16 to 1.17; otherwise the upgrade is only partial and bugs follow
upgrade_cluster_setup: true

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: debian-10
mode: default
# Kubespray settings
auto_renew_certificates: true
# plugins
helm_enabled: true
krew_enabled: true

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: debian-10
mode: ha
# Kubespray settings
kube_network_plugin: cilium
enable_network_policy: true
cilium_kube_proxy_replacement: strict

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: debian-10
mode: default
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: debian-10
mode: default
# Kubespray settings
kube_network_plugin: macvlan
enable_nodelocaldns: false
kube_proxy_masquerade_all: true
macvlan_interface: "eth0"
auto_renew_certificates: true

View File

@@ -0,0 +1,16 @@
---
# Instance settings
cloud_image: debian-11
mode: default
# Kubespray settings
download_run_once: true
# Pin ipip mode to disabled to ensure a proper upgrade
ipip: false
calico_pool_blocksize: 26
calico_vxlan_mode: Always
calico_network_backend: bird
# Needed to bypass deprecation check
ignore_assert_errors: true

View File

@@ -0,0 +1,13 @@
---
# Instance settings
cloud_image: debian-11
mode: default
# Pin ipip mode to disabled to ensure a proper upgrade
ipip: false
calico_pool_blocksize: 26
calico_vxlan_mode: Always
calico_network_backend: bird
# Needed to bypass deprecation check
ignore_assert_errors: true

View File

@@ -0,0 +1,4 @@
---
# Instance settings
cloud_image: debian-11
mode: default

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: debian-11
mode: default
# Kubespray settings
kube_network_plugin: custom_cni
custom_cni_manifests:
- "{{ playbook_dir }}/../tests/files/custom_cni/cilium.yaml"

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: debian-11
mode: default
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: debian-11
mode: default
# Kubespray settings
kubelet_rotate_server_certificates: true
kubelet_csr_approver_enabled: true
kubelet_csr_approver_values:
  # Do not check DNS resolution in testing (not recommended in production)
  bypassDnsResolution: true

View File

@@ -0,0 +1,4 @@
---
# Instance settings
cloud_image: debian-12
mode: default

View File

@@ -0,0 +1,7 @@
---
# Instance settings
cloud_image: debian-12
mode: default
# Kubespray settings
kube_network_plugin: cilium

View File

@@ -0,0 +1,23 @@
---
# Instance settings
cloud_image: debian-12
mode: default
# Kubespray settings
kube_owner: root
kube_network_plugin: custom_cni
custom_cni_chart_namespace: kube-system
custom_cni_chart_release_name: cilium
custom_cni_chart_repository_name: cilium
custom_cni_chart_repository_url: https://helm.cilium.io
custom_cni_chart_ref: cilium/cilium
custom_cni_chart_version: 1.14.3
custom_cni_chart_values:
  cluster:
    name: kubespray
  hubble:
    enabled: false
  ipam:
    operator:
      clusterPoolIPv4PodCIDRList:
        - "{{ kube_pods_subnet }}"

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: debian-12
mode: default
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns
docker_repo_key_keyring: /etc/apt/trusted.gpg.d/docker.gpg

View File

@@ -0,0 +1,14 @@
---
# Instance settings
cloud_image: fedora-37
mode: default
# Kubespray settings
auto_renew_certificates: true
# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011
# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x. Calico v3.20.x/v3.21.x pods show the following error:
# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace."
kube_proxy_mode: iptables
# Test with SELinux in enforcing mode
preinstall_selinux_state: enforcing

View File

@@ -0,0 +1,19 @@
---
# Instance settings
cloud_image: fedora-37
mode: default
# Kubespray settings
auto_renew_certificates: true
# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011
# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x. Calico v3.20.x/v3.21.x pods show the following error:
# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace."
kube_proxy_mode: iptables
# Test with SELinux in enforcing mode
preinstall_selinux_state: enforcing
# Test the alpha swap feature (NodeSwap) by leveraging the default zswap config in Fedora 35
kubelet_fail_swap_on: false
kube_feature_gates:
- "NodeSwap=True"

View File

@@ -0,0 +1,15 @@
---
# Instance settings
cloud_image: fedora-37
mode: default
# Kubespray settings
container_manager: crio
auto_renew_certificates: true
# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011
# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x. Calico v3.20.x/v3.21.x pods show the following error:
# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace."
kube_proxy_mode: iptables
# Test with SELinux in enforcing mode
preinstall_selinux_state: enforcing

View File

@@ -0,0 +1,15 @@
---
# Instance settings
cloud_image: fedora-38
mode: default
# Kubespray settings
auto_renew_certificates: true
# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011
# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x. Calico v3.20.x/v3.21.x pods show the following error:
# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace."
kube_proxy_mode: iptables
# Docker specific settings:
container_manager: docker
etcd_deployment_type: docker

View File

@@ -0,0 +1,12 @@
---
# Instance settings
cloud_image: fedora-38
mode: default
# Kubespray settings
kube_network_plugin: weave
# Docker specific settings:
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1,7 @@
---
# Instance settings
cloud_image: fedora-38
mode: default
# Kubespray settings
kube_network_plugin: kube-ovn

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: opensuse-leap-15
mode: default
# Kubespray settings
kube_network_plugin: cilium
# Docker specific settings:
container_manager: docker
etcd_deployment_type: docker

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: rockylinux-8
mode: default
vm_memory: 3072Mi
# Kubespray settings
metrics_server_enabled: true
dashboard_namespace: "kube-dashboard"
dashboard_enabled: true
loadbalancer_apiserver_type: haproxy

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: rockylinux-9
mode: default
vm_memory: 3072Mi
# Kubespray settings
metrics_server_enabled: true
dashboard_namespace: "kube-dashboard"
dashboard_enabled: true
loadbalancer_apiserver_type: haproxy

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: rockylinux-9
mode: default
vm_memory: 3072Mi
# Kubespray settings
kube_network_plugin: cilium
cilium_kube_proxy_replacement: strict

View File

@@ -0,0 +1,16 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: all-in-one
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns

View File

@@ -0,0 +1 @@
packet_ubuntu20-calico-all-in-one.yml

View File

@@ -0,0 +1,106 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: all-in-one
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False
# The following settings are for hardening
## kube-apiserver
authorization_modes: ['Node', 'RBAC']
# AppArmor-based OS
kube_apiserver_feature_gates: ['AppArmor=true']
kube_apiserver_request_timeout: 120s
kube_apiserver_service_account_lookup: true
# enable kubernetes audit
kubernetes_audit: true
audit_log_path: "/var/log/kube-apiserver-log.json"
audit_log_maxage: 30
audit_log_maxbackups: 10
audit_log_maxsize: 100
tls_min_version: VersionTLS12
tls_cipher_suites:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
# enable encryption at rest
kube_encrypt_secret_data: true
kube_encryption_resources: [secrets]
kube_encryption_algorithm: "secretbox"
kube_apiserver_enable_admission_plugins:
- EventRateLimit
- AlwaysPullImages
- ServiceAccount
- NamespaceLifecycle
- NodeRestriction
- LimitRanger
- ResourceQuota
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- PodNodeSelector
- PodSecurity
kube_apiserver_admission_control_config_file: true
# EventRateLimit plugin configuration
kube_apiserver_admission_event_rate_limits:
  limit_1:
    type: Namespace
    qps: 50
    burst: 100
    cache_size: 2000
  limit_2:
    type: User
    qps: 50
    burst: 100
kube_profiling: false
## kube-controller-manager
kube_controller_manager_bind_address: 127.0.0.1
kube_controller_terminated_pod_gc_threshold: 50
# AppArmor-based OS
kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"]
## kube-scheduler
kube_scheduler_bind_address: 127.0.0.1
# AppArmor-based OS
kube_scheduler_feature_gates: ["AppArmor=true"]
## etcd
etcd_deployment_type: kubeadm
## kubelet
kubelet_authentication_token_webhook: true
kube_read_only_port: 0
kubelet_rotate_server_certificates: true
kubelet_csr_approver_enabled: false
kubelet_protect_kernel_defaults: true
kubelet_event_record_qps: 1
kubelet_rotate_certificates: true
kubelet_streaming_connection_idle_timeout: "5m"
kubelet_make_iptables_util_chains: true
kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"]
kubelet_seccomp_default: true
kubelet_systemd_hardening: true
# If your control plane nodes have multiple interfaces
# and you want to pin the right IP addresses,
# kubelet_secure_addresses lets you specify the IPs
# from which the kubelet will accept packets, e.g.:
# kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
# additional configurations
kube_owner: root
kube_cert_group: root
# create a default Pod Security Configuration and deny running of insecure pods
# kube-system namespace is exempted by default
kube_pod_security_use_default: true
kube_pod_security_default_enforce: restricted

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: all-in-one
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False

View File

@@ -0,0 +1,24 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: ha
# use the kubeadm etcd setting to test the upgrade
etcd_deployment_type: kubeadm
upgrade_cluster_setup: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False
# Pin ipip mode to disabled to ensure a proper upgrade
ipip: false
calico_vxlan_mode: Always
calico_network_backend: bird
# Needed to bypass deprecation check
ignore_assert_errors: true
### FIXME FLORYUT Needed for upgrade job, will be removed when releasing kubespray 2.20
calico_pool_blocksize: 24
### /FIXME

View File

@@ -0,0 +1,11 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: default
# use the kubeadm etcd setting to test the upgrade
etcd_deployment_type: kubeadm
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False

View File

@@ -0,0 +1,4 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: ha-recover-noquorum

View File

@@ -0,0 +1,4 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: ha-recover

View File

@@ -0,0 +1,13 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: ha
# Kubespray settings
calico_wireguard_enabled: true
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
# The KVM kernel used by packet instances is missing the dummy.ko kernel module, so nodelocaldns cannot be enabled
enable_nodelocaldns: false

View File

@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: separate
# Kubespray settings
kube_network_plugin: cilium
enable_network_policy: true
auto_renew_certificates: true

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: default
# Kubespray settings
container_manager: crio
download_localhost: false
download_run_once: true

View File

@@ -0,0 +1,16 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: separate
# Kubespray settings
kube_network_plugin: weave
auto_renew_certificates: true
# Docker specific settings:
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns
# Ubuntu 16 - the available docker containerd package stopped at 1.4.6
docker_containerd_version: latest

View File

@@ -0,0 +1,22 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: ha
# Kubespray settings
kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085
kube_proxy_mode: iptables
kube_network_plugin: flannel
helm_enabled: true
krew_enabled: true
kubernetes_audit: true
etcd_events_cluster_enabled: true
local_volume_provisioner_enabled: true
kube_encrypt_secret_data: true
ingress_nginx_enabled: true
cert_manager_enabled: true
# Disable as health checks are still unstable and slow to respond.
metrics_server_enabled: false
metrics_server_kubelet_insecure_tls: true
kube_token_auth: true
enable_nodelocaldns: false

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: ha
# Kubespray settings
kube_network_plugin: flannel
etcd_deployment_type: kubeadm
kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085
skip_non_kubeadm_warning: true

View File

@@ -0,0 +1,18 @@
---
# Instance settings
cloud_image: ubuntu-2204
mode: all-in-one
vm_memory: 1600Mi
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False
# Use docker
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns
docker_repo_key_keyring: /etc/apt/trusted.gpg.d/docker.gpg

View File

@@ -0,0 +1,24 @@
---
# Instance settings
cloud_image: ubuntu-2204
mode: all-in-one
vm_memory: 1600Mi
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False
containerd_registries_mirrors:
  - prefix: docker.io
    mirrors:
      - host: https://mirror.gcr.io
        capabilities: ["pull", "resolve"]
        skip_verify: false
  - prefix: 172.19.16.11:5000
    mirrors:
      - host: http://172.19.16.11:5000
        capabilities: ["pull", "resolve", "push"]
        skip_verify: true

View File

@@ -0,0 +1,29 @@
---
# Instance settings
cloud_image: ubuntu-2204
mode: node-etcd-client
vm_memory: 1600Mi
# Kubespray settings
auto_renew_certificates: true
# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: iptables
enable_nodelocaldns: False
containerd_registries:
  "docker.io": "https://mirror.gcr.io"
containerd_registries_mirrors:
  - prefix: docker.io
    mirrors:
      - host: https://mirror.gcr.io
        capabilities: ["pull", "resolve"]
        skip_verify: false
  - prefix: 172.19.16.11:5000
    mirrors:
      - host: http://172.19.16.11:5000
        capabilities: ["pull", "resolve", "push"]
        skip_verify: true
calico_datastore: "etcd"

View File

@@ -0,0 +1,5 @@
---
sonobuoy_enabled: true
# Ignore ping errors
ignore_assert_errors: true

View File

@@ -0,0 +1,7 @@
---
sonobuoy_enabled: true
pkg_install_retries: 25
retry_stagger: 10
# Ignore ping errors
ignore_assert_errors: true

View File

@@ -0,0 +1,15 @@
$num_instances = 2
$vm_memory ||= 2048
$os = "centos"
$kube_master_instances = 1
$etcd_instances = 1
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"

View File

@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: centos-7
mode: default
# Kubespray settings
kube_network_plugin: kube-router
enable_network_policy: true

View File

@@ -0,0 +1,15 @@
$num_instances = 2
$vm_memory ||= 2048
$os = "fedora37"
$kube_master_instances = 1
$etcd_instances = 1
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"

View File

@@ -0,0 +1,7 @@
---
# Instance settings
cloud_image: fedora-37
mode: default
# Kubespray settings
kube_network_plugin: kube-router

View File

@@ -0,0 +1,7 @@
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "calico"

View File

@@ -0,0 +1,3 @@
---
# Kubespray settings
enable_dual_stack_networks: true

View File

@@ -0,0 +1,9 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$vm_cpus = 2

View File

@@ -0,0 +1,3 @@
---
# Kubespray settings
kube_network_plugin: flannel

View File

@@ -0,0 +1,9 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$vm_cpus = 2

View File

@@ -0,0 +1,3 @@
---
# Kubespray settings
kube_network_plugin: flannel

View File

@@ -0,0 +1,15 @@
$num_instances = 2
$vm_memory ||= 2048
$os = "ubuntu2004"
$kube_master_instances = 1
$etcd_instances = 1
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"

View File

@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: separate
# Kubespray settings
bootstrap_os: ubuntu
kube_network_plugin: kube-router

View File

@@ -0,0 +1,10 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"

View File

@@ -0,0 +1,10 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: separate
# Kubespray settings
bootstrap_os: ubuntu
kube_network_plugin: kube-router
kube_router_run_service_proxy: true

View File

@@ -0,0 +1,7 @@
$num_instances = 16
$vm_memory ||= 2048
$os = "ubuntu2004"
$network_plugin = "weave"
$kube_master_instances = 1
$etcd_instances = 1
$playbook = "tests/cloud_playbooks/wait-for-ssh.yml"

View File

@@ -0,0 +1,3 @@
---
# Kubespray settings
kube_network_plugin: weave