diff --git a/ansible/inventory.ini b/ansible/inventory.ini new file mode 100644 index 0000000..3bdd326 --- /dev/null +++ b/ansible/inventory.ini @@ -0,0 +1,37 @@ +[host] +10.10.43.111 +10.10.43.112 +10.10.43.113 +10.10.43.114 +10.10.43.115 +10.10.43.116 +10.10.43.117 +10.10.43.118 +10.10.43.119 +10.10.43.120 +10.10.43.121 +10.10.43.122 +10.10.43.123 +10.10.43.124 +10.10.43.125 +10.10.43.126 +10.10.43.127 +10.10.43.128 +10.10.43.129 +10.10.43.130 +10.10.43.131 +10.10.43.132 +10.10.43.133 +10.10.43.134 +10.10.43.135 +10.10.43.137 +10.10.43.138 +10.10.43.140 +10.10.43.141 +10.10.43.142 +10.10.43.143 +10.10.43.144 +10.10.43.145 +10.10.43.146 +10.10.43.147 + diff --git a/ansible/node.yaml b/ansible/node.yaml new file mode 100755 index 0000000..b6344cf --- /dev/null +++ b/ansible/node.yaml @@ -0,0 +1,7 @@ +--- +- name: check ls + hosts: all + become: true + roles: + - node + diff --git a/ansible/roles/node/tasks/main.yml b/ansible/roles/node/tasks/main.yml new file mode 100644 index 0000000..0e344f6 --- /dev/null +++ b/ansible/roles/node/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: echo hello + command: echo "Not Valid Ruby Version" + +- name: Update apt repo and cache on all Debian/Ubuntu boxes + apt: update_cache=yes cache_valid_time=3600 + +- name: Install cifs-utils + apt: name=cifs-utils state=latest update_cache=yes + +- name: Install nfs-common + apt: name=nfs-common state=latest update_cache=yes diff --git a/ansible/roles/security-settings/defaults/main.yml b/ansible/roles/security-settings/defaults/main.yml new file mode 100755 index 0000000..7a7c024 --- /dev/null +++ b/ansible/roles/security-settings/defaults/main.yml @@ -0,0 +1,43 @@ +# Password aging settings +os_auth_pw_max_age: 90 +os_auth_pw_min_age: 10 +os_auth_pw_warn_age: 7 +passhistory: 2 + +# Inactivity and Failed attempts lockout settings +fail_deny: 5 +fail_unlock: 0 +inactive_lock: 0 +shell_timeout: 300 + +# tally settings +onerr: 'fail' +deny: 5 +unlock_time: 300 + +# Password complexity settings +pwquality_minlen: 9 +pwquality_maxrepeat: 3 +pwquality_lcredit: -1 +pwquality_ucredit: -1 +pwquality_dcredit: -1 +pwquality_ocredit: -1 + +# SSH settings +sshrootlogin: 'forced-commands-only' +sshmainport: 22 +ssh_service_name: sshd + +# Crictl setup +crictl_app: crictl +crictl_version: 1.25.0 +crictl_os: linux +crictl_arch: amd64 +crictl_dl_url: https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{ crictl_version }}/{{ crictl_app }}-v{{ crictl_version }}-{{ crictl_os }}-{{ crictl_arch }}.tar.gz +crictl_bin_path: /usr/local/bin +crictl_file_owner: root +crictl_file_group: root + +# temp +username: +password: diff --git a/ansible/roles/security-settings/files/containerd_default_config.toml b/ansible/roles/security-settings/files/containerd_default_config.toml new file mode 100644 index 0000000..c2f6605 --- /dev/null +++ b/ansible/roles/security-settings/files/containerd_default_config.toml @@ -0,0 +1,39 @@ +version = 2 +root = "/var/lib/containerd" +state = "/run/containerd" +oom_score = 0 + +[grpc] + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + +[debug] + level = "info" + +[metrics] + address = "" + grpc_histogram = false + +[plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.7" + max_container_log_line_size = -1 + enable_unprivileged_ports = false + enable_unprivileged_icmp = false + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + snapshotter = "overlayfs" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] 
+ [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + runtime_engine = "" + runtime_root = "" + base_runtime_spec = "/etc/containerd/cri-base.json" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + systemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["https://registry-1.docker.io"] + diff --git a/ansible/roles/security-settings/files/containerd_dsk_config.toml b/ansible/roles/security-settings/files/containerd_dsk_config.toml new file mode 100644 index 0000000..53550fc --- /dev/null +++ b/ansible/roles/security-settings/files/containerd_dsk_config.toml @@ -0,0 +1,60 @@ +version = 2 +root = "/var/lib/containerd" +state = "/run/containerd" +oom_score = 0 + +[grpc] + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + +[debug] + level = "info" + +[metrics] + address = "" + grpc_histogram = false + +[plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.7" + max_container_log_line_size = -1 + enable_unprivileged_ports = false + enable_unprivileged_icmp = false + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + snapshotter = "overlayfs" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + runtime_engine = "" + runtime_root = "" + base_runtime_spec = "/etc/containerd/cri-base.json" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + systemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["https://registry-1.docker.io"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."10.10.31.243:5000"] + endpoint = ["http://10.10.31.243:5000"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."10.10.43.240:30500"] + endpoint = ["http://10.10.43.240:30500"] + + [plugins."io.containerd.grpc.v1.cri".registry.headers] + + [plugins."io.containerd.grpc.v1.cri".registry.configs] + [plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.31.243:5000".tls] + insecure_skip_verify = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.31.243:5000".auth] + username = "core" + password = "coreadmin1234" + [plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.43.240:30500".tls] + insecure_skip_verify = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.43.240:30500".auth] + username = "dsk" + password = "dskadmin1234" + [plugins."io.containerd.grpc.v1.cri".registry.configs."docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + diff --git a/ansible/roles/security-settings/files/login_banner b/ansible/roles/security-settings/files/login_banner new file mode 100755 index 0000000..d294eeb --- /dev/null +++ b/ansible/roles/security-settings/files/login_banner @@ -0,0 +1,20 @@ +#!/bin/sh +printf ''' + |-----------------------------------------------------------------| + | This system is for the use of authorized users only. 
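A note on the two containerd configs above: TOML keys are case-sensitive, and the option containerd's CRI plugin actually reads is `SystemdCgroup`, so the lowercase `systemdCgroup = true` written here is silently ignored and runc stays on the cgroupfs driver. If the kubelet on these hosts uses the systemd cgroup driver (the kubeadm default on recent Kubernetes releases), the runc options block should read roughly as follows (a corrected fragment of the same version-2 config, not a full file):

```toml
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  runtime_type = "io.containerd.runc.v2"
  base_runtime_spec = "/etc/containerd/cri-base.json"

  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    # Case matters: containerd looks for "SystemdCgroup", not "systemdCgroup".
    SystemdCgroup = true
```

The `containerd_dsk_config.toml` variant also embeds registry usernames, passwords and a Docker Hub access token in plain text; templating those values from Ansible Vault-encrypted variables would keep them out of the repository.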
| + | Individuals using this computer system without authority, or in | + | excess of their authority, are subject to having all of their | + | activities on this system monitored and recorded by system | + | personnel. | + | | + | In the course of monitoring individuals improperly using this | + | system, or in the course of system maintenance, the activities | + | of authorized users may also be monitored. | + | | + | Anyone using this system expressly consents to such monitoring | + | and is advised that if such monitoring reveals possible | + | evidence of criminal activity, system personnel may provide the | + | evidence of such monitoring to law enforcement officials. | + |-----------------------------------------------------------------| +''' + diff --git a/ansible/roles/security-settings/files/systemd_limit.conf b/ansible/roles/security-settings/files/systemd_limit.conf new file mode 100644 index 0000000..c4f0c24 --- /dev/null +++ b/ansible/roles/security-settings/files/systemd_limit.conf @@ -0,0 +1,3 @@ +#[Manager] +#DefaultLimitNOFILE=65535:65535 +#DefaultLimitNPROC=65536:65536 diff --git a/ansible/roles/security-settings/handlers/main.yml b/ansible/roles/security-settings/handlers/main.yml new file mode 100755 index 0000000..abab7ef --- /dev/null +++ b/ansible/roles/security-settings/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart sshd + service: + name: "{{ ssh_service_name }}" + state: restarted + enabled: true diff --git a/ansible/roles/security-settings/tasks/admin_set.yml b/ansible/roles/security-settings/tasks/admin_set.yml new file mode 100755 index 0000000..bbd4923 --- /dev/null +++ b/ansible/roles/security-settings/tasks/admin_set.yml @@ -0,0 +1,14 @@ +--- +- name: key add + authorized_key: + user: ubuntu + state: present + key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}" + manage_dir: False + +- name: user change + user: + name: "{{ username }}" + password: "{{ password | password_hash('sha512') }}" + state: present + diff --git a/ansible/roles/security-settings/tasks/banner.yml b/ansible/roles/security-settings/tasks/banner.yml new file mode 100755 index 0000000..6a172c9 --- /dev/null +++ b/ansible/roles/security-settings/tasks/banner.yml @@ -0,0 +1,29 @@ +--- +- name: Create a tar.gz archive of a single file. 
+ archive: + path: /etc/update-motd.d/* + dest: /etc/update-motd.d/motd.tar.gz + format: gz + force_archive: true + +- name: remove a motd.d files + file: + path: /etc/update-motd.d/{{ item }} + state: absent + with_items: + - 10-help-text + - 85-fwupd + - 90-updates-available + - 91-release-upgrade + - 95-hwe-eol + - 98-fsck-at-reboot + - 50-motd-news + - 88-esm-announce + +- name: Create login banner + copy: + src: login_banner + dest: /etc/update-motd.d/00-header + owner: root + group: root + mode: 0755 diff --git a/ansible/roles/security-settings/tasks/crictl.yml b/ansible/roles/security-settings/tasks/crictl.yml new file mode 100755 index 0000000..2ca551f --- /dev/null +++ b/ansible/roles/security-settings/tasks/crictl.yml @@ -0,0 +1,47 @@ +--- + +#- name: Downloading and extracting {{ crictl_app }} {{ crictl_version }} +# unarchive: +# src: "{{ crictl_dl_url }}" +# dest: "{{ crictl_bin_path }}" +# owner: "{{ crictl_file_owner }}" +# group: "{{ crictl_file_group }}" +# extra_opts: +# - crictl +# remote_src: yes + +- name: Change containerd config + copy: + src: containerd_dsk_config.toml + dest: /etc/containerd/config.toml + owner: root + group: root + mode: 0640 + +- name: Restart service containerd + ansible.builtin.systemd: + state: restarted + daemon_reload: yes + name: containerd + +- name: remove all cronjobs for user root + command: crontab -r -u root + ignore_errors: true + +- name: Crictl command crontab setting + ansible.builtin.cron: + name: "container container prune" + minute: "0" + hour: "3" + user: root + job: "for id in `crictl ps -a | grep -i exited | awk '{print $1}'`; do crictl rm $id ; done" + +- name: Crictl command crontab setting + ansible.builtin.cron: + name: "container image prune" + minute: "10" + hour: "3" + user: root + job: "/usr/local/bin/crictl rmi --prune" + + diff --git a/ansible/roles/security-settings/tasks/login_defs.yml b/ansible/roles/security-settings/tasks/login_defs.yml new file mode 100755 index 0000000..f25702a --- /dev/null +++ b/ansible/roles/security-settings/tasks/login_defs.yml @@ -0,0 +1,48 @@ +--- +- name: Set pass max days + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_MAX_DAYS.*$' + line: "PASS_MAX_DAYS\t{{os_auth_pw_max_age}}" + backrefs: yes + +- name: Set pass min days + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_MIN_DAYS.*$' + line: "PASS_MIN_DAYS\t{{os_auth_pw_min_age}}" + backrefs: yes + +- name: Set pass min length + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_MIN_LEN.*$' + line: "PASS_MIN_LEN\t{{pwquality_minlen}}" + backrefs: yes + +- name: Set pass warn days + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_WARN_AGE.*$' + line: "PASS_WARN_AGE\t{{os_auth_pw_warn_age}}" + backrefs: yes + +- name: Set password encryption to SHA512 + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^ENCRYPT_METHOD\s.*$' + line: "ENCRYPT_METHOD\tSHA512" + backrefs: yes + +- name: Disable MD5 crypt explicitly + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^MD5_CRYPT_ENAB.*$' + line: "MD5_CRYPT_ENAB NO" + backrefs: yes diff --git a/ansible/roles/security-settings/tasks/main.yml b/ansible/roles/security-settings/tasks/main.yml new file mode 100755 index 0000000..17bc3d5 --- /dev/null +++ b/ansible/roles/security-settings/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- include: login_defs.yml + tags: login_defs + +- include: pam.yml + tags: pam + +- include: sshd_config.yml + tags: sshd_config + +- include: profile.yml + 
tags: profile + +- include: banner.yml + tags: banner + +- include: crictl.yml + tags: circtl + +#- include: admin_set.yml +# tags: admin_set diff --git a/ansible/roles/security-settings/tasks/pam.yml b/ansible/roles/security-settings/tasks/pam.yml new file mode 100755 index 0000000..02de5c3 --- /dev/null +++ b/ansible/roles/security-settings/tasks/pam.yml @@ -0,0 +1,82 @@ +--- +- name: Add pam_tally2.so + template: + src: common-auth.j2 + dest: /etc/pam.d/common-auth + owner: root + group: root + mode: 0644 + +- name: Create pwquality.conf password complexity configuration + block: + - apt: + name: libpam-pwquality + state: present + install_recommends: false + - template: + src: pwquality.conf.j2 + dest: /etc/security/pwquality.conf + owner: root + group: root + mode: 0644 + +- name: Add pam_tally2.so + block: + - lineinfile: + dest: /etc/pam.d/common-account + regexp: '^account\srequisite' + line: "account requisite pam_deny.so" + + - lineinfile: + dest: /etc/pam.d/common-account + regexp: '^account\srequired' + line: "account required pam_tally2.so" + +- name: password reuse is limited + lineinfile: + dest: /etc/pam.d/common-password + line: "password required pam_pwhistory.so remember=5" + +- name: password hashing algorithm is SHA-512 + lineinfile: + dest: /etc/pam.d/common-password + regexp: '^password\s+\[success' + line: "password [success=1 default=ignore] pam_unix.so sha512" + +- name: Shadow Password Suite Parameters + lineinfile: + dest: /etc/pam.d/common-password + regexp: '^password\s+\[success' + line: "password [success=1 default=ignore] pam_unix.so sha512" + +#- name: configure system settings, file descriptors and number of threads +# pam_limits: +# domain: '*' +# limit_type: "{{item.limit_type}}" +# limit_item: "{{item.limit_item}}" +# value: "{{item.value}}" +# with_items: +# - { limit_type: '-', limit_item: 'nofile', value: 65536 } +# - { limit_type: '-', limit_item: 'nproc', value: 65536 } +## - { limit_type: 'soft', limit_item: 'memlock', value: unlimited } +## - { limit_type: 'hard', limit_item: 'memlock', value: unlimited } + +#- name: reload settings from all system configuration files +# shell: sysctl --system + +#- name: Creates directory systemd config +# file: +# path: /etc/systemd/system.conf.d +# state: directory +# owner: root +# group: root +# mode: 0775 + +#- name: Create systemd limits +# copy: +# src: systemd_limit.conf +# dest: /etc/systemd/system.conf.d/limits.conf +# owner: root +# group: root +# mode: 644 + diff --git a/ansible/roles/security-settings/tasks/profile.yml b/ansible/roles/security-settings/tasks/profile.yml new file mode 100755 index 0000000..fb1b456 --- /dev/null +++ b/ansible/roles/security-settings/tasks/profile.yml @@ -0,0 +1,24 @@ +--- +- name: Set session timeout + lineinfile: + dest: /etc/profile + regexp: '^TMOUT=.*' + insertbefore: '^readonly TMOUT' + line: 'TMOUT={{shell_timeout}}' + state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}" + +- name: Set TMOUT readonly + lineinfile: + dest: /etc/profile + regexp: '^readonly TMOUT' + insertafter: 'TMOUT={{shell_timeout}}' + line: 'readonly TMOUT' + state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}" + +- name: Set export TMOUT + lineinfile: + dest: /etc/profile + regexp: '^export TMOUT.*' + insertafter: 'readonly TMOUT' + line: 'export TMOUT' + state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}" diff --git a/ansible/roles/security-settings/tasks/sshd_config.yml b/ansible/roles/security-settings/tasks/sshd_config.yml new file mode 100755 index 
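A compatibility note on `roles/security-settings/tasks/main.yml` above: the bare `include` directive has been deprecated in Ansible for a long time and is removed in recent ansible-core releases, and the crictl entry is tagged `circtl` rather than `crictl`. On current Ansible the same file, keeping the per-file tags (which a static `import_tasks` applies to every imported task), would look roughly like this sketch:

```yaml
---
# import_tasks is static, so the tag applies to every task in the imported file,
# matching how the old bare `include` behaved here.
- import_tasks: login_defs.yml
  tags: login_defs

- import_tasks: pam.yml
  tags: pam

- import_tasks: sshd_config.yml
  tags: sshd_config

- import_tasks: profile.yml
  tags: profile

- import_tasks: banner.yml
  tags: banner

- import_tasks: crictl.yml
  tags: crictl
```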
0000000..438a65a --- /dev/null +++ b/ansible/roles/security-settings/tasks/sshd_config.yml @@ -0,0 +1,23 @@ +--- +- name: Configure ssh root login to {{sshrootlogin}} + lineinfile: + dest: /etc/ssh/sshd_config + regexp: '^(#)?PermitRootLogin.*' + line: 'PermitRootLogin {{sshrootlogin}}' + insertbefore: '^Match.*' + state: present + owner: root + group: root + mode: 0640 + notify: restart sshd + +- name: SSH Listen on Main Port + lineinfile: + dest: /etc/ssh/sshd_config + insertbefore: '^#*AddressFamily' + line: 'Port {{sshmainport}}' + state: present + owner: root + group: root + mode: 0640 + notify: restart sshd diff --git a/ansible/roles/security-settings/templates/common-auth.j2 b/ansible/roles/security-settings/templates/common-auth.j2 new file mode 100755 index 0000000..64a603b --- /dev/null +++ b/ansible/roles/security-settings/templates/common-auth.j2 @@ -0,0 +1,27 @@ +# +# /etc/pam.d/common-auth - authentication settings common to all services +# +# This file is included from other service-specific PAM config files, +# and should contain a list of the authentication modules that define +# the central authentication scheme for use on the system +# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the +# traditional Unix authentication mechanisms. +# +# As of pam 1.0.1-6, this file is managed by pam-auth-update by default. +# To take advantage of this, it is recommended that you configure any +# local modules either before or after the default block, and use +# pam-auth-update to manage selection of other modules. See +# pam-auth-update(8) for details. +auth required pam_tally2.so onerr={{onerr}} even_deny_root deny={{deny}} unlock_time={{unlock_time}} + +# here are the per-package modules (the "Primary" block) +auth [success=1 default=ignore] pam_unix.so nullok +# here's the fallback if no module succeeds +auth requisite pam_deny.so +# prime the stack with a positive return value if there isn't one already; +# this avoids us returning an error just because nothing sets a success code +auth required pam_permit.so +# since the modules above will each just jump around +# and here are more per-package modules (the "Additional" block) +auth optional pam_cap.so +# end of pam-auth-update config diff --git a/ansible/roles/security-settings/templates/pwquality.conf.j2 b/ansible/roles/security-settings/templates/pwquality.conf.j2 new file mode 100755 index 0000000..3ec2cbe --- /dev/null +++ b/ansible/roles/security-settings/templates/pwquality.conf.j2 @@ -0,0 +1,50 @@ +# Configuration for systemwide password quality limits +# Defaults: +# +# Number of characters in the new password that must not be present in the +# old password. +# difok = 5 +# +# Minimum acceptable size for the new password (plus one if +# credits are not disabled which is the default). (See pam_cracklib manual.) +# Cannot be set to lower value than 6. +minlen = {{pwquality_minlen}} +# +# The maximum credit for having digits in the new password. If less than 0 +# it is the minimum number of digits in the new password. +dcredit = {{pwquality_dcredit}} +# +# The maximum credit for having uppercase characters in the new password. +# If less than 0 it is the minimum number of uppercase characters in the new +# password. +ucredit = {{pwquality_ucredit}} +# +# The maximum credit for having lowercase characters in the new password. +# If less than 0 it is the minimum number of lowercase characters in the new +# password. 
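One caveat for the `common-auth.j2` template above: `pam_tally2` was removed in Linux-PAM 1.4, so it is not shipped on newer distributions (Ubuntu 22.04 and later, for example), and a `required` module that cannot be loaded fails the whole auth stack. On those releases the same lockout policy is normally expressed with `pam_faillock`, e.g. `auth required pam_faillock.so preauth silent deny={{deny}} unlock_time={{unlock_time}}` before the `pam_unix.so` line, an `auth [default=die] pam_faillock.so authfail ...` line after it, and `account required pam_faillock.so` in common-account. The template as written only targets releases that still ship pam_tally2.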
+lcredit = {{pwquality_lcredit}} +# +# The maximum credit for having other characters in the new password. +# If less than 0 it is the minimum number of other characters in the new +# password. +ocredit = {{pwquality_ocredit}} +# +# The minimum number of required classes of characters for the new +# password (digits, uppercase, lowercase, others). +# minclass = 0 +# +# The maximum number of allowed consecutive same characters in the new password. +# The check is disabled if the value is 0. +maxrepeat = {{pwquality_maxrepeat}} +# +# The maximum number of allowed consecutive characters of the same class in the +# new password. +# The check is disabled if the value is 0. +# maxclassrepeat = 0 +# +# Whether to check for the words from the passwd entry GECOS string of the user. +# The check is enabled if the value is not 0. +# gecoscheck = 0 +# +# Path to the cracklib dictionaries. Default is to use the cracklib default. +# dictpath = diff --git a/ansible/rsa_key/key.sh b/ansible/rsa_key/key.sh new file mode 100755 index 0000000..70cb25e --- /dev/null +++ b/ansible/rsa_key/key.sh @@ -0,0 +1,9 @@ +#!/usr/bin/expect -f +set password [lindex $argv 0] +set host [lindex $argv 1] + +spawn ssh-copy-id -o StrictHostKeyChecking=no ubuntu@$host +expect "password:" +send "$password\n" +expect eof + diff --git a/ansible/rsa_key/test.sh b/ansible/rsa_key/test.sh new file mode 100755 index 0000000..c4f0cae --- /dev/null +++ b/ansible/rsa_key/test.sh @@ -0,0 +1,13 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +if [ $1 == '' ]; then exit +else; passwd=$1 + +while read ip +do + echo ${ip} + #./key.sh ${passwd} ${ip} + +done < ip_list + diff --git a/ansible/security.yaml b/ansible/security.yaml new file mode 100755 index 0000000..efdbf8a --- /dev/null +++ b/ansible/security.yaml @@ -0,0 +1,9 @@ +--- +- name: check ls + hosts: all + become: true + roles: + - security-settings + vars: + sshrootlogin: 'no' + diff --git a/kops/aws_kops_prod/instancegroup_backup/k8s-prod-data-druid-large.yaml b/kops/aws_kops_prod/instancegroup_backup/k8s-prod-data-druid-large.yaml new file mode 100644 index 0000000..1cddafe --- /dev/null +++ b/kops/aws_kops_prod/instancegroup_backup/k8s-prod-data-druid-large.yaml @@ -0,0 +1,25 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + labels: + kops.k8s.io/cluster: k8s-prod.datasaker.io + name: k8s-prod-data-druid-large +spec: + image: ami-0409b7ddbc59e3222 + kubelet: + anonymousAuth: false + nodeLabels: + node-role.kubernetes.io/node: "" + machineType: m5a.4xlarge + manager: CloudGroup + maxSize: 2 + minSize: 2 + nodeLabels: + datasaker/druid-size: large + datasaker/group: data-druid + kops.k8s.io/instancegroup: k8s-prod-data-druid-large + role: Node + subnets: + - ap-northeast-2c + taints: + - prod/data-druid:NoSchedule diff --git a/kops/aws_kops_prod/instancegroup_backup/k8s-prod-tmp.yaml b/kops/aws_kops_prod/instancegroup_backup/k8s-prod-tmp.yaml new file mode 100644 index 0000000..0b44727 --- /dev/null +++ b/kops/aws_kops_prod/instancegroup_backup/k8s-prod-tmp.yaml @@ -0,0 +1,22 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + labels: + kops.k8s.io/cluster: k8s-prod.datasaker.io + name: k8s-prod-tmp +spec: + image: ami-0409b7ddbc59e3222 + kubelet: + anonymousAuth: false + nodeLabels: + node-role.kubernetes.io/node: "" + machineType: m5a.2xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: tmp + kops.k8s.io/instancegroup: k8s-prod-tmp + role: Node + subnets: + - 
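As written, `ansible/rsa_key/test.sh` does not run: `[ $1 == '' ]` breaks when `$1` is empty, `else;` is a syntax error, and the `if` block is never closed with `fi`. A minimal working version with the same intent (take a password as the first argument, loop over `ip_list`, optionally call `key.sh` per host) might look like:

```bash
#!/bin/bash
# Usage: ./test.sh <password>
# Reads one IP per line from ip_list; the key.sh call is left commented out,
# as in the original, so a dry run only prints the addresses.

if [ -z "$1" ]; then
    echo "usage: $0 <password>" >&2
    exit 1
fi
passwd=$1

while read -r ip; do
    echo "${ip}"
    #./key.sh "${passwd}" "${ip}"
done < ip_list
```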
ap-northeast-2c diff --git a/kubespray/.ansible-lint b/kubespray/.ansible-lint new file mode 100644 index 0000000..048a897 --- /dev/null +++ b/kubespray/.ansible-lint @@ -0,0 +1,30 @@ +--- +parseable: true +skip_list: + # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules + + # DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary + + # These rules are intentionally skipped: + # + # [E204]: "Lines should be no longer than 160 chars" + # This could be re-enabled with a major rewrite in the future. + # For now, there's not enough value gain from strictly limiting line length. + # (Disabled in May 2019) + - '204' + + # [E701]: "meta/main.yml should contain relevant info" + # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy. + # While it can be useful to have these metadata available, they are also available in the existing documentation. + # (Disabled in May 2019) + - '701' + + # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern + # Meta roles in Kubespray don't need proper names + # (Disabled in June 2021) + - 'role-name' + + # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards + # In Kubespray we use variables that use camelCase to match their k8s counterparts + # (Disabled in June 2021) + - 'var-naming' diff --git a/kubespray/.editorconfig b/kubespray/.editorconfig new file mode 100644 index 0000000..6da030f --- /dev/null +++ b/kubespray/.editorconfig @@ -0,0 +1,15 @@ +root = true + +[*.{yaml,yml,yml.j2,yaml.j2}] +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true +insert_final_newline = true +charset = utf-8 + +[{Dockerfile}] +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true +insert_final_newline = true +charset = utf-8 diff --git a/kubespray/.github/ISSUE_TEMPLATE/bug-report.md b/kubespray/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000..3ead734 --- /dev/null +++ b/kubespray/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,44 @@ +--- +name: Bug Report +about: Report a bug encountered while operating Kubernetes +labels: kind/bug + +--- + + +**Environment**: +- **Cloud provider or hardware configuration:** + +- **OS (`printf "$(uname -srm)\n$(cat /etc/os-release)\n"`):** + +- **Version of Ansible** (`ansible --version`): + +- **Version of Python** (`python --version`): + + +**Kubespray version (commit) (`git rev-parse --short HEAD`):** + + +**Network plugin used**: + + +**Full inventory with variables (`ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"`):** + + +**Command used to invoke ansible**: + + +**Output of ansible run**: + + +**Anything else do we need to know**: + diff --git a/kubespray/.github/ISSUE_TEMPLATE/enhancement.md b/kubespray/.github/ISSUE_TEMPLATE/enhancement.md new file mode 100644 index 0000000..3680571 --- /dev/null +++ b/kubespray/.github/ISSUE_TEMPLATE/enhancement.md @@ -0,0 +1,11 @@ +--- +name: Enhancement Request +about: Suggest an enhancement to the Kubespray project +labels: kind/feature + +--- + + +**What would you like to be added**: + +**Why is this needed**: diff --git a/kubespray/.github/ISSUE_TEMPLATE/failing-test.md b/kubespray/.github/ISSUE_TEMPLATE/failing-test.md new file mode 100644 index 0000000..cb4f1a7 --- /dev/null +++ b/kubespray/.github/ISSUE_TEMPLATE/failing-test.md @@ -0,0 +1,20 @@ +--- +name: Failing Test +about: 
Report test failures in Kubespray CI jobs +labels: kind/failing-test + +--- + + + +**Which jobs are failing**: + +**Which test(s) are failing**: + +**Since when has it been failing**: + +**Testgrid link**: + +**Reason for failure**: + +**Anything else we need to know**: diff --git a/kubespray/.github/ISSUE_TEMPLATE/support.md b/kubespray/.github/ISSUE_TEMPLATE/support.md new file mode 100644 index 0000000..ddec841 --- /dev/null +++ b/kubespray/.github/ISSUE_TEMPLATE/support.md @@ -0,0 +1,18 @@ +--- +name: Support Request +about: Support request or question relating to Kubespray +labels: kind/support + +--- + + diff --git a/kubespray/.github/PULL_REQUEST_TEMPLATE.md b/kubespray/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..2a4d3c8 --- /dev/null +++ b/kubespray/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,44 @@ + + +**What type of PR is this?** +> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line: +> +> /kind api-change +> /kind bug +> /kind cleanup +> /kind design +> /kind documentation +> /kind failing-test +> /kind feature +> /kind flake + +**What this PR does / why we need it**: + +**Which issue(s) this PR fixes**: + +Fixes # + +**Special notes for your reviewer**: + +**Does this PR introduce a user-facing change?**: + +```release-note + +``` diff --git a/kubespray/.gitignore b/kubespray/.gitignore new file mode 100644 index 0000000..43bf3d9 --- /dev/null +++ b/kubespray/.gitignore @@ -0,0 +1,115 @@ +.vagrant +*.retry +**/vagrant_ansible_inventory +*.iml +temp +contrib/offline/offline-files +contrib/offline/offline-files.tar.gz +.idea +.vscode +.tox +.cache +*.bak +*.tfstate +*.tfstate.backup +.terraform/ +contrib/terraform/aws/credentials.tfvars +.terraform.lock.hcl +/ssh-bastion.conf +**/*.sw[pon] +*~ +vagrant/ +plugins/mitogen + +# Ansible inventory +inventory/* +!inventory/local +!inventory/sample +inventory/*/artifacts/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Distribution / packaging +.Python +env/ +build/ +credentials/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# IPython Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# dotenv +.env + +# virtualenv +venv/ +ENV/ + +# molecule +roles/**/molecule/**/__pycache__/ + +# macOS +.DS_Store + +# Temp location used by our scripts +scripts/tmp/ +tmp.md diff --git a/kubespray/.gitlab-ci.yml b/kubespray/.gitlab-ci.yml new file mode 100644 index 0000000..9af54e0 --- /dev/null +++ b/kubespray/.gitlab-ci.yml @@ -0,0 +1,84 @@ +--- +stages: + - unit-tests + - deploy-part1 + - moderator + - deploy-part2 + - deploy-part3 + - deploy-special + +variables: + KUBESPRAY_VERSION: v2.20.0 + FAILFASTCI_NAMESPACE: 'kargo-ci' + GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray' + ANSIBLE_FORCE_COLOR: "true" + MAGIC: "ci check this" + TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID" + CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml" + CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml" + CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml" + GS_ACCESS_KEY_ID: $GS_KEY + GS_SECRET_ACCESS_KEY: $GS_SECRET + CONTAINER_ENGINE: docker + SSH_USER: root + GCE_PREEMPTIBLE: "false" + ANSIBLE_KEEP_REMOTE_FILES: "1" + ANSIBLE_CONFIG: ./tests/ansible.cfg + ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini + IDEMPOT_CHECK: "false" + RESET_CHECK: "false" + REMOVE_NODE_CHECK: "false" + UPGRADE_TEST: "false" + MITOGEN_ENABLE: "false" + ANSIBLE_LOG_LEVEL: "-vv" + RECOVER_CONTROL_PLANE_TEST: "false" + RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]" + TERRAFORM_VERSION: 1.0.8 + ANSIBLE_MAJOR_VERSION: "2.11" + +before_script: + - ./tests/scripts/rebase.sh + - update-alternatives --install /usr/bin/python python /usr/bin/python3 1 + - python -m pip uninstall -y ansible ansible-base ansible-core + - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt + - mkdir -p /.ssh + +.job: &job + tags: + - packet + image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION + artifacts: + when: always + paths: + - cluster-dump/ + +.testcases: &testcases + <<: *job + retry: 1 + before_script: + - update-alternatives --install /usr/bin/python python /usr/bin/python3 1 + - ./tests/scripts/rebase.sh + - ./tests/scripts/testcases_prepare.sh + script: + - ./tests/scripts/testcases_run.sh + after_script: + - chronic ./tests/scripts/testcases_cleanup.sh + +# For failfast, at least 1 job must be defined in .gitlab-ci.yml +# Premoderated with manual actions +ci-authorized: + extends: .job + stage: moderator + script: + - /bin/sh scripts/premoderator.sh + except: ['triggers', 'master'] + # Disable ci moderator + only: [] + +include: + - .gitlab-ci/lint.yml + - .gitlab-ci/shellcheck.yml + - .gitlab-ci/terraform.yml + - .gitlab-ci/packet.yml + - .gitlab-ci/vagrant.yml + - .gitlab-ci/molecule.yml diff --git a/kubespray/.gitlab-ci/lint.yml b/kubespray/.gitlab-ci/lint.yml new file mode 100644 index 0000000..c9e1bde --- /dev/null +++ b/kubespray/.gitlab-ci/lint.yml @@ -0,0 +1,90 @@ +--- +yamllint: + extends: .job + stage: unit-tests + tags: [light] + variables: + LANG: C.UTF-8 + script: + - yamllint --strict . 
+ except: ['triggers', 'master'] + +vagrant-validate: + extends: .job + stage: unit-tests + tags: [light] + variables: + VAGRANT_VERSION: 2.2.19 + script: + - ./tests/scripts/vagrant-validate.sh + except: ['triggers', 'master'] + +ansible-lint: + extends: .job + stage: unit-tests + tags: [light] + script: + - ansible-lint -v + except: ['triggers', 'master'] + +syntax-check: + extends: .job + stage: unit-tests + tags: [light] + variables: + ANSIBLE_INVENTORY: inventory/local-tests.cfg + ANSIBLE_REMOTE_USER: root + ANSIBLE_BECOME: "true" + ANSIBLE_BECOME_USER: root + ANSIBLE_VERBOSITY: "3" + script: + - ansible-playbook --syntax-check cluster.yml + - ansible-playbook --syntax-check upgrade-cluster.yml + - ansible-playbook --syntax-check reset.yml + - ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml + except: ['triggers', 'master'] + +tox-inventory-builder: + stage: unit-tests + tags: [light] + extends: .job + before_script: + - ./tests/scripts/rebase.sh + - apt-get update && apt-get install -y python3-pip + - update-alternatives --install /usr/bin/python python /usr/bin/python3 10 + - python -m pip uninstall -y ansible ansible-base ansible-core + - python -m pip install -r tests/requirements.txt + script: + - pip3 install tox + - cd contrib/inventory_builder && tox + except: ['triggers', 'master'] + +markdownlint: + stage: unit-tests + tags: [light] + image: node + before_script: + - npm install -g markdownlint-cli@0.22.0 + script: + - markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md + +check-readme-versions: + stage: unit-tests + tags: [light] + image: python:3 + script: + - tests/scripts/check_readme_versions.sh + +check-typo: + stage: unit-tests + tags: [light] + image: python:3 + script: + - tests/scripts/check_typo.sh + +ci-matrix: + stage: unit-tests + tags: [light] + image: python:3 + script: + - tests/scripts/md-table/test.sh diff --git a/kubespray/.gitlab-ci/molecule.yml b/kubespray/.gitlab-ci/molecule.yml new file mode 100644 index 0000000..346bf18 --- /dev/null +++ b/kubespray/.gitlab-ci/molecule.yml @@ -0,0 +1,86 @@ +--- + +.molecule: + tags: [c3.small.x86] + only: [/^pr-.*$/] + except: ['triggers'] + image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION + services: [] + stage: deploy-part1 + before_script: + - tests/scripts/rebase.sh + - apt-get update && apt-get install -y python3-pip + - update-alternatives --install /usr/bin/python python /usr/bin/python3 10 + - python -m pip uninstall -y ansible ansible-base ansible-core + - python -m pip install -r tests/requirements.txt + - ./tests/scripts/vagrant_clean.sh + script: + - ./tests/scripts/molecule_run.sh + after_script: + - chronic ./tests/scripts/molecule_logs.sh + artifacts: + when: always + paths: + - molecule_logs/ + +# CI template for periodic CI jobs +# Enabled when PERIODIC_CI_ENABLED var is set +.molecule_periodic: + only: + variables: + - $PERIODIC_CI_ENABLED + allow_failure: true + extends: .molecule + +molecule_full: + extends: .molecule_periodic + +molecule_no_container_engines: + extends: .molecule + script: + - ./tests/scripts/molecule_run.sh -e container-engine + when: on_success + +molecule_docker: + extends: .molecule + script: + - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd + when: on_success + +molecule_containerd: + extends: .molecule + script: + - ./tests/scripts/molecule_run.sh -i container-engine/containerd + when: on_success + +molecule_cri-o: + extends: .molecule + stage: deploy-part2 + script: + - 
./tests/scripts/molecule_run.sh -i container-engine/cri-o + when: on_success + +# Stage 3 container engines don't get as much attention so allow them to fail +molecule_kata: + extends: .molecule + stage: deploy-part3 + allow_failure: true + script: + - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers + when: on_success + +molecule_gvisor: + extends: .molecule + stage: deploy-part3 + allow_failure: true + script: + - ./tests/scripts/molecule_run.sh -i container-engine/gvisor + when: on_success + +molecule_youki: + extends: .molecule + stage: deploy-part3 + allow_failure: true + script: + - ./tests/scripts/molecule_run.sh -i container-engine/youki + when: on_success diff --git a/kubespray/.gitlab-ci/packet.yml b/kubespray/.gitlab-ci/packet.yml new file mode 100644 index 0000000..47b4690 --- /dev/null +++ b/kubespray/.gitlab-ci/packet.yml @@ -0,0 +1,328 @@ +--- +.packet: + extends: .testcases + variables: + ANSIBLE_TIMEOUT: "120" + CI_PLATFORM: packet + SSH_USER: kubespray + tags: + - packet + except: [triggers] + +# CI template for PRs +.packet_pr: + only: [/^pr-.*$/] + extends: .packet + +# CI template for periodic CI jobs +# Enabled when PERIODIC_CI_ENABLED var is set +.packet_periodic: + only: + variables: + - $PERIODIC_CI_ENABLED + allow_failure: true + extends: .packet + +# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken +packet_ubuntu20-calico-aio: + stage: deploy-part1 + extends: .packet_pr + when: on_success + variables: + RESET_CHECK: "true" + +packet_ubuntu20-calico-aio-ansible-2_11: + stage: deploy-part1 + extends: .packet_periodic + when: on_success + variables: + ANSIBLE_MAJOR_VERSION: "2.11" + RESET_CHECK: "true" + +# ### PR JOBS PART2 + +packet_ubuntu18-aio-docker: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_ubuntu20-aio-docker: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_ubuntu20-calico-aio-hardening: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_ubuntu18-calico-aio: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_ubuntu22-aio-docker: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_ubuntu22-calico-aio: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_centos7-flannel-addons-ha: + extends: .packet_pr + stage: deploy-part2 + when: on_success + +packet_almalinux8-crio: + extends: .packet_pr + stage: deploy-part2 + when: on_success + +packet_ubuntu18-crio: + extends: .packet_pr + stage: deploy-part2 + when: manual + +packet_fedora35-crio: + extends: .packet_pr + stage: deploy-part2 + when: manual + +packet_ubuntu16-canal-ha: + stage: deploy-part2 + extends: .packet_periodic + when: on_success + +packet_ubuntu16-canal-sep: + stage: deploy-special + extends: .packet_pr + when: manual + +packet_ubuntu16-flannel-ha: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_debian10-cilium-svc-proxy: + stage: deploy-part2 + extends: .packet_periodic + when: on_success + +packet_debian10-calico: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_debian10-docker: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_debian11-calico: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_debian11-docker: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_centos7-calico-ha-once-localhost: + stage: deploy-part2 + extends: .packet_pr + when: on_success + 
variables: + # This will instruct Docker not to start over TLS. + DOCKER_TLS_CERTDIR: "" + services: + - docker:19.03.9-dind + +packet_almalinux8-kube-ovn: + stage: deploy-part2 + extends: .packet_periodic + when: on_success + +packet_almalinux8-calico: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_rockylinux8-calico: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_rockylinux9-calico: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_almalinux8-docker: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_fedora36-docker-weave: + stage: deploy-part2 + extends: .packet_pr + when: on_success + +packet_opensuse-canal: + stage: deploy-part2 + extends: .packet_periodic + when: on_success + +packet_opensuse-docker-cilium: + stage: deploy-part2 + extends: .packet_pr + when: manual + +# ### MANUAL JOBS + +packet_ubuntu16-docker-weave-sep: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_ubuntu18-cilium-sep: + stage: deploy-special + extends: .packet_pr + when: manual + +packet_ubuntu18-flannel-ha: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_ubuntu18-flannel-ha-once: + stage: deploy-part2 + extends: .packet_pr + when: manual + +# Calico HA eBPF +packet_almalinux8-calico-ha-ebpf: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_debian9-macvlan: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_centos7-calico-ha: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_centos7-multus-calico: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_centos7-canal-ha: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_fedora36-docker-calico: + stage: deploy-part2 + extends: .packet_periodic + when: on_success + variables: + RESET_CHECK: "true" + +packet_fedora35-calico-selinux: + stage: deploy-part2 + extends: .packet_periodic + when: on_success + +packet_fedora35-calico-swap-selinux: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_amazon-linux-2-aio: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_almalinux8-calico-nodelocaldns-secondary: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_fedora36-kube-ovn: + stage: deploy-part2 + extends: .packet_periodic + when: on_success + +# ### PR JOBS PART3 +# Long jobs (45min+) + +packet_centos7-weave-upgrade-ha: + stage: deploy-part3 + extends: .packet_periodic + when: on_success + variables: + UPGRADE_TEST: basic + +packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha: + stage: deploy-part3 + extends: .packet_periodic + when: on_success + variables: + UPGRADE_TEST: basic + +# Calico HA Wireguard +packet_ubuntu20-calico-ha-wireguard: + stage: deploy-part2 + extends: .packet_pr + when: manual + +packet_debian11-calico-upgrade: + stage: deploy-part3 + extends: .packet_pr + when: on_success + variables: + UPGRADE_TEST: graceful + +packet_almalinux8-calico-remove-node: + stage: deploy-part3 + extends: .packet_pr + when: on_success + variables: + REMOVE_NODE_CHECK: "true" + REMOVE_NODE_NAME: "instance-3" + +packet_ubuntu20-calico-etcd-kubeadm: + stage: deploy-part3 + extends: .packet_pr + when: on_success + +packet_debian11-calico-upgrade-once: + stage: deploy-part3 + extends: .packet_periodic + when: on_success + variables: + UPGRADE_TEST: graceful + +packet_ubuntu18-calico-ha-recover: + stage: deploy-part3 + extends: .packet_periodic + when: on_success + variables: + RECOVER_CONTROL_PLANE_TEST: 
"true" + RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]" + +packet_ubuntu18-calico-ha-recover-noquorum: + stage: deploy-part3 + extends: .packet_periodic + when: on_success + variables: + RECOVER_CONTROL_PLANE_TEST: "true" + RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]" diff --git a/kubespray/.gitlab-ci/shellcheck.yml b/kubespray/.gitlab-ci/shellcheck.yml new file mode 100644 index 0000000..307e121 --- /dev/null +++ b/kubespray/.gitlab-ci/shellcheck.yml @@ -0,0 +1,16 @@ +--- +shellcheck: + extends: .job + stage: unit-tests + tags: [light] + variables: + SHELLCHECK_VERSION: v0.7.1 + before_script: + - ./tests/scripts/rebase.sh + - curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv + - cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/ + - shellcheck --version + script: + # Run shellcheck for all *.sh + - find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error + except: ['triggers', 'master'] diff --git a/kubespray/.gitlab-ci/terraform.yml b/kubespray/.gitlab-ci/terraform.yml new file mode 100644 index 0000000..8ffb111 --- /dev/null +++ b/kubespray/.gitlab-ci/terraform.yml @@ -0,0 +1,235 @@ +--- +# Tests for contrib/terraform/ +.terraform_install: + extends: .job + before_script: + - update-alternatives --install /usr/bin/python python /usr/bin/python3 1 + - ./tests/scripts/rebase.sh + - ./tests/scripts/testcases_prepare.sh + - ./tests/scripts/terraform_install.sh + # Set Ansible config + - cp ansible.cfg ~/.ansible.cfg + # Prepare inventory + - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars . + - ln -s contrib/terraform/$PROVIDER/hosts + - terraform -chdir="contrib/terraform/$PROVIDER" init + # Copy SSH keypair + - mkdir -p ~/.ssh + - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa + - chmod 400 ~/.ssh/id_rsa + - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub + - mkdir -p contrib/terraform/$PROVIDER/group_vars + # Random subnet to avoid routing conflicts + - export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24" + +.terraform_validate: + extends: .terraform_install + stage: unit-tests + tags: [light] + only: ['master', /^pr-.*$/] + script: + - terraform -chdir="contrib/terraform/$PROVIDER" validate + - terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff + +.terraform_apply: + extends: .terraform_install + tags: [light] + stage: deploy-part3 + when: manual + only: [/^pr-.*$/] + artifacts: + when: always + paths: + - cluster-dump/ + variables: + ANSIBLE_INVENTORY_UNPARSED_FAILED: "true" + ANSIBLE_INVENTORY: hosts + CI_PLATFORM: tf + TF_VAR_ssh_user: $SSH_USER + TF_VAR_cluster_name: $CI_JOB_ID + script: + - tests/scripts/testcases_run.sh + after_script: + # Cleanup regardless of exit code + - chronic ./tests/scripts/testcases_cleanup.sh + +tf-validate-openstack: + extends: .terraform_validate + variables: + TF_VERSION: $TERRAFORM_VERSION + PROVIDER: openstack + CLUSTER: $CI_COMMIT_REF_NAME + +tf-validate-metal: + extends: .terraform_validate + variables: + TF_VERSION: $TERRAFORM_VERSION + PROVIDER: metal + CLUSTER: $CI_COMMIT_REF_NAME + +tf-validate-aws: + extends: .terraform_validate + variables: + TF_VERSION: $TERRAFORM_VERSION + PROVIDER: aws + CLUSTER: $CI_COMMIT_REF_NAME + +tf-validate-exoscale: + extends: .terraform_validate + variables: + TF_VERSION: $TERRAFORM_VERSION + PROVIDER: exoscale + +tf-validate-vsphere: + extends: 
.terraform_validate + variables: + TF_VERSION: $TERRAFORM_VERSION + PROVIDER: vsphere + CLUSTER: $CI_COMMIT_REF_NAME + +tf-validate-upcloud: + extends: .terraform_validate + variables: + TF_VERSION: $TERRAFORM_VERSION + PROVIDER: upcloud + CLUSTER: $CI_COMMIT_REF_NAME + +# tf-packet-ubuntu16-default: +# extends: .terraform_apply +# variables: +# TF_VERSION: $TERRAFORM_VERSION +# PROVIDER: packet +# CLUSTER: $CI_COMMIT_REF_NAME +# TF_VAR_number_of_k8s_masters: "1" +# TF_VAR_number_of_k8s_nodes: "1" +# TF_VAR_plan_k8s_masters: t1.small.x86 +# TF_VAR_plan_k8s_nodes: t1.small.x86 +# TF_VAR_facility: ewr1 +# TF_VAR_public_key_path: "" +# TF_VAR_operating_system: ubuntu_16_04 +# +# tf-packet-ubuntu18-default: +# extends: .terraform_apply +# variables: +# TF_VERSION: $TERRAFORM_VERSION +# PROVIDER: packet +# CLUSTER: $CI_COMMIT_REF_NAME +# TF_VAR_number_of_k8s_masters: "1" +# TF_VAR_number_of_k8s_nodes: "1" +# TF_VAR_plan_k8s_masters: t1.small.x86 +# TF_VAR_plan_k8s_nodes: t1.small.x86 +# TF_VAR_facility: ams1 +# TF_VAR_public_key_path: "" +# TF_VAR_operating_system: ubuntu_18_04 + +.ovh_variables: &ovh_variables + OS_AUTH_URL: https://auth.cloud.ovh.net/v3 + OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe + OS_PROJECT_NAME: "9361447987648822" + OS_USER_DOMAIN_NAME: Default + OS_PROJECT_DOMAIN_ID: default + OS_USERNAME: 8XuhBMfkKVrk + OS_REGION_NAME: UK1 + OS_INTERFACE: public + OS_IDENTITY_API_VERSION: "3" + +# Elastx is generously donating resources for Kubespray on Openstack CI +# Contacts: @gix @bl0m1 +.elastx_variables: &elastx_variables + OS_AUTH_URL: https://ops.elastx.cloud:5000 + OS_PROJECT_ID: 564c6b461c6b44b1bb19cdb9c2d928e4 + OS_PROJECT_NAME: kubespray_ci + OS_USER_DOMAIN_NAME: Default + OS_PROJECT_DOMAIN_ID: default + OS_USERNAME: kubespray@root314.com + OS_REGION_NAME: se-sto + OS_INTERFACE: public + OS_IDENTITY_API_VERSION: "3" + TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df" + +tf-elastx_cleanup: + stage: unit-tests + tags: [light] + image: python + variables: + <<: *elastx_variables + before_script: + - pip install -r scripts/openstack-cleanup/requirements.txt + script: + - ./scripts/openstack-cleanup/main.py + +tf-elastx_ubuntu18-calico: + extends: .terraform_apply + stage: deploy-part3 + when: on_success + allow_failure: true + variables: + <<: *elastx_variables + TF_VERSION: $TERRAFORM_VERSION + PROVIDER: openstack + CLUSTER: $CI_COMMIT_REF_NAME + ANSIBLE_TIMEOUT: "60" + SSH_USER: ubuntu + TF_VAR_number_of_k8s_masters: "1" + TF_VAR_number_of_k8s_masters_no_floating_ip: "0" + TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0" + TF_VAR_number_of_etcd: "0" + TF_VAR_number_of_k8s_nodes: "1" + TF_VAR_number_of_k8s_nodes_no_floating_ip: "0" + TF_VAR_number_of_gfs_nodes_no_floating_ip: "0" + TF_VAR_number_of_bastions: "0" + TF_VAR_number_of_k8s_masters_no_etcd: "0" + TF_VAR_floatingip_pool: "elx-public1" + TF_VAR_dns_nameservers: '["1.1.1.1", "8.8.8.8", "8.8.4.4"]' + TF_VAR_use_access_ip: "0" + TF_VAR_external_net: "600b8501-78cb-4155-9c9f-23dfcba88828" + TF_VAR_network_name: "ci-$CI_JOB_ID" + TF_VAR_az_list: '["sto1"]' + TF_VAR_az_list_node: '["sto1"]' + TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2 + TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2 + TF_VAR_image: ubuntu-18.04-server-latest + TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]' + +# OVH voucher expired, commenting job until things are sorted out + +# tf-ovh_cleanup: +# stage: unit-tests +# tags: [light] +# image: python +# environment: ovh +# 
variables: +# <<: *ovh_variables +# before_script: +# - pip install -r scripts/openstack-cleanup/requirements.txt +# script: +# - ./scripts/openstack-cleanup/main.py + +# tf-ovh_ubuntu18-calico: +# extends: .terraform_apply +# when: on_success +# environment: ovh +# variables: +# <<: *ovh_variables +# TF_VERSION: $TERRAFORM_VERSION +# PROVIDER: openstack +# CLUSTER: $CI_COMMIT_REF_NAME +# ANSIBLE_TIMEOUT: "60" +# SSH_USER: ubuntu +# TF_VAR_number_of_k8s_masters: "0" +# TF_VAR_number_of_k8s_masters_no_floating_ip: "1" +# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0" +# TF_VAR_number_of_etcd: "0" +# TF_VAR_number_of_k8s_nodes: "0" +# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1" +# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0" +# TF_VAR_number_of_bastions: "0" +# TF_VAR_number_of_k8s_masters_no_etcd: "0" +# TF_VAR_use_neutron: "0" +# TF_VAR_floatingip_pool: "Ext-Net" +# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b" +# TF_VAR_network_name: "Ext-Net" +# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8 +# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8 +# TF_VAR_image: "Ubuntu 18.04" +# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]' diff --git a/kubespray/.gitlab-ci/vagrant.yml b/kubespray/.gitlab-ci/vagrant.yml new file mode 100644 index 0000000..4f7bd9e --- /dev/null +++ b/kubespray/.gitlab-ci/vagrant.yml @@ -0,0 +1,67 @@ +--- + +.vagrant: + extends: .testcases + variables: + CI_PLATFORM: "vagrant" + SSH_USER: "vagrant" + VAGRANT_DEFAULT_PROVIDER: "libvirt" + KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb + tags: [c3.small.x86] + only: [/^pr-.*$/] + except: ['triggers'] + image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION + services: [] + before_script: + - apt-get update && apt-get install -y python3-pip + - update-alternatives --install /usr/bin/python python /usr/bin/python3 10 + - python -m pip uninstall -y ansible ansible-base ansible-core + - python -m pip install -r tests/requirements.txt + - ./tests/scripts/vagrant_clean.sh + script: + - ./tests/scripts/testcases_run.sh + after_script: + - chronic ./tests/scripts/testcases_cleanup.sh + allow_failure: true + +vagrant_ubuntu18-calico-dual-stack: + stage: deploy-part2 + extends: .vagrant + when: on_success + +vagrant_ubuntu18-flannel: + stage: deploy-part2 + extends: .vagrant + when: on_success + +vagrant_ubuntu18-weave-medium: + stage: deploy-part2 + extends: .vagrant + when: manual + +vagrant_ubuntu20-flannel: + stage: deploy-part2 + extends: .vagrant + when: on_success + allow_failure: false + +vagrant_ubuntu16-kube-router-sep: + stage: deploy-part2 + extends: .vagrant + when: manual + +# Service proxy test fails connectivity testing +vagrant_ubuntu16-kube-router-svc-proxy: + stage: deploy-part2 + extends: .vagrant + when: manual + +vagrant_fedora35-kube-router: + stage: deploy-part2 + extends: .vagrant + when: on_success + +vagrant_centos7-kube-router: + stage: deploy-part2 + extends: .vagrant + when: manual diff --git a/kubespray/.gitmodules b/kubespray/.gitmodules new file mode 100644 index 0000000..e69de29 diff --git a/kubespray/.markdownlint.yaml b/kubespray/.markdownlint.yaml new file mode 100644 index 0000000..8ece4c7 --- /dev/null +++ b/kubespray/.markdownlint.yaml @@ -0,0 +1,3 @@ +--- +MD013: false +MD029: false diff --git a/kubespray/.nojekyll b/kubespray/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/kubespray/.pre-commit-config.yaml b/kubespray/.pre-commit-config.yaml new file mode 100644 index 0000000..28cf7a9 --- 
/dev/null +++ b/kubespray/.pre-commit-config.yaml @@ -0,0 +1,48 @@ +--- +repos: + - repo: https://github.com/adrienverge/yamllint.git + rev: v1.27.1 + hooks: + - id: yamllint + args: [--strict] + + - repo: https://github.com/markdownlint/markdownlint + rev: v0.11.0 + hooks: + - id: markdownlint + args: [ -r, "~MD013,~MD029" ] + exclude: "^.git" + + - repo: local + hooks: + - id: ansible-lint + name: ansible-lint + entry: ansible-lint -v + language: python + pass_filenames: false + additional_dependencies: + - .[community] + + - id: ansible-syntax-check + name: ansible-syntax-check + entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check + language: python + files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml" + + - id: tox-inventory-builder + name: tox-inventory-builder + entry: bash -c "cd contrib/inventory_builder && tox" + language: python + pass_filenames: false + + - id: check-readme-versions + name: check-readme-versions + entry: tests/scripts/check_readme_versions.sh + language: script + pass_filenames: false + + - id: ci-matrix + name: ci-matrix + entry: tests/scripts/md-table/test.sh + language: script + pass_filenames: false diff --git a/kubespray/.yamllint b/kubespray/.yamllint new file mode 100644 index 0000000..01d8b33 --- /dev/null +++ b/kubespray/.yamllint @@ -0,0 +1,19 @@ +--- +extends: default + +ignore: | + .git/ + +rules: + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 1 + indentation: + spaces: 2 + indent-sequences: consistent + line-length: disable + new-line-at-end-of-file: disable + truthy: disable diff --git a/kubespray/CNAME b/kubespray/CNAME new file mode 100644 index 0000000..4d47144 --- /dev/null +++ b/kubespray/CNAME @@ -0,0 +1 @@ +kubespray.io \ No newline at end of file diff --git a/kubespray/CONTRIBUTING.md b/kubespray/CONTRIBUTING.md new file mode 100644 index 0000000..6986c0f --- /dev/null +++ b/kubespray/CONTRIBUTING.md @@ -0,0 +1,46 @@ +# Contributing guidelines + +## How to become a contributor and submit your own code + +### Environment setup + +It is recommended to use filter to manage the GitHub email notification, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications) + +To install development dependencies you can set up a python virtual env with the necessary dependencies: + +```ShellSession +virtualenv venv +source venv/bin/activate +pip install -r tests/requirements.txt +``` + +#### Linting + +Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR. + +```ShellSession +pre-commit install +pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified +``` + +#### Molecule + +[molecule](https://github.com/ansible-community/molecule) is designed to help the development and testing of Ansible roles. In Kubespray you can run it all for all roles with `./tests/scripts/molecule_run.sh` or for a specific role (that you are working with) with `molecule test` from the role directory (`cd roles/my-role`). 
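For example, to run the full molecule suite or only the containerd role (one of the scenarios the CI molecule jobs exercise):

```ShellSession
# All roles that ship a molecule scenario
./tests/scripts/molecule_run.sh

# A single role, from its own directory
cd roles/container-engine/containerd
molecule test
```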
+ +When developing or debugging a role it can be useful to run `molecule create` and `molecule converge` separately. Then you can use `molecule login` to SSH into the test environment. + +#### Vagrant + +Vagrant with VirtualBox or libvirt driver helps you to quickly spin test clusters to test things end to end. See [README.md#vagrant](README.md) + +### Contributing A Patch + +1. Submit an issue describing your proposed change to the repo in question. +2. The [repo owners](OWNERS) will respond to your issue promptly. +3. Fork the desired repo, develop and test your code changes. +4. Install [pre-commit](https://pre-commit.com) and set it up in your development repo. +5. Address any pre-commit validation failures. +6. Sign the CNCF CLA () +7. Submit a pull request. +8. Work with the reviewers on their suggestions. +9. Ensure you rebase onto the HEAD of your target branch and squash unnecessary commits () before the final merge of your contribution. diff --git a/kubespray/Dockerfile b/kubespray/Dockerfile new file mode 100644 index 0000000..5645e8d --- /dev/null +++ b/kubespray/Dockerfile @@ -0,0 +1,37 @@ +# Use immutable image tags rather than mutable tags (like ubuntu:20.04) +FROM ubuntu:focal-20220531 + +ARG ARCH=amd64 +ARG TZ=Etc/UTC +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +RUN apt update -y \ + && apt install -y \ + libssl-dev python3-dev sshpass apt-transport-https jq moreutils \ + ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \ + && rm -rf /var/lib/apt/lists/* +RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ + && add-apt-repository \ + "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" \ + && apt update -y && apt-get install --no-install-recommends -y docker-ce \ + && rm -rf /var/lib/apt/lists/* + +# Some tools like yamllint need this +# Pip needs this as well at the moment to install ansible +# (and potentially other packages) +# See: https://github.com/pypa/pip/issues/10219 +ENV LANG=C.UTF-8 + +WORKDIR /kubespray +COPY . . +RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \ + && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \ + && python3 -m pip install --no-cache-dir -r requirements.txt \ + && update-alternatives --install /usr/bin/python python /usr/bin/python3 1 + +RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \ + && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \ + && chmod a+x kubectl \ + && mv kubectl /usr/local/bin/kubectl diff --git a/kubespray/LICENSE b/kubespray/LICENSE new file mode 100644 index 0000000..51004ad --- /dev/null +++ b/kubespray/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Kubespray + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubespray/Makefile b/kubespray/Makefile new file mode 100644 index 0000000..793e763 --- /dev/null +++ b/kubespray/Makefile @@ -0,0 +1,7 @@ +mitogen: + @echo Mitogen support is deprecated. + @echo Please run the following command manually: + @echo ansible-playbook -c local mitogen.yml -vv +clean: + rm -rf dist/ + rm *.retry diff --git a/kubespray/OWNERS b/kubespray/OWNERS new file mode 100644 index 0000000..a52158c --- /dev/null +++ b/kubespray/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - kubespray-approvers +reviewers: + - kubespray-reviewers +emeritus_approvers: + - kubespray-emeritus_approvers \ No newline at end of file diff --git a/kubespray/OWNERS_ALIASES b/kubespray/OWNERS_ALIASES new file mode 100644 index 0000000..82449c7 --- /dev/null +++ b/kubespray/OWNERS_ALIASES @@ -0,0 +1,26 @@ +aliases: + kubespray-approvers: + - mattymo + - chadswen + - mirwan + - miouge1 + - luckysb + - floryut + - oomichi + - cristicalin + - liupeng0518 + - yankay + kubespray-reviewers: + - holmsten + - bozzo + - eppo + - oomichi + - jayonlau + - cristicalin + - liupeng0518 + - yankay + kubespray-emeritus_approvers: + - riverzhang + - atoms + - ant31 + - woopstar diff --git a/kubespray/README.md b/kubespray/README.md new file mode 100644 index 0000000..9d8691e --- /dev/null +++ b/kubespray/README.md @@ -0,0 +1,261 @@ +# Deploy a Production Ready Kubernetes Cluster + +![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-sigs/kubespray/master/docs/img/kubernetes-logo.png) + +If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**. +You can get your invite [here](http://slack.k8s.io/) + +- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Equinix Metal](docs/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal** +- **Highly available** cluster +- **Composable** (Choice of the network plugin for instance) +- Supports most popular **Linux distributions** +- **Continuous integration tests** + +## Quick Start + +To deploy the cluster you can use : + +### Ansible + +#### Usage + +Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible) +then run the following steps: + +```ShellSession +# Copy ``inventory/sample`` as ``inventory/mycluster`` +cp -rfp inventory/sample inventory/mycluster + +# Update Ansible inventory file with inventory builder +declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5) +CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]} + +# Review and change parameters under ``inventory/mycluster/group_vars`` +cat inventory/mycluster/group_vars/all/all.yml +cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml + +# Deploy Kubespray with Ansible Playbook - run the playbook as root +# The option `--become` is required, as for example writing SSL keys in /etc/, +# installing packages and interacting with various systemd daemons. +# Without --become the playbook will fail to run! +ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml +``` + +Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. 
`/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu). +As a consequence, the `ansible-playbook` command will fail with: + +```raw +ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path. +``` + +probably pointing to a task that depends on a module present in requirements.txt. + +One way of solving this would be to uninstall the Ansible package and then install it via pip, but this is not always possible. +A workaround consists of setting the `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of the pip packages installation location, which can be found in the Location field of the output of `pip show [package]`, before executing `ansible-playbook`. + +A simple way to ensure you get the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags). +You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this: + +```ShellSession +git checkout v2.20.0 +docker pull quay.io/kubespray/kubespray:v2.20.0 +docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \ + --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \ + quay.io/kubespray/kubespray:v2.20.0 bash +# Inside the container you may now run the kubespray playbooks: +ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml +``` + +### Vagrant + +For Vagrant, we need to install Python dependencies for provisioning tasks. +Check if Python and pip are installed: + +```ShellSession +python -V && pip -V +``` + +If this returns the version of the software, you're good to go.
If not, download and install Python from here + +Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible) +then run the following step: + +```ShellSession +vagrant up +``` + +## Documents + +- [Requirements](#requirements) +- [Kubespray vs ...](docs/comparisons.md) +- [Getting started](docs/getting-started.md) +- [Setting up your first cluster](docs/setting-up-your-first-cluster.md) +- [Ansible inventory and tags](docs/ansible.md) +- [Integration with existing ansible repo](docs/integration.md) +- [Deployment data variables](docs/vars.md) +- [DNS stack](docs/dns-stack.md) +- [HA mode](docs/ha-mode.md) +- [Network plugins](#network-plugins) +- [Vagrant install](docs/vagrant.md) +- [Flatcar Container Linux bootstrap](docs/flatcar.md) +- [Fedora CoreOS bootstrap](docs/fcos.md) +- [Debian Jessie setup](docs/debian.md) +- [openSUSE setup](docs/opensuse.md) +- [Downloaded artifacts](docs/downloads.md) +- [Cloud providers](docs/cloud.md) +- [OpenStack](docs/openstack.md) +- [AWS](docs/aws.md) +- [Azure](docs/azure.md) +- [vSphere](docs/vsphere.md) +- [Equinix Metal](docs/equinix-metal.md) +- [Large deployments](docs/large-deployments.md) +- [Adding/replacing a node](docs/nodes.md) +- [Upgrades basics](docs/upgrades.md) +- [Air-Gap installation](docs/offline-environment.md) +- [NTP](docs/ntp.md) +- [Hardening](docs/hardening.md) +- [Mirror](docs/mirror.md) +- [Roadmap](docs/roadmap.md) + +## Supported Linux Distributions + +- **Flatcar Container Linux by Kinvolk** +- **Debian** Bullseye, Buster, Jessie, Stretch +- **Ubuntu** 16.04, 18.04, 20.04, 22.04 +- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8) +- **Fedora** 35, 36 +- **Fedora CoreOS** (see [fcos Note](docs/fcos.md)) +- **openSUSE** Leap 15.x/Tumbleweed +- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8) +- **Alma Linux** [8, 9](docs/centos.md#centos-8) +- **Rocky Linux** [8, 9](docs/centos.md#centos-8) +- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md)) +- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md)) +- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md)) +- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md)) + +Note: Upstart/SysV init based OS types are not supported. + +## Supported Components + +- Core + - [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.5 + - [etcd](https://github.com/etcd-io/etcd) v3.5.6 + - [docker](https://www.docker.com/) v20.10 (see note) + - [containerd](https://containerd.io/) v1.6.14 + - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). 
Only on fedora, ubuntu and centos based OS) +- Network Plugin + - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1 + - [calico](https://github.com/projectcalico/calico) v3.24.5 + - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) + - [cilium](https://github.com/cilium/cilium) v1.12.1 + - [flannel](https://github.com/flannel-io/flannel) v0.19.2 + - [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7 + - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1 + - [multus](https://github.com/intel/multus-cni) v3.8 + - [weave](https://github.com/weaveworks/weave) v2.8.1 + - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5 +- Application + - [cert-manager](https://github.com/jetstack/cert-manager) v1.10.1 + - [coredns](https://github.com/coredns/coredns) v1.9.3 + - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1 + - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3 + - [argocd](https://argoproj.github.io/) v2.5.5 + - [helm](https://helm.sh/) v3.10.3 + - [metallb](https://metallb.universe.tf/) v0.12.1 + - [registry](https://github.com/distribution/distribution) v2.8.1 +- Storage Plugin + - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11 + - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11 + - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0 + - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0 + - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0 + - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0 + - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22 + - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0 + +## Container Runtime Notes + +- The available docker versions are 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning. +- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20) + +## Requirements + +- **Minimum required version of Kubernetes is v1.23** +- **Ansible v2.11+, Jinja 2.11+ and python-netaddr are installed on the machine that will run Ansible commands** +- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md)) +- The target servers are configured to allow **IPv4 forwarding**. +- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**. +- The **firewalls are not managed**: you'll need to implement your own rules the way you used to. + In order to avoid any issue during deployment, you should disable your firewall. +- If kubespray is run from a non-root user account, the correct privilege escalation method + should be configured on the target servers. Then the `ansible_become` flag + or command parameters `--become or -b` should be specified. + +Hardware: +These limits are safeguarded by Kubespray.
Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide. + +- Master + - Memory: 1500 MB +- Node + - Memory: 1024 MB + +## Network Plugins + +You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`) + +- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking. + +- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options + designed to give you the most efficient networking across a range of situations, including non-overlay + and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts, + pods, and (if using Istio and Envoy) applications at the service mesh layer. + +- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins. + +- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic. + +- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. + (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)). + +- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises. + +- [kube-router](docs/kube-router.md): Kube-router is an L3 CNI for Kubernetes networking aiming to provide operational + simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if set up to replace kube-proxy), + iptables for network policies, and BGP for pods' L3 networking (optionally with BGP peering with out-of-cluster BGP peers). + It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs. + +- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique MAC and IP address, connected directly to the physical (layer 2) network. + +- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc. + +The choice is defined with the variable `kube_network_plugin` (a minimal example follows the Ingress Plugins list below). There is also an +option to leverage built-in cloud provider networking instead. +See also [Network checker](docs/netcheck.md). + +## Ingress Plugins + +- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller. + +- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
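As a minimal sketch of that plugin selection (assuming the sample inventory copied in the Quick Start above; the exact variable file in your inventory may differ):

```ShellSession
# Check which network plugin the cluster group vars select (calico by default)
grep 'kube_network_plugin:' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Switch to e.g. flannel before running cluster.yml
sed -i 's/^kube_network_plugin: .*/kube_network_plugin: flannel/' \
  inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
```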
+ +## Community docs and resources + +- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/) +- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr +- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty +- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=CJ5G4GpqDy0) + +## Tools and projects on top of Kubespray + +- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst) +- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform) +- [Kubean](https://github.com/kubean-io/kubean) + +## CI Tests + +[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines) + +CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/). + +See the [test matrix](docs/test_cases.md) for details. diff --git a/kubespray/RELEASE.md b/kubespray/RELEASE.md new file mode 100644 index 0000000..05ea6c0 --- /dev/null +++ b/kubespray/RELEASE.md @@ -0,0 +1,83 @@ +# Release Process + +The Kubespray Project is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325) +2. At least one of the [approvers](OWNERS_ALIASES) must approve this release +3. The `kube_version_min_required` variable is set to `n-1` +4. Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables. +5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details. +6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes +7. An approver creates a release branch in the form `release-X.Y` +8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details. +9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml` +10. The release issue is closed +11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released` +12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...` + +## Major/minor releases and milestones + +* For major releases (vX.Y) Kubespray maintains one branch (`release-X.Y`). Minor releases (vX.Y.Z) are available only as tags. + +* Security patches and bugs might be backported. + +* Fixes for major releases (vX.Y) and minor releases (vX.Y.Z) are delivered + via maintenance releases (vX.Y.Z) and assigned to the corresponding open + [GitHub milestone](https://github.com/kubernetes-sigs/kubespray/milestones). 
+ That milestone remains open for the major/minor releases support lifetime, + which ends once the milestone is closed. Then only a next major or minor release + can be done. + +* Kubespray major and minor releases are bound to the given `kube_version` major/minor + version numbers and other components' arbitrary versions, like etcd or network plugins. + Older or newer component versions are not supported and not tested for the given + release (even if included in the checksum variables, like `kubeadm_checksums`). + +* There is no unstable releases and no APIs, thus Kubespray doesn't follow + [semver](https://semver.org/). Every version describes only a stable release. + Breaking changes, if any introduced by changed defaults or non-contrib ansible roles' + playbooks, shall be described in the release notes. Other breaking changes, if any in + the contributed addons or bound versions of Kubernetes and other components, are + considered out of Kubespray scope and are up to the components' teams to deal with and + document. + +* Minor releases can change components' versions, but not the major `kube_version`. + Greater `kube_version` requires a new major or minor release. For example, if Kubespray v2.0.0 + is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: v3.0.6`, + then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1 + and *any* changes to other components, like etcd v4, or calico 1.2.3. + And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively. + +## Release note creation + +You can create a release note with: + +```shell +export GITHUB_TOKEN= +export ORG=kubernetes-sigs +export REPO=kubespray +release-notes --start-sha --end-sha --dependencies=false --output=/tmp/kubespray-release-note --required-author="" +``` + +If the release note file(/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label(`kind/feature`, etc.). +It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note) + +## Container image creation + +The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from Dockerfile of the kubespray root directory: + +```shell +cd kubespray/ +nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z . +nerdctl push quay.io/kubespray/kubespray:vX.Y.Z +``` + +The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/: + +```shell +cd kubespray/test-infra/vagrant-docker/ +./build vX.Y.Z +``` + +Please note that the above operation requires the permission to push container images into quay.io/kubespray/. +If you don't have the permission, please ask it on the #kubespray-dev channel. diff --git a/kubespray/SECURITY_CONTACTS b/kubespray/SECURITY_CONTACTS new file mode 100644 index 0000000..21703b3 --- /dev/null +++ b/kubespray/SECURITY_CONTACTS @@ -0,0 +1,15 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ +mattymo +floryut +oomichi +cristicalin diff --git a/kubespray/Vagrantfile b/kubespray/Vagrantfile new file mode 100644 index 0000000..63292bd --- /dev/null +++ b/kubespray/Vagrantfile @@ -0,0 +1,275 @@ +# -*- mode: ruby -*- +# # vi: set ft=ruby : + +# For help on using kubespray with vagrant, check out docs/vagrant.md + +require 'fileutils' + +Vagrant.require_version ">= 2.0.0" + +CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb') + +FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json" + +# Uniq disk UUID for libvirt +DISK_UUID = Time.now.utc.to_i + +SUPPORTED_OS = { + "flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]}, + "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]}, + "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]}, + "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]}, + "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"}, + "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"}, + "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"}, + "centos" => {box: "centos/7", user: "vagrant"}, + "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"}, + "centos8" => {box: "centos/8", user: "vagrant"}, + "centos8-bento" => {box: "bento/centos-8", user: "vagrant"}, + "almalinux8" => {box: "almalinux/8", user: "vagrant"}, + "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"}, + "rockylinux8" => {box: "generic/rocky8", user: "vagrant"}, + "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"}, + "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"}, + "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"}, + "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"}, + "oraclelinux" => {box: "generic/oracle7", user: "vagrant"}, + "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"}, + "rhel7" => {box: "generic/rhel7", user: "vagrant"}, + "rhel8" => {box: "generic/rhel8", user: "vagrant"}, +} + +if File.exist?(CONFIG) + require CONFIG +end + +# Defaults for config options defined in CONFIG +$num_instances ||= 3 +$instance_name_prefix ||= "k8s" +$vm_gui ||= false +$vm_memory ||= 2048 +$vm_cpus ||= 2 +$shared_folders ||= {} +$forwarded_ports ||= {} +$subnet ||= "172.18.8" +$subnet_ipv6 ||= "fd3c:b398:0698:0756" +$os ||= "ubuntu1804" +$network_plugin ||= "flannel" +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +$multi_networking ||= "False" +$download_run_once ||= "True" +$download_force_cache ||= "False" +# The first three nodes are etcd servers +$etcd_instances ||= $num_instances +# The first two nodes are kube masters +$kube_master_instances ||= $num_instances == 1 ? 
$num_instances : ($num_instances - 1) +# All nodes are kube nodes +$kube_node_instances ||= $num_instances +# The following only works when using the libvirt provider +$kube_node_instances_with_disks ||= false +$kube_node_instances_with_disks_size ||= "20G" +$kube_node_instances_with_disks_number ||= 2 +$override_disk_size ||= false +$disk_size ||= "20GB" +$local_path_provisioner_enabled ||= "False" +$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/" +$libvirt_nested ||= false +# boolean or string (e.g. "-vvv") +$ansible_verbosity ||= false +$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || "" + +$playbook ||= "cluster.yml" + +host_vars = {} + +$box = SUPPORTED_OS[$os][:box] +# if $inventory is not set, try to use example +$inventory = "inventory/sample" if ! $inventory +$inventory = File.absolute_path($inventory, File.dirname(__FILE__)) + +# if $inventory has a hosts.ini file use it, otherwise copy over +# vars etc to where vagrant expects dynamic inventory to be +if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini")) + $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible") + FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible) + $vagrant_inventory = File.join($vagrant_ansible,"inventory") + FileUtils.rm_f($vagrant_inventory) + FileUtils.ln_s($inventory, $vagrant_inventory) +end + +if Vagrant.has_plugin?("vagrant-proxyconf") + $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost" + (1..$num_instances).each do |i| + $no_proxy += ",#{$subnet}.#{i+100}" + end +end + +Vagrant.configure("2") do |config| + + config.vm.box = $box + if SUPPORTED_OS[$os].has_key? :box_url + config.vm.box_url = SUPPORTED_OS[$os][:box_url] + end + config.ssh.username = SUPPORTED_OS[$os][:user] + + # plugin conflict + if Vagrant.has_plugin?("vagrant-vbguest") then + config.vbguest.auto_update = false + end + + # always use Vagrants insecure key + config.ssh.insert_key = false + + if ($override_disk_size) + unless Vagrant.has_plugin?("vagrant-disksize") + system "vagrant plugin install vagrant-disksize" + end + config.disksize.size = $disk_size + end + + (1..$num_instances).each do |i| + config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node| + + node.vm.hostname = vm_name + + if Vagrant.has_plugin?("vagrant-proxyconf") + node.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || "" + node.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || "" + node.proxy.no_proxy = $no_proxy + end + + ["vmware_fusion", "vmware_workstation"].each do |vmware| + node.vm.provider vmware do |v| + v.vmx['memsize'] = $vm_memory + v.vmx['numvcpus'] = $vm_cpus + end + end + + node.vm.provider :virtualbox do |vb| + vb.memory = $vm_memory + vb.cpus = $vm_cpus + vb.gui = $vm_gui + vb.linked_clone = true + vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM + vb.customize ["modifyvm", :id, "--audio", "none"] + end + + node.vm.provider :libvirt do |lv| + lv.nested = $libvirt_nested + lv.cpu_mode = "host-model" + lv.memory = $vm_memory + lv.cpus = $vm_cpus + lv.default_prefix = 'kubespray' + # Fix kernel panic on fedora 28 + if $os == "fedora" + lv.cpu_mode = "host-passthrough" + end + end + + if $kube_node_instances_with_disks + # Libvirt + driverletters = ('a'..'z').to_a + node.vm.provider :libvirt do |lv| + # always make /dev/sd{a/b/c} so that CI can ensure that + # virtualbox and libvirt will have the same devices to use for OSDs + 
(1..$kube_node_instances_with_disks_number).each do |d| + lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi" + end + end + end + + if $expose_docker_tcp + node.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true + end + + $forwarded_ports.each do |guest, host| + node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true + end + + if ["rhel7","rhel8"].include? $os + # Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot + # be installed until the host is registered with a valid Red Hat support subscription + node.vm.synced_folder ".", "/vagrant", disabled: false + $shared_folders.each do |src, dst| + node.vm.synced_folder src, dst + end + else + node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv'] + $shared_folders.each do |src, dst| + node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] + end + end + + ip = "#{$subnet}.#{i+100}" + node.vm.network :private_network, ip: ip, + :libvirt__guest_ipv6 => 'yes', + :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}", + :libvirt__ipv6_prefix => "64", + :libvirt__forward_mode => "none", + :libvirt__dhcp_enabled => false + + # Disable swap for each vm + node.vm.provision "shell", inline: "swapoff -a" + + # ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that. + if ["ubuntu1804", "ubuntu2004"].include? $os + node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf" + node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf" + end + + # Disable firewalld on oraclelinux/redhat vms + if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os + node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld" + end + + host_vars[vm_name] = { + "ip": ip, + "flannel_interface": "eth1", + "kube_network_plugin": $network_plugin, + "kube_network_plugin_multus": $multi_networking, + "download_run_once": $download_run_once, + "download_localhost": "False", + "download_cache_dir": ENV['HOME'] + "/kubespray_cache", + # Make kubespray cache even when download_run_once is false + "download_force_cache": $download_force_cache, + # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray + "download_keep_remote_cache": "False", + "docker_rpm_keepcache": "1", + # These two settings will put kubectl and admin.config in $inventory/artifacts + "kubeconfig_localhost": "True", + "kubectl_localhost": "True", + "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}", + "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}", + "ansible_ssh_user": SUPPORTED_OS[$os][:user] + } + + # Only execute the Ansible provisioner once, when all the machines are up and ready. 
+ # And limit the action to gathering facts, the full playbook is going to be ran by testcases_run.sh + if i == $num_instances + node.vm.provision "ansible" do |ansible| + ansible.playbook = $playbook + ansible.verbose = $ansible_verbosity + $ansible_inventory_path = File.join( $inventory, "hosts.ini") + if File.exist?($ansible_inventory_path) + ansible.inventory_path = $ansible_inventory_path + end + ansible.become = true + ansible.limit = "all,localhost" + ansible.host_key_checking = false + ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"] + ansible.host_vars = host_vars + if $ansible_tags != "" + ansible.tags = [$ansible_tags] + end + ansible.groups = { + "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"], + "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"], + "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"], + "k8s_cluster:children" => ["kube_control_plane", "kube_node"], + } + end + end + + end + end +end diff --git a/kubespray/_config.yml b/kubespray/_config.yml new file mode 100644 index 0000000..9b68669 --- /dev/null +++ b/kubespray/_config.yml @@ -0,0 +1,2 @@ +--- +theme: jekyll-theme-slate diff --git a/kubespray/ansible.cfg b/kubespray/ansible.cfg new file mode 100644 index 0000000..e28ce32 --- /dev/null +++ b/kubespray/ansible.cfg @@ -0,0 +1,22 @@ +[ssh_connection] +pipelining=True +ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null +#control_path = ~/.ssh/ansible-%%r@%%h:%%p +[defaults] +# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .) +force_valid_group_names = ignore + +host_key_checking=False +gathering = smart +fact_caching = jsonfile +fact_caching_connection = /tmp +fact_caching_timeout = 86400 +stdout_callback = default +display_skipped_hosts = no +library = ./library +callbacks_enabled = profile_tasks,ara_default +roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles +deprecation_warnings=False +inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg +[inventory] +ignore_patterns = artifacts, credentials diff --git a/kubespray/ansible_version.yml b/kubespray/ansible_version.yml new file mode 100644 index 0000000..151131f --- /dev/null +++ b/kubespray/ansible_version.yml @@ -0,0 +1,33 @@ +--- +- hosts: localhost + gather_facts: false + become: no + vars: + minimal_ansible_version: 2.11.0 + maximal_ansible_version: 2.13.0 + ansible_connection: local + tags: always + tasks: + - name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}" + assert: + msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive" + that: + - ansible_version.string is version(minimal_ansible_version, ">=") + - ansible_version.string is version(maximal_ansible_version, "<") + tags: + - check + + - name: "Check that python netaddr is installed" + assert: + msg: "Python netaddr is not present" + that: "'127.0.0.1' | ipaddr" + tags: + - check + + # CentOS 7 provides too old jinja version + - name: "Check that jinja is not too old (install via pip)" + assert: + msg: "Your Jinja version is too old, install via pip" + that: "{% set test %}It works{% endset %}{{ test == 'It works' }}" + tags: + - check diff --git a/kubespray/cluster.yml b/kubespray/cluster.yml new file mode 100644 index 
0000000..5f163de --- /dev/null +++ b/kubespray/cluster.yml @@ -0,0 +1,131 @@ +--- +- name: Check ansible version + import_playbook: ansible_version.yml + +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml + +- hosts: bastion[0] + gather_facts: False + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: bastion-ssh-config, tags: ["localhost", "bastion"] } + +- hosts: k8s_cluster:etcd + strategy: linear + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + gather_facts: false + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: bootstrap-os, tags: bootstrap-os} + +- name: Gather facts + tags: always + import_playbook: facts.yml + +- hosts: k8s_cluster:etcd + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: kubernetes/preinstall, tags: preinstall } + - { role: "container-engine", tags: "container-engine", when: deploy_container_engine } + - { role: download, tags: download, when: "not skip_downloads" } + +- hosts: etcd:kube_control_plane + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - role: etcd + tags: etcd + vars: + etcd_cluster_setup: true + etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" + when: etcd_deployment_type != "kubeadm" + +- hosts: k8s_cluster + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - role: etcd + tags: etcd + vars: + etcd_cluster_setup: false + etcd_events_cluster_setup: false + when: + - etcd_deployment_type != "kubeadm" + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + +- hosts: k8s_cluster + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: kubernetes/node, tags: node } + +- hosts: kube_control_plane + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: kubernetes/control-plane, tags: master } + - { role: kubernetes/client, tags: client } + - { role: kubernetes-apps/cluster_roles, tags: cluster-roles } + +- hosts: k8s_cluster + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: kubernetes/kubeadm, tags: kubeadm} + - { role: kubernetes/node-label, tags: node-label } + - { role: network_plugin, tags: network } + +- hosts: calico_rr + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] } + +- hosts: kube_control_plane[0] + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] } + +- hosts: kube_control_plane + 
gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller } + - { role: kubernetes-apps/network_plugin, tags: network } + - { role: kubernetes-apps/policy_controller, tags: policy-controller } + - { role: kubernetes-apps/ingress_controller, tags: ingress-controller } + - { role: kubernetes-apps/external_provisioner, tags: external-provisioner } + - { role: kubernetes-apps, tags: apps } + +- name: Apply resolv.conf changes now that cluster DNS is up + hosts: k8s_cluster + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true } diff --git a/kubespray/code-of-conduct.md b/kubespray/code-of-conduct.md new file mode 100644 index 0000000..0d15c00 --- /dev/null +++ b/kubespray/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/kubespray/contrib/aws_iam/kubernetes-master-policy.json b/kubespray/contrib/aws_iam/kubernetes-master-policy.json new file mode 100644 index 0000000..e5cbaea --- /dev/null +++ b/kubespray/contrib/aws_iam/kubernetes-master-policy.json @@ -0,0 +1,27 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": ["ec2:*"], + "Resource": ["*"] + }, + { + "Effect": "Allow", + "Action": ["elasticloadbalancing:*"], + "Resource": ["*"] + }, + { + "Effect": "Allow", + "Action": ["route53:*"], + "Resource": ["*"] + }, + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": [ + "arn:aws:s3:::kubernetes-*" + ] + } + ] +} diff --git a/kubespray/contrib/aws_iam/kubernetes-master-role.json b/kubespray/contrib/aws_iam/kubernetes-master-role.json new file mode 100644 index 0000000..66d5de1 --- /dev/null +++ b/kubespray/contrib/aws_iam/kubernetes-master-role.json @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/kubespray/contrib/aws_iam/kubernetes-minion-policy.json b/kubespray/contrib/aws_iam/kubernetes-minion-policy.json new file mode 100644 index 0000000..af81e98 --- /dev/null +++ b/kubespray/contrib/aws_iam/kubernetes-minion-policy.json @@ -0,0 +1,45 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": [ + "arn:aws:s3:::kubernetes-*" + ] + }, + { + "Effect": "Allow", + "Action": "ec2:Describe*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "ec2:AttachVolume", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "ec2:DetachVolume", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": ["route53:*"], + "Resource": ["*"] + }, + { + "Effect": "Allow", + "Action": [ + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": "*" + } + ] +} diff --git a/kubespray/contrib/aws_iam/kubernetes-minion-role.json b/kubespray/contrib/aws_iam/kubernetes-minion-role.json new file mode 100644 index 0000000..66d5de1 --- 
/dev/null +++ b/kubespray/contrib/aws_iam/kubernetes-minion-role.json @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/kubespray/contrib/aws_inventory/kubespray-aws-inventory.py b/kubespray/contrib/aws_inventory/kubespray-aws-inventory.py new file mode 100755 index 0000000..44a4a09 --- /dev/null +++ b/kubespray/contrib/aws_inventory/kubespray-aws-inventory.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python + +from __future__ import print_function +import boto3 +import os +import argparse +import json + +class SearchEC2Tags(object): + + def __init__(self): + self.parse_args() + if self.args.list: + self.search_tags() + if self.args.host: + data = {} + print(json.dumps(data, indent=2)) + + def parse_args(self): + + ##Check if VPC_VISIBILITY is set, if not default to private + if "VPC_VISIBILITY" in os.environ: + self.vpc_visibility = os.environ['VPC_VISIBILITY'] + else: + self.vpc_visibility = "private" + + ##Support --list and --host flags. We largely ignore the host one. + parser = argparse.ArgumentParser() + parser.add_argument('--list', action='store_true', default=False, help='List instances') + parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance') + self.args = parser.parse_args() + + def search_tags(self): + hosts = {} + hosts['_meta'] = { 'hostvars': {} } + + ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value. + for group in ["kube_control_plane", "kube_node", "etcd"]: + hosts[group] = [] + tag_key = "kubespray-role" + tag_value = ["*"+group+"*"] + region = os.environ['REGION'] + + ec2 = boto3.resource('ec2', region) + filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}] + cluster_name = os.getenv('CLUSTER_NAME') + if cluster_name: + filters.append({'Name': 'tag-key', 'Values': ['kubernetes.io/cluster/'+cluster_name]}) + instances = ec2.instances.filter(Filters=filters) + for instance in instances: + + ##Suppose default vpc_visibility is private + dns_name = instance.private_dns_name + ansible_host = { + 'ansible_ssh_host': instance.private_ip_address + } + + ##Override when vpc_visibility actually is public + if self.vpc_visibility == "public": + dns_name = instance.public_dns_name + ansible_host = { + 'ansible_ssh_host': instance.public_ip_address + } + + ##Set when instance actually has node_labels + node_labels_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-labels', instance.tags)) + if node_labels_tag: + ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ]) + + hosts[group].append(dns_name) + hosts['_meta']['hostvars'][dns_name] = ansible_host + + hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']} + print(json.dumps(hosts, sort_keys=True, indent=2)) + +SearchEC2Tags() diff --git a/kubespray/contrib/aws_inventory/requirements.txt b/kubespray/contrib/aws_inventory/requirements.txt new file mode 100644 index 0000000..4989ce6 --- /dev/null +++ b/kubespray/contrib/aws_inventory/requirements.txt @@ -0,0 +1 @@ +boto3 # Apache-2.0 \ No newline at end of file diff --git a/kubespray/contrib/azurerm/.gitignore b/kubespray/contrib/azurerm/.gitignore new file mode 100644 index 0000000..3a04fb2 --- /dev/null +++ b/kubespray/contrib/azurerm/.gitignore @@ -0,0 +1,2 @@ +.generated +/inventory \ No newline at end of 
file diff --git a/kubespray/contrib/azurerm/README.md b/kubespray/contrib/azurerm/README.md new file mode 100644 index 0000000..f24a5ec --- /dev/null +++ b/kubespray/contrib/azurerm/README.md @@ -0,0 +1,67 @@ +# Kubernetes on Azure with Azure Resource Group Templates + +Provision the base infrastructure for a Kubernetes cluster by using [Azure Resource Group Templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-authoring-templates) + +## Status + +This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified +Resource Group. It will not install Kubernetes itself; that has to be done in a later step by yourself (using kubespray, of course). + +## Requirements + +- [Install azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) +- [Login with azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest) +- Dedicated Resource Group created in the Azure Portal or through azure-cli + +## Configuration through group_vars/all + +You have to modify at least two variables in group_vars/all. The first is the **cluster_name** variable; it must be globally +unique due to some restrictions in Azure. The second is the **ssh_public_keys** variable; it must be your ssh public +key to access your Azure virtual machines. Most other variables should be self-explanatory if you have some basic Kubernetes +experience. + +## Bastion host + +You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated +templates will then include an additional bastion VM which can be used to connect to the masters and nodes. The option +also removes all public IPs from all other VMs. + +## Generating and applying + +To generate and apply the templates, call: + +```shell +./apply-rg.sh +``` + +If you change something in the configuration (e.g. the number of nodes) later, you can call this again and Azure will +take care of creating/modifying whatever is needed. + +## Clearing a resource group + +If you need to delete all resources from a resource group, simply call: + +```shell +./clear-rg.sh +``` + +**WARNING** This really deletes everything from your resource group, including everything that was later created by you!
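As a concrete example (the resource group name below is only a placeholder; use the dedicated resource group you created earlier), a full generate/apply and later teardown cycle run from the `contrib/azurerm` directory might look like this:

```shell
# Regenerate the ARM templates and deploy them into the existing resource group
./apply-rg.sh my-kube-rg

# Wipe everything deployed into that resource group again (see the warning above)
./clear-rg.sh my-kube-rg
```

Both scripts take the resource group name as their only argument and regenerate the templates (via `generate-templates.yml`) before calling the Azure CLI.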
+ +## Installing Ansible and the dependencies + +Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible) + +## Generating an inventory for kubespray + +After you have applied the templates, you can generate an inventory with this call: + +```shell +./generate-inventory.sh +``` + +It will create the file ./inventory which can then be used with kubespray, e.g.: + +```shell +cd kubespray-root-dir +ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml +``` diff --git a/kubespray/contrib/azurerm/apply-rg.sh b/kubespray/contrib/azurerm/apply-rg.sh new file mode 100755 index 0000000..2348169 --- /dev/null +++ b/kubespray/contrib/azurerm/apply-rg.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -e + +AZURE_RESOURCE_GROUP="$1" + +if [ "$AZURE_RESOURCE_GROUP" == "" ]; then + echo "AZURE_RESOURCE_GROUP is missing" + exit 1 +fi + +ansible-playbook generate-templates.yml + +az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP +az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP +az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP +az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP +az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP +az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP diff --git a/kubespray/contrib/azurerm/clear-rg.sh b/kubespray/contrib/azurerm/clear-rg.sh new file mode 100755 index 0000000..a200455 --- /dev/null +++ b/kubespray/contrib/azurerm/clear-rg.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -e + +AZURE_RESOURCE_GROUP="$1" + +if [ "$AZURE_RESOURCE_GROUP" == "" ]; then + echo "AZURE_RESOURCE_GROUP is missing" + exit 1 +fi + +ansible-playbook generate-templates.yml + +az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete diff --git a/kubespray/contrib/azurerm/generate-inventory.sh b/kubespray/contrib/azurerm/generate-inventory.sh new file mode 100755 index 0000000..b3eb9c0 --- /dev/null +++ b/kubespray/contrib/azurerm/generate-inventory.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -e + +AZURE_RESOURCE_GROUP="$1" + +if [ "$AZURE_RESOURCE_GROUP" == "" ]; then + echo "AZURE_RESOURCE_GROUP is missing" + exit 1 +fi +# check if azure cli 2.0 exists else use azure cli 1.0 +if az &>/dev/null; then + ansible-playbook generate-inventory_2.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP" +elif azure &>/dev/null; then + ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP" +else + echo "Azure cli not found" +fi diff --git a/kubespray/contrib/azurerm/generate-inventory.yml b/kubespray/contrib/azurerm/generate-inventory.yml new file mode 100644 index 0000000..2f5373d --- /dev/null +++ b/kubespray/contrib/azurerm/generate-inventory.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + gather_facts: False + roles: + - generate-inventory diff --git a/kubespray/contrib/azurerm/generate-inventory_2.yml b/kubespray/contrib/azurerm/generate-inventory_2.yml new file mode 100644 index 0000000..bec06c4 --- /dev/null +++ b/kubespray/contrib/azurerm/generate-inventory_2.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + gather_facts: False + roles: + - generate-inventory_2 diff --git a/kubespray/contrib/azurerm/generate-templates.yml 
b/kubespray/contrib/azurerm/generate-templates.yml new file mode 100644 index 0000000..3d4b1ca --- /dev/null +++ b/kubespray/contrib/azurerm/generate-templates.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + gather_facts: False + roles: + - generate-templates diff --git a/kubespray/contrib/azurerm/group_vars/all b/kubespray/contrib/azurerm/group_vars/all new file mode 100644 index 0000000..44dc1e3 --- /dev/null +++ b/kubespray/contrib/azurerm/group_vars/all @@ -0,0 +1,51 @@ + +# Due to some Azure limitations (ex:- Storage Account's name must be unique), +# this name must be globally unique - it will be used as a prefix for azure components +cluster_name: example + +# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion +# node that can be used to access the masters and minions +use_bastion: false + +# Set this to a preferred name that will be used as the first part of the dns name for your bastotion host. For example: k8s-bastion..cloudapp.azure.com. +# This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host. +# bastion_domain_prefix: k8s-bastion + +number_of_k8s_masters: 3 +number_of_k8s_nodes: 3 + +masters_vm_size: Standard_A2 +masters_os_disk_size: 1000 + +minions_vm_size: Standard_A2 +minions_os_disk_size: 1000 + +admin_username: devops +admin_password: changeme + +# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines +ssh_public_keys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy" + +# Disable using ssh using password. 
Change it to false to allow to connect to ssh by password +disablePasswordAuthentication: true + +# Azure CIDRs +azure_vnet_cidr: 10.0.0.0/8 +azure_admin_cidr: 10.241.2.0/24 +azure_masters_cidr: 10.0.4.0/24 +azure_minions_cidr: 10.240.0.0/16 + +# Azure loadbalancer port to use to access your cluster +kube_apiserver_port: 6443 + +# Azure Netwoking and storage naming to use with inventory/all.yml +#azure_virtual_network_name: KubeVNET +#azure_subnet_admin_name: ad-subnet +#azure_subnet_masters_name: master-subnet +#azure_subnet_minions_name: minion-subnet +#azure_route_table_name: routetable +#azure_security_group_name: secgroup + +# Storage types available are: "Standard_LRS","Premium_LRS" +#azure_storage_account_type: Standard_LRS diff --git a/kubespray/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/kubespray/contrib/azurerm/roles/generate-inventory/tasks/main.yml new file mode 100644 index 0000000..6176a34 --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-inventory/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: Query Azure VMs # noqa 301 + command: azure vm list-ip-address --json {{ azure_resource_group }} + register: vm_list_cmd + +- name: Set vm_list + set_fact: + vm_list: "{{ vm_list_cmd.stdout }}" + +- name: Generate inventory + template: + src: inventory.j2 + dest: "{{ playbook_dir }}/inventory" + mode: 0644 diff --git a/kubespray/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 b/kubespray/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 new file mode 100644 index 0000000..6c5feb2 --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 @@ -0,0 +1,33 @@ + +{% for vm in vm_list %} +{% if not use_bastion or vm.name == 'bastion' %} +{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].publicIPAddress.expanded.ipAddress }} ip={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }} +{% else %} +{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }} +{% endif %} +{% endfor %} + +[kube_control_plane] +{% for vm in vm_list %} +{% if 'kube_control_plane' in vm.tags.roles %} +{{ vm.name }} +{% endif %} +{% endfor %} + +[etcd] +{% for vm in vm_list %} +{% if 'etcd' in vm.tags.roles %} +{{ vm.name }} +{% endif %} +{% endfor %} + +[kube_node] +{% for vm in vm_list %} +{% if 'kube_node' in vm.tags.roles %} +{{ vm.name }} +{% endif %} +{% endfor %} + +[k8s_cluster:children] +kube_node +kube_control_plane diff --git a/kubespray/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/kubespray/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml new file mode 100644 index 0000000..4c80c9a --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml @@ -0,0 +1,31 @@ +--- + +- name: Query Azure VMs IPs # noqa 301 + command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }} + register: vm_ip_list_cmd + +- name: Query Azure VMs Roles # noqa 301 + command: az vm list -o json --resource-group {{ azure_resource_group }} + register: vm_list_cmd + +- name: Query Azure Load Balancer Public IP # noqa 301 + command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip + register: lb_pubip_cmd + +- name: Set VM IP, roles lists and load balancer public IP + set_fact: + vm_ip_list: "{{ vm_ip_list_cmd.stdout }}" + vm_roles_list: "{{ vm_list_cmd.stdout }}" + lb_pubip: "{{ 
lb_pubip_cmd.stdout }}" + +- name: Generate inventory + template: + src: inventory.j2 + dest: "{{ playbook_dir }}/inventory" + mode: 0644 + +- name: Generate Load Balancer variables + template: + src: loadbalancer_vars.j2 + dest: "{{ playbook_dir }}/loadbalancer_vars.yml" + mode: 0644 diff --git a/kubespray/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 b/kubespray/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 new file mode 100644 index 0000000..6ab59df --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 @@ -0,0 +1,34 @@ + +{% for vm in vm_ip_list %} +{% if not use_bastion or vm.virtualMachine.name == 'bastion' %} +{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }} +{% else %} +{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }} +{% endif %} +{% endfor %} + +[kube_control_plane] +{% for vm in vm_roles_list %} +{% if 'kube_control_plane' in vm.tags.roles %} +{{ vm.name }} +{% endif %} +{% endfor %} + +[etcd] +{% for vm in vm_roles_list %} +{% if 'etcd' in vm.tags.roles %} +{{ vm.name }} +{% endif %} +{% endfor %} + +[kube_node] +{% for vm in vm_roles_list %} +{% if 'kube_node' in vm.tags.roles %} +{{ vm.name }} +{% endif %} +{% endfor %} + +[k8s_cluster:children] +kube_node +kube_control_plane + diff --git a/kubespray/contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2 b/kubespray/contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2 new file mode 100644 index 0000000..95a62f3 --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2 @@ -0,0 +1,8 @@ +## External LB example config +apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }} +loadbalancer_apiserver: + address: {{ lb_pubip.ipAddress }} + port: 6443 + +## Internal loadbalancers for apiservers +loadbalancer_apiserver_localhost: false diff --git a/kubespray/contrib/azurerm/roles/generate-templates/defaults/main.yml b/kubespray/contrib/azurerm/roles/generate-templates/defaults/main.yml new file mode 100644 index 0000000..1ba2480 --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-templates/defaults/main.yml @@ -0,0 +1,37 @@ +--- +apiVersion: "2015-06-15" + +virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}" + +subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}" +subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}" +subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}" + +routeTableName: "{{ azure_route_table_name | default('routetable') }}" +securityGroupName: "{{ azure_security_group_name | default('secgroup') }}" + +nameSuffix: "{{ cluster_name }}" + +availabilitySetMasters: "master-avs" +availabilitySetMinions: "minion-avs" + +faultDomainCount: 3 +updateDomainCount: 10 + +bastionVmSize: Standard_A0 +bastionVMName: bastion +bastionIPAddressName: bastion-pubip + +disablePasswordAuthentication: true + +sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys" + +imageReference: + publisher: "OpenLogic" + offer: "CentOS" + sku: "7.5" + version: "latest" +imageReferenceJson: "{{imageReference|to_json}}" + +storageAccountName: "sa{{nameSuffix | replace('-', '')}}" +storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}" diff --git 
a/kubespray/contrib/azurerm/roles/generate-templates/tasks/main.yml b/kubespray/contrib/azurerm/roles/generate-templates/tasks/main.yml new file mode 100644 index 0000000..294ee96 --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-templates/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Set base_dir + set_fact: + base_dir: "{{ playbook_dir }}/.generated/" + +- name: Create base_dir + file: + path: "{{ base_dir }}" + state: directory + recurse: true + mode: 0755 + +- name: Store json files in base_dir + template: + src: "{{ item }}" + dest: "{{ base_dir }}/{{ item }}" + mode: 0644 + with_items: + - network.json + - storage.json + - availability-sets.json + - bastion.json + - masters.json + - minions.json + - clear-rg.json diff --git a/kubespray/contrib/azurerm/roles/generate-templates/templates/availability-sets.json b/kubespray/contrib/azurerm/roles/generate-templates/templates/availability-sets.json new file mode 100644 index 0000000..4f458cd --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-templates/templates/availability-sets.json @@ -0,0 +1,30 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + }, + "variables": { + }, + "resources": [ + { + "type": "Microsoft.Compute/availabilitySets", + "name": "{{availabilitySetMasters}}", + "apiVersion": "{{apiVersion}}", + "location": "[resourceGroup().location]", + "properties": { + "PlatformFaultDomainCount": "{{faultDomainCount}}", + "PlatformUpdateDomainCount": "{{updateDomainCount}}" + } + }, + { + "type": "Microsoft.Compute/availabilitySets", + "name": "{{availabilitySetMinions}}", + "apiVersion": "{{apiVersion}}", + "location": "[resourceGroup().location]", + "properties": { + "PlatformFaultDomainCount": "{{faultDomainCount}}", + "PlatformUpdateDomainCount": "{{updateDomainCount}}" + } + } + ] +} \ No newline at end of file diff --git a/kubespray/contrib/azurerm/roles/generate-templates/templates/bastion.json b/kubespray/contrib/azurerm/roles/generate-templates/templates/bastion.json new file mode 100644 index 0000000..d7fd9c8 --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-templates/templates/bastion.json @@ -0,0 +1,106 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + }, + "variables": { + "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]", + "subnetAdminRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetAdminName}}')]" + }, + "resources": [ + {% if use_bastion %} + { + "apiVersion": "{{apiVersion}}", + "type": "Microsoft.Network/publicIPAddresses", + "name": "{{bastionIPAddressName}}", + "location": "[resourceGroup().location]", + "properties": { + "publicIPAllocationMethod": "Static", + "dnsSettings": { + {% if bastion_domain_prefix %} + "domainNameLabel": "{{ bastion_domain_prefix }}" + {% endif %} + } + } + }, + { + "apiVersion": "{{apiVersion}}", + "type": "Microsoft.Network/networkInterfaces", + "name": "{{bastionVMName}}-nic", + "location": "[resourceGroup().location]", + "dependsOn": [ + "[concat('Microsoft.Network/publicIPAddresses/', '{{bastionIPAddressName}}')]" + ], + "properties": { + "ipConfigurations": [ + { + "name": "BastionIpConfig", + "properties": { + "privateIPAllocationMethod": "Dynamic", + "publicIPAddress": { + "id": "[resourceId('Microsoft.Network/publicIPAddresses', '{{bastionIPAddressName}}')]" + }, + "subnet": { + "id": 
"[variables('subnetAdminRef')]" + } + } + } + ] + } + }, + { + "apiVersion": "{{apiVersion}}", + "type": "Microsoft.Compute/virtualMachines", + "name": "{{bastionVMName}}", + "location": "[resourceGroup().location]", + "dependsOn": [ + "[concat('Microsoft.Network/networkInterfaces/', '{{bastionVMName}}-nic')]" + ], + "tags": { + "roles": "bastion" + }, + "properties": { + "hardwareProfile": { + "vmSize": "{{bastionVmSize}}" + }, + "osProfile": { + "computerName": "{{bastionVMName}}", + "adminUsername": "{{admin_username}}", + "adminPassword": "{{admin_password}}", + "linuxConfiguration": { + "disablePasswordAuthentication": "true", + "ssh": { + "publicKeys": [ + {% for key in ssh_public_keys %} + { + "path": "{{sshKeyPath}}", + "keyData": "{{key}}" + }{% if loop.index < ssh_public_keys | length %},{% endif %} + {% endfor %} + ] + } + } + }, + "storageProfile": { + "imageReference": {{imageReferenceJson}}, + "osDisk": { + "name": "osdisk", + "vhd": { + "uri": "[concat('http://', '{{storageAccountName}}', '.blob.core.windows.net/vhds/', '{{bastionVMName}}', '-osdisk.vhd')]" + }, + "caching": "ReadWrite", + "createOption": "FromImage" + } + }, + "networkProfile": { + "networkInterfaces": [ + { + "id": "[resourceId('Microsoft.Network/networkInterfaces', '{{bastionVMName}}-nic')]" + } + ] + } + } + } + {% endif %} + ] +} \ No newline at end of file diff --git a/kubespray/contrib/azurerm/roles/generate-templates/templates/clear-rg.json b/kubespray/contrib/azurerm/roles/generate-templates/templates/clear-rg.json new file mode 100644 index 0000000..5facf5e --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-templates/templates/clear-rg.json @@ -0,0 +1,8 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [], + "outputs": {} +} \ No newline at end of file diff --git a/kubespray/contrib/azurerm/roles/generate-templates/templates/masters.json b/kubespray/contrib/azurerm/roles/generate-templates/templates/masters.json new file mode 100644 index 0000000..b299383 --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-templates/templates/masters.json @@ -0,0 +1,198 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + }, + "variables": { + "lbDomainName": "{{nameSuffix}}-api", + "lbPublicIPAddressName": "kubernetes-api-pubip", + "lbPublicIPAddressType": "Static", + "lbPublicIPAddressID": "[resourceId('Microsoft.Network/publicIPAddresses',variables('lbPublicIPAddressName'))]", + "lbName": "kubernetes-api", + "lbID": "[resourceId('Microsoft.Network/loadBalancers',variables('lbName'))]", + + "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]", + "kubeMastersSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMastersName}}')]" + }, + "resources": [ + { + "apiVersion": "{{apiVersion}}", + "type": "Microsoft.Network/publicIPAddresses", + "name": "[variables('lbPublicIPAddressName')]", + "location": "[resourceGroup().location]", + "properties": { + "publicIPAllocationMethod": "[variables('lbPublicIPAddressType')]", + "dnsSettings": { + "domainNameLabel": "[variables('lbDomainName')]" + } + } + }, + { + "apiVersion": "{{apiVersion}}", + "name": "[variables('lbName')]", + "type": "Microsoft.Network/loadBalancers", + "location": "[resourceGroup().location]", + "dependsOn": [ + 
"[concat('Microsoft.Network/publicIPAddresses/', variables('lbPublicIPAddressName'))]" + ], + "properties": { + "frontendIPConfigurations": [ + { + "name": "kube-api-frontend", + "properties": { + "publicIPAddress": { + "id": "[variables('lbPublicIPAddressID')]" + } + } + } + ], + "backendAddressPools": [ + { + "name": "kube-api-backend" + } + ], + "loadBalancingRules": [ + { + "name": "kube-api", + "properties": { + "frontendIPConfiguration": { + "id": "[concat(variables('lbID'), '/frontendIPConfigurations/kube-api-frontend')]" + }, + "backendAddressPool": { + "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]" + }, + "protocol": "tcp", + "frontendPort": "{{kube_apiserver_port}}", + "backendPort": "{{kube_apiserver_port}}", + "enableFloatingIP": false, + "idleTimeoutInMinutes": 5, + "probe": { + "id": "[concat(variables('lbID'), '/probes/kube-api')]" + } + } + } + ], + "probes": [ + { + "name": "kube-api", + "properties": { + "protocol": "tcp", + "port": "{{kube_apiserver_port}}", + "intervalInSeconds": 5, + "numberOfProbes": 2 + } + } + ] + } + }, + {% for i in range(number_of_k8s_masters) %} + {% if not use_bastion %} + { + "apiVersion": "{{apiVersion}}", + "type": "Microsoft.Network/publicIPAddresses", + "name": "master-{{i}}-pubip", + "location": "[resourceGroup().location]", + "properties": { + "publicIPAllocationMethod": "Static" + } + }, + {% endif %} + { + "apiVersion": "{{apiVersion}}", + "type": "Microsoft.Network/networkInterfaces", + "name": "master-{{i}}-nic", + "location": "[resourceGroup().location]", + "dependsOn": [ + {% if not use_bastion %} + "[concat('Microsoft.Network/publicIPAddresses/', 'master-{{i}}-pubip')]", + {% endif %} + "[concat('Microsoft.Network/loadBalancers/', variables('lbName'))]" + ], + "properties": { + "ipConfigurations": [ + { + "name": "MastersIpConfig", + "properties": { + "privateIPAllocationMethod": "Dynamic", + {% if not use_bastion %} + "publicIPAddress": { + "id": "[resourceId('Microsoft.Network/publicIPAddresses', 'master-{{i}}-pubip')]" + }, + {% endif %} + "subnet": { + "id": "[variables('kubeMastersSubnetRef')]" + }, + "loadBalancerBackendAddressPools": [ + { + "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]" + } + ] + } + } + ], + "networkSecurityGroup": { + "id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]" + }, + "enableIPForwarding": true + } + }, + { + "type": "Microsoft.Compute/virtualMachines", + "name": "master-{{i}}", + "location": "[resourceGroup().location]", + "dependsOn": [ + "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]" + ], + "tags": { + "roles": "kube_control_plane,etcd" + }, + "apiVersion": "{{apiVersion}}", + "properties": { + "availabilitySet": { + "id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMasters}}')]" + }, + "hardwareProfile": { + "vmSize": "{{masters_vm_size}}" + }, + "osProfile": { + "computerName": "master-{{i}}", + "adminUsername": "{{admin_username}}", + "adminPassword": "{{admin_password}}", + "linuxConfiguration": { + "disablePasswordAuthentication": "{{disablePasswordAuthentication}}", + "ssh": { + "publicKeys": [ + {% for key in ssh_public_keys %} + { + "path": "{{sshKeyPath}}", + "keyData": "{{key}}" + }{% if loop.index < ssh_public_keys | length %},{% endif %} + {% endfor %} + ] + } + } + }, + "storageProfile": { + "imageReference": {{imageReferenceJson}}, + "osDisk": { + "name": "ma{{nameSuffix}}{{i}}", + "vhd": { + "uri": 
"[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/master-{{i}}.vhd')]" + }, + "caching": "ReadWrite", + "createOption": "FromImage", + "diskSizeGB": "{{masters_os_disk_size}}" + } + }, + "networkProfile": { + "networkInterfaces": [ + { + "id": "[resourceId('Microsoft.Network/networkInterfaces', 'master-{{i}}-nic')]" + } + ] + } + } + } {% if not loop.last %},{% endif %} + {% endfor %} + ] +} diff --git a/kubespray/contrib/azurerm/roles/generate-templates/templates/minions.json b/kubespray/contrib/azurerm/roles/generate-templates/templates/minions.json new file mode 100644 index 0000000..bd0d059 --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-templates/templates/minions.json @@ -0,0 +1,115 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + }, + "variables": { + "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]", + "kubeMinionsSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMinionsName}}')]" + }, + "resources": [ + {% for i in range(number_of_k8s_nodes) %} + {% if not use_bastion %} + { + "apiVersion": "{{apiVersion}}", + "type": "Microsoft.Network/publicIPAddresses", + "name": "minion-{{i}}-pubip", + "location": "[resourceGroup().location]", + "properties": { + "publicIPAllocationMethod": "Static" + } + }, + {% endif %} + { + "apiVersion": "{{apiVersion}}", + "type": "Microsoft.Network/networkInterfaces", + "name": "minion-{{i}}-nic", + "location": "[resourceGroup().location]", + "dependsOn": [ + {% if not use_bastion %} + "[concat('Microsoft.Network/publicIPAddresses/', 'minion-{{i}}-pubip')]" + {% endif %} + ], + "properties": { + "ipConfigurations": [ + { + "name": "MinionsIpConfig", + "properties": { + "privateIPAllocationMethod": "Dynamic", + {% if not use_bastion %} + "publicIPAddress": { + "id": "[resourceId('Microsoft.Network/publicIPAddresses', 'minion-{{i}}-pubip')]" + }, + {% endif %} + "subnet": { + "id": "[variables('kubeMinionsSubnetRef')]" + } + } + } + ], + "networkSecurityGroup": { + "id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]" + }, + "enableIPForwarding": true + } + }, + { + "type": "Microsoft.Compute/virtualMachines", + "name": "minion-{{i}}", + "location": "[resourceGroup().location]", + "dependsOn": [ + "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]" + ], + "tags": { + "roles": "kube_node" + }, + "apiVersion": "{{apiVersion}}", + "properties": { + "availabilitySet": { + "id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMinions}}')]" + }, + "hardwareProfile": { + "vmSize": "{{minions_vm_size}}" + }, + "osProfile": { + "computerName": "minion-{{i}}", + "adminUsername": "{{admin_username}}", + "adminPassword": "{{admin_password}}", + "linuxConfiguration": { + "disablePasswordAuthentication": "{{disablePasswordAuthentication}}", + "ssh": { + "publicKeys": [ + {% for key in ssh_public_keys %} + { + "path": "{{sshKeyPath}}", + "keyData": "{{key}}" + }{% if loop.index < ssh_public_keys | length %},{% endif %} + {% endfor %} + ] + } + } + }, + "storageProfile": { + "imageReference": {{imageReferenceJson}}, + "osDisk": { + "name": "mi{{nameSuffix}}{{i}}", + "vhd": { + "uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/minion-{{i}}.vhd')]" + }, + "caching": "ReadWrite", + "createOption": "FromImage", + "diskSizeGB": "{{minions_os_disk_size}}" + } + }, + "networkProfile": { + 
"networkInterfaces": [ + { + "id": "[resourceId('Microsoft.Network/networkInterfaces', 'minion-{{i}}-nic')]" + } + ] + } + } + } {% if not loop.last %},{% endif %} + {% endfor %} + ] +} diff --git a/kubespray/contrib/azurerm/roles/generate-templates/templates/network.json b/kubespray/contrib/azurerm/roles/generate-templates/templates/network.json new file mode 100644 index 0000000..763b3db --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-templates/templates/network.json @@ -0,0 +1,109 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + }, + "variables": { + }, + "resources": [ + { + "apiVersion": "{{apiVersion}}", + "type": "Microsoft.Network/routeTables", + "name": "{{routeTableName}}", + "location": "[resourceGroup().location]", + "properties": { + "routes": [ + ] + } + }, + { + "type": "Microsoft.Network/virtualNetworks", + "name": "{{virtualNetworkName}}", + "location": "[resourceGroup().location]", + "apiVersion": "{{apiVersion}}", + "dependsOn": [ + "[concat('Microsoft.Network/routeTables/', '{{routeTableName}}')]" + ], + "properties": { + "addressSpace": { + "addressPrefixes": [ + "{{azure_vnet_cidr}}" + ] + }, + "subnets": [ + { + "name": "{{subnetMastersName}}", + "properties": { + "addressPrefix": "{{azure_masters_cidr}}", + "routeTable": { + "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]" + } + } + }, + { + "name": "{{subnetMinionsName}}", + "properties": { + "addressPrefix": "{{azure_minions_cidr}}", + "routeTable": { + "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]" + } + } + } + {% if use_bastion %} + ,{ + "name": "{{subnetAdminName}}", + "properties": { + "addressPrefix": "{{azure_admin_cidr}}", + "routeTable": { + "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]" + } + } + } + {% endif %} + ] + } + }, + { + "apiVersion": "{{apiVersion}}", + "type": "Microsoft.Network/networkSecurityGroups", + "name": "{{securityGroupName}}", + "location": "[resourceGroup().location]", + "properties": { + "securityRules": [ + {% if not use_bastion %} + { + "name": "ssh", + "properties": { + "description": "Allow SSH", + "protocol": "Tcp", + "sourcePortRange": "*", + "destinationPortRange": "22", + "sourceAddressPrefix": "Internet", + "destinationAddressPrefix": "*", + "access": "Allow", + "priority": 100, + "direction": "Inbound" + } + }, + {% endif %} + { + "name": "kube-api", + "properties": { + "description": "Allow secure kube-api", + "protocol": "Tcp", + "sourcePortRange": "*", + "destinationPortRange": "{{kube_apiserver_port}}", + "sourceAddressPrefix": "Internet", + "destinationAddressPrefix": "*", + "access": "Allow", + "priority": 101, + "direction": "Inbound" + } + } + ] + }, + "resources": [], + "dependsOn": [] + } + ] +} diff --git a/kubespray/contrib/azurerm/roles/generate-templates/templates/storage.json b/kubespray/contrib/azurerm/roles/generate-templates/templates/storage.json new file mode 100644 index 0000000..2632aba --- /dev/null +++ b/kubespray/contrib/azurerm/roles/generate-templates/templates/storage.json @@ -0,0 +1,19 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + }, + "variables": { + }, + "resources": [ + { + "type": "Microsoft.Storage/storageAccounts", + "name": "{{storageAccountName}}", + "location": "[resourceGroup().location]", + "apiVersion": "{{apiVersion}}", + 
"properties": { + "accountType": "{{storageAccountType}}" + } + } + ] +} \ No newline at end of file diff --git a/kubespray/contrib/dind/README.md b/kubespray/contrib/dind/README.md new file mode 100644 index 0000000..5e72cfc --- /dev/null +++ b/kubespray/contrib/dind/README.md @@ -0,0 +1,177 @@ +# Kubespray DIND experimental setup + +This ansible playbook creates local docker containers +to serve as Kubernetes "nodes", which in turn will run +"normal" Kubernetes docker containers, a mode usually +called DIND (Docker-IN-Docker). + +The playbook has two roles: + +- dind-host: creates the "nodes" as containers in localhost, with + appropriate settings for DIND (privileged, volume mapping for dind + storage, etc). +- dind-cluster: customizes each node container to have required + system packages installed, and some utils (swapoff, lsattr) + symlinked to /bin/true to ease mimicking a real node. + +This playbook has been test with Ubuntu 16.04 as host and ubuntu:16.04 +as docker images (note that dind-cluster has specific customization +for these images). + +The playbook also creates a `/tmp/kubespray.dind.inventory_builder.sh` +helper (wraps up running `contrib/inventory_builder/inventory.py` with +node containers IPs and prefix). + +## Deploying + +See below for a complete successful run: + +1. Create the node containers + +```shell +# From the kubespray root dir +cd contrib/dind +pip install -r requirements.txt + +ansible-playbook -i hosts dind-cluster.yaml + +# Back to kubespray root +cd ../.. +``` + +NOTE: if the playbook run fails with something like below error +message, you may need to specifically set `ansible_python_interpreter`, +see `./hosts` file for an example expanded localhost entry. + +```shell +failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"} +``` + +2. Customize kubespray-dind.yaml + +Note that there's coupling between above created node containers +and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro` +(as set in `group_vars/all/all.yaml`), and docker settings. + +```shell +$EDITOR contrib/dind/kubespray-dind.yaml +``` + +3. Prepare the inventory and run the playbook + +```shell +INVENTORY_DIR=inventory/local-dind +mkdir -p ${INVENTORY_DIR} +rm -f ${INVENTORY_DIR}/hosts.ini +CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh + +ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml +``` + +NOTE: You could also test other distros without editing files by +passing `--extra-vars` as per below commandline, +replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`: + +```shell +cd contrib/dind +ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO + +cd ../.. +CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh +ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO +``` + +## Resulting deployment + +See below to get an idea on how a completed deployment looks like, +from the host where you ran kubespray playbooks. 
+ +### node_distro: debian + +Running from an Ubuntu Xenial host: + +```shell +$ uname -a +Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24 +15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux + +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +1835dd183b75 debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node5 +30b0af8d2924 debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node4 +3e0d1510c62f debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node3 +738993566f94 debian:9.5 "sh -c 'apt-get -qy …" 44 minutes ago Up 44 minutes kube-node2 +c581ef662ed2 debian:9.5 "sh -c 'apt-get -qy …" 44 minutes ago Up 44 minutes kube-node1 + +$ docker exec kube-node1 kubectl get node +NAME STATUS ROLES AGE VERSION +kube-node1 Ready master,node 18m v1.12.1 +kube-node2 Ready master,node 17m v1.12.1 +kube-node3 Ready node 17m v1.12.1 +kube-node4 Ready node 17m v1.12.1 +kube-node5 Ready node 17m v1.12.1 + +$ docker exec kube-node1 kubectl get pod --all-namespaces +NAMESPACE NAME READY STATUS RESTARTS AGE +default netchecker-agent-67489 1/1 Running 0 2m51s +default netchecker-agent-6qq6s 1/1 Running 0 2m51s +default netchecker-agent-fsw92 1/1 Running 0 2m51s +default netchecker-agent-fw6tl 1/1 Running 0 2m51s +default netchecker-agent-hostnet-8f2zb 1/1 Running 0 3m +default netchecker-agent-hostnet-gq7ml 1/1 Running 0 3m +default netchecker-agent-hostnet-jfkgv 1/1 Running 0 3m +default netchecker-agent-hostnet-kwfwx 1/1 Running 0 3m +default netchecker-agent-hostnet-r46nm 1/1 Running 0 3m +default netchecker-agent-lxdrn 1/1 Running 0 2m51s +default netchecker-server-864bd4c897-9vstl 1/1 Running 0 2m40s +default sh-68fcc6db45-qf55h 1/1 Running 1 12m +kube-system coredns-7598f59475-6vknq 1/1 Running 0 14m +kube-system coredns-7598f59475-l5q5x 1/1 Running 0 14m +kube-system kube-apiserver-kube-node1 1/1 Running 0 17m +kube-system kube-apiserver-kube-node2 1/1 Running 0 18m +kube-system kube-controller-manager-kube-node1 1/1 Running 0 18m +kube-system kube-controller-manager-kube-node2 1/1 Running 0 18m +kube-system kube-proxy-5xx9d 1/1 Running 0 17m +kube-system kube-proxy-cdqq4 1/1 Running 0 17m +kube-system kube-proxy-n64ls 1/1 Running 0 17m +kube-system kube-proxy-pswmj 1/1 Running 0 18m +kube-system kube-proxy-x89qw 1/1 Running 0 18m +kube-system kube-scheduler-kube-node1 1/1 Running 4 17m +kube-system kube-scheduler-kube-node2 1/1 Running 4 18m +kube-system kubernetes-dashboard-5db4d9f45f-548rl 1/1 Running 0 14m +kube-system nginx-proxy-kube-node3 1/1 Running 4 17m +kube-system nginx-proxy-kube-node4 1/1 Running 4 17m +kube-system nginx-proxy-kube-node5 1/1 Running 4 17m +kube-system weave-net-42bfr 2/2 Running 0 16m +kube-system weave-net-6gt8m 2/2 Running 0 16m +kube-system weave-net-88nnc 2/2 Running 0 16m +kube-system weave-net-shckr 2/2 Running 0 16m +kube-system weave-net-xr46t 2/2 Running 0 16m + +$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check +{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null} +``` + +## Using ./run-test-distros.sh + +You can use `./run-test-distros.sh` to run a set of tests via DIND, +and excerpt from this script, to get an idea: + +```shell +# The SPEC file(s) must have two arrays as e.g. +# DISTROS=(debian centos) +# EXTRAS=( +# 'kube_network_plugin=calico' +# 'kube_network_plugin=flannel' +# 'kube_network_plugin=weave' +# ) +# that will be tested in a "combinatory" way (e.g. 
from above there'll be +# be 6 test runs), creating a sequenced -nn.out with each output. +# +# Each $EXTRAS element will be whitespace split, and passed as --extra-vars +# to main kubespray ansible-playbook run. +``` + +See e.g. `test-some_distros-most_CNIs.env` and +`test-some_distros-kube_router_combo.env` in particular for a richer +set of CNI specific `--extra-vars` combo. diff --git a/kubespray/contrib/dind/dind-cluster.yaml b/kubespray/contrib/dind/dind-cluster.yaml new file mode 100644 index 0000000..3fcae1e --- /dev/null +++ b/kubespray/contrib/dind/dind-cluster.yaml @@ -0,0 +1,9 @@ +--- +- hosts: localhost + gather_facts: False + roles: + - { role: dind-host } + +- hosts: containers + roles: + - { role: dind-cluster } diff --git a/kubespray/contrib/dind/group_vars/all/all.yaml b/kubespray/contrib/dind/group_vars/all/all.yaml new file mode 100644 index 0000000..fd619a0 --- /dev/null +++ b/kubespray/contrib/dind/group_vars/all/all.yaml @@ -0,0 +1,3 @@ +--- +# See distro.yaml for supported node_distro images +node_distro: debian diff --git a/kubespray/contrib/dind/group_vars/all/distro.yaml b/kubespray/contrib/dind/group_vars/all/distro.yaml new file mode 100644 index 0000000..b9c2670 --- /dev/null +++ b/kubespray/contrib/dind/group_vars/all/distro.yaml @@ -0,0 +1,41 @@ +--- +distro_settings: + debian: &DEBIAN + image: "debian:9.5" + user: "debian" + pid1_exe: /lib/systemd/systemd + init: | + sh -c "apt-get -qy update && apt-get -qy install systemd-sysv dbus && exec /sbin/init" + raw_setup: apt-get -qy update && apt-get -qy install dbus python sudo iproute2 + raw_setup_done: test -x /usr/bin/sudo + agetty_svc: getty@* + ssh_service: ssh + extra_packages: [] + ubuntu: + <<: *DEBIAN + image: "ubuntu:16.04" + user: "ubuntu" + init: | + /sbin/init + centos: &CENTOS + image: "centos:7" + user: "centos" + pid1_exe: /usr/lib/systemd/systemd + init: | + /sbin/init + raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables + raw_setup_done: test -x /usr/bin/sudo + agetty_svc: getty@* serial-getty@* + ssh_service: sshd + extra_packages: [] + fedora: + <<: *CENTOS + image: "fedora:latest" + user: "fedora" + raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables; mkdir -p /etc/modules-load.d + extra_packages: + - hostname + - procps + - findutils + - kmod + - iputils diff --git a/kubespray/contrib/dind/hosts b/kubespray/contrib/dind/hosts new file mode 100644 index 0000000..356aa26 --- /dev/null +++ b/kubespray/contrib/dind/hosts @@ -0,0 +1,15 @@ +[local] +# If you created a virtualenv for ansible, you may need to specify running the +# python binary from there instead: +#localhost ansible_connection=local ansible_python_interpreter=/home/user/kubespray/.venv/bin/python +localhost ansible_connection=local + +[containers] +kube-node1 +kube-node2 +kube-node3 +kube-node4 +kube-node5 + +[containers:vars] +ansible_connection=docker diff --git a/kubespray/contrib/dind/kubespray-dind.yaml b/kubespray/contrib/dind/kubespray-dind.yaml new file mode 100644 index 0000000..ecfb557 --- /dev/null +++ b/kubespray/contrib/dind/kubespray-dind.yaml @@ -0,0 +1,22 @@ +--- +# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND +# See contrib/dind/README.md +kube_api_anonymous_auth: true + +kubelet_fail_swap_on: false + +# Docker nodes need to have been created with same "node_distro: debian" +# at contrib/dind/group_vars/all/all.yaml +bootstrap_os: debian + +docker_version: latest + +docker_storage_options: -s overlay2 --storage-opt 
overlay2.override_kernel_check=true -g /dind/docker + +dns_mode: coredns + +deploy_netchecker: True +netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent +netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server +netcheck_agent_image_tag: v1.0 +netcheck_server_image_tag: v1.0 diff --git a/kubespray/contrib/dind/requirements.txt b/kubespray/contrib/dind/requirements.txt new file mode 100644 index 0000000..bdb9670 --- /dev/null +++ b/kubespray/contrib/dind/requirements.txt @@ -0,0 +1 @@ +docker diff --git a/kubespray/contrib/dind/roles/dind-cluster/tasks/main.yaml b/kubespray/contrib/dind/roles/dind-cluster/tasks/main.yaml new file mode 100644 index 0000000..247a0a8 --- /dev/null +++ b/kubespray/contrib/dind/roles/dind-cluster/tasks/main.yaml @@ -0,0 +1,73 @@ +--- +- name: set_fact distro_setup + set_fact: + distro_setup: "{{ distro_settings[node_distro] }}" + +- name: set_fact other distro settings + set_fact: + distro_user: "{{ distro_setup['user'] }}" + distro_ssh_service: "{{ distro_setup['ssh_service'] }}" + distro_extra_packages: "{{ distro_setup['extra_packages'] }}" + +- name: Null-ify some linux tools to ease DIND + file: + src: "/bin/true" + dest: "{{ item }}" + state: link + force: yes + with_items: + # DIND box may have swap enable, don't bother + - /sbin/swapoff + # /etc/hosts handling would fail on trying to copy file attributes on edit, + # void it by successfully returning nil output + - /usr/bin/lsattr + # disable selinux-isms, sp needed if running on non-Selinux host + - /usr/sbin/semodule + +- name: Void installing dpkg docs and man pages on Debian based distros + copy: + content: | + # Delete locales + path-exclude=/usr/share/locale/* + # Delete man pages + path-exclude=/usr/share/man/* + # Delete docs + path-exclude=/usr/share/doc/* + path-include=/usr/share/doc/*/copyright + dest: /etc/dpkg/dpkg.cfg.d/01_nodoc + mode: 0644 + when: + - ansible_os_family == 'Debian' + +- name: Install system packages to better match a full-fledge node + package: + name: "{{ item }}" + state: present + with_items: "{{ distro_extra_packages }} + [ 'rsyslog', 'openssh-server' ]" + +- name: Start needed services + service: + name: "{{ item }}" + state: started + with_items: + - rsyslog + - "{{ distro_ssh_service }}" + +- name: Create distro user "{{ distro_user }}" + user: + name: "{{ distro_user }}" + uid: 1000 + # groups: sudo + append: yes + +- name: Allow password-less sudo to "{{ distro_user }}" + copy: + content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL" + dest: "/etc/sudoers.d/{{ distro_user }}" + mode: 0640 + +- name: Add my pubkey to "{{ distro_user }}" user authorized keys + authorized_key: + user: "{{ distro_user }}" + state: present + key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}" diff --git a/kubespray/contrib/dind/roles/dind-host/tasks/main.yaml b/kubespray/contrib/dind/roles/dind-host/tasks/main.yaml new file mode 100644 index 0000000..5b63a6b --- /dev/null +++ b/kubespray/contrib/dind/roles/dind-host/tasks/main.yaml @@ -0,0 +1,88 @@ +--- +- name: set_fact distro_setup + set_fact: + distro_setup: "{{ distro_settings[node_distro] }}" + +- name: set_fact other distro settings + set_fact: + distro_image: "{{ distro_setup['image'] }}" + distro_init: "{{ distro_setup['init'] }}" + distro_pid1_exe: "{{ distro_setup['pid1_exe'] }}" + distro_raw_setup: "{{ distro_setup['raw_setup'] }}" + distro_raw_setup_done: "{{ distro_setup['raw_setup_done'] }}" + distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}" + +- name: Create dind node 
containers from "containers" inventory section + docker_container: + image: "{{ distro_image }}" + name: "{{ item }}" + state: started + hostname: "{{ item }}" + command: "{{ distro_init }}" + # recreate: yes + privileged: true + tmpfs: + - /sys/module/nf_conntrack/parameters + volumes: + - /boot:/boot + - /lib/modules:/lib/modules + - "{{ item }}:/dind/docker" + register: containers + with_items: "{{ groups.containers }}" + tags: + - addresses + +- name: Gather list of containers IPs + set_fact: + addresses: "{{ containers.results | map(attribute='ansible_facts') | map(attribute='docker_container') | map(attribute='NetworkSettings') | map(attribute='IPAddress') | list }}" + tags: + - addresses + +- name: Create inventory_builder helper already set with the list of node containers' IPs + template: + src: inventory_builder.sh.j2 + dest: /tmp/kubespray.dind.inventory_builder.sh + mode: 0755 + tags: + - addresses + +- name: Install needed packages into node containers via raw, need to wait for possible systemd packages to finish installing + raw: | + # agetty processes churn a lot of cpu time failing on inexistent ttys, early STOP them, to rip them in below task + pkill -STOP agetty || true + {{ distro_raw_setup_done }} && echo SKIPPED && exit 0 + until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done + {{ distro_raw_setup }} + delegate_to: "{{ item._ansible_item_label|default(item.item) }}" + with_items: "{{ containers.results }}" + register: result + changed_when: result.stdout.find("SKIPPED") < 0 + +- name: Remove gettys from node containers + raw: | + until test -S /var/run/dbus/system_bus_socket; do sleep 1; done + systemctl disable {{ distro_agetty_svc }} + systemctl stop {{ distro_agetty_svc }} + delegate_to: "{{ item._ansible_item_label|default(item.item) }}" + with_items: "{{ containers.results }}" + changed_when: false + +# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian, +# handle manually +- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301 + raw: | + echo {{ item | hash('sha1') }} > /etc/machine-id.new + mv -b /etc/machine-id.new /etc/machine-id + cmp /etc/machine-id /etc/machine-id~ || true + systemctl daemon-reload + delegate_to: "{{ item._ansible_item_label|default(item.item) }}" + with_items: "{{ containers.results }}" + +- name: Early hack image install to adapt for DIND + # noqa 302 - this task uses the raw module intentionally + raw: | + rm -fv /usr/bin/udevadm /usr/sbin/udevadm + delegate_to: "{{ item._ansible_item_label|default(item.item) }}" + with_items: "{{ containers.results }}" + register: result + changed_when: result.stdout.find("removed") >= 0 diff --git a/kubespray/contrib/dind/roles/dind-host/templates/inventory_builder.sh.j2 b/kubespray/contrib/dind/roles/dind-host/templates/inventory_builder.sh.j2 new file mode 100644 index 0000000..48e1758 --- /dev/null +++ b/kubespray/contrib/dind/roles/dind-host/templates/inventory_builder.sh.j2 @@ -0,0 +1,3 @@ +#!/bin/bash +# NOTE: if you change HOST_PREFIX, you also need to edit ./hosts [containers] section +HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py {% for ip in addresses %} {{ ip }} {% endfor %} diff --git a/kubespray/contrib/dind/run-test-distros.sh b/kubespray/contrib/dind/run-test-distros.sh new file mode 100755 index 0000000..3695276 --- /dev/null +++ b/kubespray/contrib/dind/run-test-distros.sh @@ -0,0 +1,93 @@ 
+#!/bin/bash +# Q&D test'em all: creates full DIND kubespray deploys +# for each distro, verifying it via netchecker. + +info() { + local msg="$*" + local date="$(date -Isec)" + echo "INFO: [$date] $msg" +} +pass_or_fail() { + local rc="$?" + local msg="$*" + local date="$(date -Isec)" + [ $rc -eq 0 ] && echo "PASS: [$date] $msg" || echo "FAIL: [$date] $msg" + return $rc +} +test_distro() { + local distro=${1:?};shift + local extra="${*:-}" + local prefix="${distro[${extra}]}" + ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro + pass_or_fail "$prefix: dind-nodes" || return 1 + (cd ../.. + INVENTORY_DIR=inventory/local-dind + mkdir -p ${INVENTORY_DIR} + rm -f ${INVENTORY_DIR}/hosts.ini + CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh + # expand $extra with -e in front of each word + extra_args=""; for extra_arg in $extra; do extra_args="$extra_args -e $extra_arg"; done + ansible-playbook --become -e ansible_ssh_user=$distro -i \ + ${INVENTORY_DIR}/hosts.ini cluster.yml \ + -e @contrib/dind/kubespray-dind.yaml -e bootstrap_os=$distro ${extra_args} + pass_or_fail "$prefix: kubespray" + ) || return 1 + local node0=${NODES[0]} + docker exec ${node0} kubectl get pod --all-namespaces + pass_or_fail "$prefix: kube-api" || return 1 + let retries=60 + while ((retries--)); do + # Some CNI may set NodePort on "main" node interface address (thus no localhost NodePort) + # e.g. kube-router: https://github.com/cloudnativelabs/kube-router/pull/217 + docker exec ${node0} curl -m2 -s http://${NETCHECKER_HOST:?}:31081/api/v1/connectivity_check | grep successfully && break + sleep 2 + done + [ $retries -ge 0 ] + pass_or_fail "$prefix: netcheck" || return 1 +} + +NODES=($(egrep ^kube_node hosts)) +NETCHECKER_HOST=localhost + +: ${OUTPUT_DIR:=./out} +mkdir -p ${OUTPUT_DIR} + +# The SPEC file(s) must have two arrays as e.g. +# DISTROS=(debian centos) +# EXTRAS=( +# 'kube_network_plugin=calico' +# 'kube_network_plugin=flannel' +# 'kube_network_plugin=weave' +# ) +# that will be tested in a "combinatory" way (e.g. from above there'll be +# be 6 test runs), creating a sequenced -nn.out with each output. +# +# Each $EXTRAS element will be whitespace split, and passed as --extra-vars +# to main kubespray ansible-playbook run. + +SPECS=${*:?Missing SPEC files, e.g. test-most_distros-some_CNIs.env} +for spec in ${SPECS}; do + unset DISTROS EXTRAS + echo "Loading file=${spec} ..." + . 
${spec} || continue + : ${DISTROS:?} || continue + echo "DISTROS:" "${DISTROS[@]}" + echo "EXTRAS->" + printf " %s\n" "${EXTRAS[@]}" + let n=1 + for distro in "${DISTROS[@]}"; do + for extra in "${EXTRAS[@]:-NULL}"; do + # Magic value to let this for run once: + [[ ${extra} == NULL ]] && unset extra + docker rm -f "${NODES[@]}" + printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++)) + { + info "${distro}[${extra}] START: file_out=${file_out}" + time test_distro ${distro} ${extra} + } |& tee ${file_out} + # sleeping for the sake of the human to verify if they want + sleep 2m + done + done +done +egrep -H '^(....:|real)' $(ls -tr ${OUTPUT_DIR}/*.out) diff --git a/kubespray/contrib/dind/test-most_distros-some_CNIs.env b/kubespray/contrib/dind/test-most_distros-some_CNIs.env new file mode 100644 index 0000000..f6e4e1a --- /dev/null +++ b/kubespray/contrib/dind/test-most_distros-some_CNIs.env @@ -0,0 +1,11 @@ +# Test spec file: used from ./run-test-distros.sh, will run +# each distro in $DISTROS overloading main kubespray ansible-playbook run +# Get all DISTROS from distro.yaml (shame no yaml parsing, but nuff anyway) +# DISTROS="${*:-$(egrep -o '^ \w+' group_vars/all/distro.yaml|paste -s)}" +DISTROS=(debian ubuntu centos fedora) + +# Each line below will be added as --extra-vars to main playbook run +EXTRAS=( + 'kube_network_plugin=calico' + 'kube_network_plugin=weave' +) diff --git a/kubespray/contrib/dind/test-some_distros-kube_router_combo.env b/kubespray/contrib/dind/test-some_distros-kube_router_combo.env new file mode 100644 index 0000000..f267712 --- /dev/null +++ b/kubespray/contrib/dind/test-some_distros-kube_router_combo.env @@ -0,0 +1,6 @@ +DISTROS=(debian centos) +NETCHECKER_HOST=${NODES[0]} +EXTRAS=( + 'kube_network_plugin=kube-router {"kube_router_run_service_proxy":false}' + 'kube_network_plugin=kube-router {"kube_router_run_service_proxy":true}' +) diff --git a/kubespray/contrib/dind/test-some_distros-most_CNIs.env b/kubespray/contrib/dind/test-some_distros-most_CNIs.env new file mode 100644 index 0000000..2fb185c --- /dev/null +++ b/kubespray/contrib/dind/test-some_distros-most_CNIs.env @@ -0,0 +1,8 @@ +DISTROS=(debian centos) +EXTRAS=( + 'kube_network_plugin=calico {}' + 'kube_network_plugin=canal {}' + 'kube_network_plugin=cilium {}' + 'kube_network_plugin=flannel {}' + 'kube_network_plugin=weave {}' +) diff --git a/kubespray/contrib/inventory_builder/inventory.py b/kubespray/contrib/inventory_builder/inventory.py new file mode 100644 index 0000000..76e7c0c --- /dev/null +++ b/kubespray/contrib/inventory_builder/inventory.py @@ -0,0 +1,480 @@ +#!/usr/bin/env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Usage: inventory.py ip1 [ip2 ...] 
+# Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5 +# +# Advanced usage: +# Add another host after initial creation: inventory.py 10.10.1.5 +# Add range of hosts: inventory.py 10.10.1.3-10.10.1.5 +# Add hosts with different ip and access ip: +# inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.1.3 +# Add hosts with a specific hostname, ip, and optional access ip: +# inventory.py first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3 +# Delete a host: inventory.py -10.10.1.3 +# Delete a host by id: inventory.py -node1 +# +# Load a YAML or JSON file with inventory data: inventory.py load hosts.yaml +# YAML file should be in the following format: +# group1: +# host1: +# ip: X.X.X.X +# var: val +# group2: +# host2: +# ip: X.X.X.X + +from collections import OrderedDict +from ipaddress import ip_address +from ruamel.yaml import YAML + +import os +import re +import subprocess +import sys + +ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster', + 'calico_rr'] +PROTECTED_NAMES = ROLES +AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames', + 'load', 'add'] +_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, + '0': False, 'no': False, 'false': False, 'off': False} +yaml = YAML() +yaml.Representer.add_representer(OrderedDict, yaml.Representer.represent_dict) + + +def get_var_as_bool(name, default): + value = os.environ.get(name, '') + return _boolean_states.get(value.lower(), default) + +# Configurable as shell vars start + + +CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml") +# Remove the reference of KUBE_MASTERS after some deprecation cycles. +KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS", + os.environ.get("KUBE_MASTERS", 2))) +# Reconfigures cluster distribution at scale +SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50)) +MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200)) + +DEBUG = get_var_as_bool("DEBUG", True) +HOST_PREFIX = os.environ.get("HOST_PREFIX", "node") +USE_REAL_HOSTNAME = get_var_as_bool("USE_REAL_HOSTNAME", False) + +# Configurable as shell vars end + + +class KubesprayInventory(object): + + def __init__(self, changed_hosts=None, config_file=None): + self.config_file = config_file + self.yaml_config = {} + loadPreviousConfig = False + printHostnames = False + # See whether there are any commands to process + if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS: + if changed_hosts[0] == "add": + loadPreviousConfig = True + changed_hosts = changed_hosts[1:] + elif changed_hosts[0] == "print_hostnames": + loadPreviousConfig = True + printHostnames = True + else: + self.parse_command(changed_hosts[0], changed_hosts[1:]) + sys.exit(0) + + # If the user wants to remove a node, we need to load the config anyway + if changed_hosts and changed_hosts[0][0] == "-": + loadPreviousConfig = True + + if self.config_file and loadPreviousConfig: # Load previous YAML file + try: + self.hosts_file = open(config_file, 'r') + self.yaml_config = yaml.load(self.hosts_file) + except OSError as e: + # I am assuming we are catching "cannot open file" exceptions + print(e) + sys.exit(1) + + if printHostnames: + self.print_hostnames() + sys.exit(0) + + self.ensure_required_groups(ROLES) + + if changed_hosts: + changed_hosts = self.range2ips(changed_hosts) + self.hosts = self.build_hostnames(changed_hosts, + loadPreviousConfig) + self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES) + self.set_all(self.hosts) + 
self.set_k8s_cluster() + etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1 + self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count]) + if len(self.hosts) >= SCALE_THRESHOLD: + self.set_kube_control_plane(list(self.hosts.keys())[ + etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)]) + else: + self.set_kube_control_plane( + list(self.hosts.keys())[:KUBE_CONTROL_HOSTS]) + self.set_kube_node(self.hosts.keys()) + if len(self.hosts) >= SCALE_THRESHOLD: + self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count]) + else: # Show help if no options + self.show_help() + sys.exit(0) + + self.write_config(self.config_file) + + def write_config(self, config_file): + if config_file: + with open(self.config_file, 'w') as f: + yaml.dump(self.yaml_config, f) + + else: + print("WARNING: Unable to save config. Make sure you set " + "CONFIG_FILE env var.") + + def debug(self, msg): + if DEBUG: + print("DEBUG: {0}".format(msg)) + + def get_ip_from_opts(self, optstring): + if 'ip' in optstring: + return optstring['ip'] + else: + raise ValueError("IP parameter not found in options") + + def ensure_required_groups(self, groups): + for group in groups: + if group == 'all': + self.debug("Adding group {0}".format(group)) + if group not in self.yaml_config: + all_dict = OrderedDict([('hosts', OrderedDict({})), + ('children', OrderedDict({}))]) + self.yaml_config = {'all': all_dict} + else: + self.debug("Adding group {0}".format(group)) + if group not in self.yaml_config['all']['children']: + self.yaml_config['all']['children'][group] = {'hosts': {}} + + def get_host_id(self, host): + '''Returns integer host ID (without padding) from a given hostname.''' + try: + short_hostname = host.split('.')[0] + return int(re.findall("\\d+$", short_hostname)[-1]) + except IndexError: + raise ValueError("Host name must end in an integer") + + # Keeps already specified hosts, + # and adds or removes the hosts provided as an argument + def build_hostnames(self, changed_hosts, loadPreviousConfig=False): + existing_hosts = OrderedDict() + highest_host_id = 0 + # Load already existing hosts from the YAML + if loadPreviousConfig: + try: + for host in self.yaml_config['all']['hosts']: + # Read configuration of an existing host + hostConfig = self.yaml_config['all']['hosts'][host] + existing_hosts[host] = hostConfig + # If the existing host seems + # to have been created automatically, detect its ID + if host.startswith(HOST_PREFIX): + host_id = self.get_host_id(host) + if host_id > highest_host_id: + highest_host_id = host_id + except Exception as e: + # I am assuming we are catching automatically + # created hosts without IDs + print(e) + sys.exit(1) + + # FIXME(mattymo): Fix condition where delete then add reuses highest id + next_host_id = highest_host_id + 1 + next_host = "" + + all_hosts = existing_hosts.copy() + for host in changed_hosts: + # Delete the host from config the hostname/IP has a "-" prefix + if host[0] == "-": + realhost = host[1:] + if self.exists_hostname(all_hosts, realhost): + self.debug("Marked {0} for deletion.".format(realhost)) + all_hosts.pop(realhost) + elif self.exists_ip(all_hosts, realhost): + self.debug("Marked {0} for deletion.".format(realhost)) + self.delete_host_by_ip(all_hosts, realhost) + # Host/Argument starts with a digit, + # then we assume its an IP address + elif host[0].isdigit(): + if ',' in host: + ip, access_ip = host.split(',') + else: + ip = host + access_ip = host + if self.exists_hostname(all_hosts, host): + self.debug("Skipping existing host {0}.".format(host)) + 
continue + elif self.exists_ip(all_hosts, ip): + self.debug("Skipping existing host {0}.".format(ip)) + continue + + if USE_REAL_HOSTNAME: + cmd = ("ssh -oStrictHostKeyChecking=no " + + access_ip + " 'hostname -s'") + next_host = subprocess.check_output(cmd, shell=True) + next_host = next_host.strip().decode('ascii') + else: + # Generates a hostname because we have only an IP address + next_host = "{0}{1}".format(HOST_PREFIX, next_host_id) + next_host_id += 1 + # Uses automatically generated node name + # in case we dont provide it. + all_hosts[next_host] = {'ansible_host': access_ip, + 'ip': ip, + 'access_ip': access_ip} + # Host/Argument starts with a letter, then we assume its a hostname + elif host[0].isalpha(): + if ',' in host: + try: + hostname, ip, access_ip = host.split(',') + except Exception: + hostname, ip = host.split(',') + access_ip = ip + if self.exists_hostname(all_hosts, host): + self.debug("Skipping existing host {0}.".format(host)) + continue + elif self.exists_ip(all_hosts, ip): + self.debug("Skipping existing host {0}.".format(ip)) + continue + all_hosts[hostname] = {'ansible_host': access_ip, + 'ip': ip, + 'access_ip': access_ip} + return all_hosts + + # Expand IP ranges into individual addresses + def range2ips(self, hosts): + reworked_hosts = [] + + def ips(start_address, end_address): + try: + # Python 3.x + start = int(ip_address(start_address)) + end = int(ip_address(end_address)) + except Exception: + # Python 2.7 + start = int(ip_address(str(start_address))) + end = int(ip_address(str(end_address))) + return [ip_address(ip).exploded for ip in range(start, end + 1)] + + for host in hosts: + if '-' in host and not (host.startswith('-') or host[0].isalpha()): + start, end = host.strip().split('-') + try: + reworked_hosts.extend(ips(start, end)) + except ValueError: + raise Exception("Range of ip_addresses isn't valid") + else: + reworked_hosts.append(host) + return reworked_hosts + + def exists_hostname(self, existing_hosts, hostname): + return hostname in existing_hosts.keys() + + def exists_ip(self, existing_hosts, ip): + for host_opts in existing_hosts.values(): + if ip == self.get_ip_from_opts(host_opts): + return True + return False + + def delete_host_by_ip(self, existing_hosts, ip): + for hostname, host_opts in existing_hosts.items(): + if ip == self.get_ip_from_opts(host_opts): + del existing_hosts[hostname] + return + raise ValueError("Unable to find host by IP: {0}".format(ip)) + + def purge_invalid_hosts(self, hostnames, protected_names=[]): + for role in self.yaml_config['all']['children']: + if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa + all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy() # noqa + for host in all_hosts.keys(): + if host not in hostnames and host not in protected_names: + self.debug( + "Host {0} removed from role {1}".format(host, role)) # noqa + del self.yaml_config['all']['children'][role]['hosts'][host] # noqa + # purge from all + if self.yaml_config['all']['hosts']: + all_hosts = self.yaml_config['all']['hosts'].copy() + for host in all_hosts.keys(): + if host not in hostnames and host not in protected_names: + self.debug("Host {0} removed from role all".format(host)) + del self.yaml_config['all']['hosts'][host] + + def add_host_to_group(self, group, host, opts=""): + self.debug("adding host {0} to group {1}".format(host, group)) + if group == 'all': + if self.yaml_config['all']['hosts'] is None: + self.yaml_config['all']['hosts'] = {host: None} + 
self.yaml_config['all']['hosts'][host] = opts + elif group != 'k8s_cluster:children': + if self.yaml_config['all']['children'][group]['hosts'] is None: + self.yaml_config['all']['children'][group]['hosts'] = { + host: None} + else: + self.yaml_config['all']['children'][group]['hosts'][host] = None # noqa + + def set_kube_control_plane(self, hosts): + for host in hosts: + self.add_host_to_group('kube_control_plane', host) + + def set_all(self, hosts): + for host, opts in hosts.items(): + self.add_host_to_group('all', host, opts) + + def set_k8s_cluster(self): + k8s_cluster = {'children': {'kube_control_plane': None, + 'kube_node': None}} + self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster + + def set_calico_rr(self, hosts): + for host in hosts: + if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa + self.debug("Not adding {0} to calico_rr group because it " + "conflicts with kube_control_plane " + "group".format(host)) + continue + if host in self.yaml_config['all']['children']['kube_node']: + self.debug("Not adding {0} to calico_rr group because it " + "conflicts with kube_node group".format(host)) + continue + self.add_host_to_group('calico_rr', host) + + def set_kube_node(self, hosts): + for host in hosts: + if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD: + if host in self.yaml_config['all']['children']['etcd']['hosts']: # noqa + self.debug("Not adding {0} to kube_node group because of " + "scale deployment and host is in etcd " + "group.".format(host)) + continue + if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa + if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']: # noqa + self.debug("Not adding {0} to kube_node group because of " + "scale deployment and host is in " + "kube_control_plane group.".format(host)) + continue + self.add_host_to_group('kube_node', host) + + def set_etcd(self, hosts): + for host in hosts: + self.add_host_to_group('etcd', host) + + def load_file(self, files=None): + '''Directly loads JSON to inventory.''' + + if not files: + raise Exception("No input file specified.") + + import json + + for filename in list(files): + # Try JSON + try: + with open(filename, 'r') as f: + data = json.load(f) + except ValueError: + raise Exception("Cannot read %s as JSON, or CSV", filename) + + self.ensure_required_groups(ROLES) + self.set_k8s_cluster() + for group, hosts in data.items(): + self.ensure_required_groups([group]) + for host, opts in hosts.items(): + optstring = {'ansible_host': opts['ip'], + 'ip': opts['ip'], + 'access_ip': opts['ip']} + self.add_host_to_group('all', host, optstring) + self.add_host_to_group(group, host) + self.write_config(self.config_file) + + def parse_command(self, command, args=None): + if command == 'help': + self.show_help() + elif command == 'print_cfg': + self.print_config() + elif command == 'print_ips': + self.print_ips() + elif command == 'print_hostnames': + self.print_hostnames() + elif command == 'load': + self.load_file(args) + else: + raise Exception("Invalid command specified.") + + def show_help(self): + help_text = '''Usage: inventory.py ip1 [ip2 ...] 
+Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5 + +Available commands: +help - Display this message +print_cfg - Write inventory file to stdout +print_ips - Write a space-delimited list of IPs from "all" group +print_hostnames - Write a space-delimited list of Hostnames from "all" group +add - Adds specified hosts into an already existing inventory + +Advanced usage: +Create new or overwrite old inventory file: inventory.py 10.10.1.5 +Add another host after initial creation: inventory.py add 10.10.1.6 +Add range of hosts: inventory.py 10.10.1.3-10.10.1.5 +Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3 +Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3 +Delete a host: inventory.py -10.10.1.3 +Delete a host by id: inventory.py -node1 + +Configurable env vars: +DEBUG Enable debug printing. Default: True +CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.yaml +HOST_PREFIX Host prefix for generated hosts. Default: node +KUBE_CONTROL_HOSTS Set the number of kube-control-planes. Default: 2 +SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50 +MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200 +''' # noqa + print(help_text) + + def print_config(self): + yaml.dump(self.yaml_config, sys.stdout) + + def print_hostnames(self): + print(' '.join(self.yaml_config['all']['hosts'].keys())) + + def print_ips(self): + ips = [] + for host, opts in self.yaml_config['all']['hosts'].items(): + ips.append(self.get_ip_from_opts(opts)) + print(' '.join(ips)) + + +def main(argv=None): + if not argv: + argv = sys.argv[1:] + KubesprayInventory(argv, CONFIG_FILE) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/kubespray/contrib/inventory_builder/requirements.txt b/kubespray/contrib/inventory_builder/requirements.txt new file mode 100644 index 0000000..3d833f6 --- /dev/null +++ b/kubespray/contrib/inventory_builder/requirements.txt @@ -0,0 +1,3 @@ +configparser>=3.3.0 +ruamel.yaml>=0.15.88 +ipaddress diff --git a/kubespray/contrib/inventory_builder/setup.cfg b/kubespray/contrib/inventory_builder/setup.cfg new file mode 100644 index 0000000..a775367 --- /dev/null +++ b/kubespray/contrib/inventory_builder/setup.cfg @@ -0,0 +1,3 @@ +[metadata] +name = kubespray-inventory-builder +version = 0.1 diff --git a/kubespray/contrib/inventory_builder/setup.py b/kubespray/contrib/inventory_builder/setup.py new file mode 100644 index 0000000..43c5ca1 --- /dev/null +++ b/kubespray/contrib/inventory_builder/setup.py @@ -0,0 +1,29 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. 
+# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=[], + pbr=False) diff --git a/kubespray/contrib/inventory_builder/test-requirements.txt b/kubespray/contrib/inventory_builder/test-requirements.txt new file mode 100644 index 0000000..4e334a0 --- /dev/null +++ b/kubespray/contrib/inventory_builder/test-requirements.txt @@ -0,0 +1,3 @@ +hacking>=0.10.2 +pytest>=2.8.0 +mock>=1.3.0 diff --git a/kubespray/contrib/inventory_builder/tests/test_inventory.py b/kubespray/contrib/inventory_builder/tests/test_inventory.py new file mode 100644 index 0000000..5d6649d --- /dev/null +++ b/kubespray/contrib/inventory_builder/tests/test_inventory.py @@ -0,0 +1,595 @@ +# Copyright 2016 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import inventory +from io import StringIO +import unittest +from unittest import mock + +from collections import OrderedDict +import sys + +path = "./contrib/inventory_builder/" +if path not in sys.path: + sys.path.append(path) + +import inventory # noqa + + +class TestInventoryPrintHostnames(unittest.TestCase): + + @mock.patch('ruamel.yaml.YAML.load') + def test_print_hostnames(self, load_mock): + mock_io = mock.mock_open(read_data='') + load_mock.return_value = OrderedDict({'all': {'hosts': { + 'node1': {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}, + 'node2': {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'}}}}) + with mock.patch('builtins.open', mock_io): + with self.assertRaises(SystemExit) as cm: + with mock.patch('sys.stdout', new_callable=StringIO) as stdout: + inventory.KubesprayInventory( + changed_hosts=["print_hostnames"], + config_file="file") + self.assertEqual("node1 node2\n", stdout.getvalue()) + self.assertEqual(cm.exception.code, 0) + + +class TestInventory(unittest.TestCase): + @mock.patch('inventory.sys') + def setUp(self, sys_mock): + sys_mock.exit = mock.Mock() + super(TestInventory, self).setUp() + self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4'] + self.inv = inventory.KubesprayInventory() + + def test_get_ip_from_opts(self): + optstring = {'ansible_host': '10.90.3.2', + 'ip': '10.90.3.2', + 'access_ip': '10.90.3.2'} + expected = "10.90.3.2" + result = self.inv.get_ip_from_opts(optstring) + self.assertEqual(expected, result) + + def test_get_ip_from_opts_invalid(self): + optstring = "notanaddr=value something random!chars:D" + self.assertRaisesRegex(ValueError, "IP parameter not found", + self.inv.get_ip_from_opts, optstring) + + def test_ensure_required_groups(self): + groups = ['group1', 'group2'] + self.inv.ensure_required_groups(groups) + for group in groups: + self.assertIn(group, self.inv.yaml_config['all']['children']) + + def test_get_host_id(self): + hostnames = ['node99', 'no99de01', '01node01', 'node1.domain', + 'node3.xyz123.aaa'] + expected = [99, 1, 1, 1, 3] + for hostname, expected in zip(hostnames, expected): + result = self.inv.get_host_id(hostname) 
+ self.assertEqual(expected, result) + + def test_get_host_id_invalid(self): + bad_hostnames = ['node', 'no99de', '01node', 'node.111111'] + for hostname in bad_hostnames: + self.assertRaisesRegex(ValueError, "Host name must end in an", + self.inv.get_host_id, hostname) + + def test_build_hostnames_add_duplicate(self): + changed_hosts = ['10.90.0.2'] + expected = OrderedDict([('node3', + {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'})]) + self.inv.yaml_config['all']['hosts'] = expected + result = self.inv.build_hostnames(changed_hosts, True) + self.assertEqual(expected, result) + + def test_build_hostnames_add_two(self): + changed_hosts = ['10.90.0.2', '10.90.0.3'] + expected = OrderedDict([ + ('node1', {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + self.inv.yaml_config['all']['hosts'] = OrderedDict() + result = self.inv.build_hostnames(changed_hosts) + self.assertEqual(expected, result) + + def test_build_hostnames_add_three(self): + changed_hosts = ['10.90.0.2', '10.90.0.3', '10.90.0.4'] + expected = OrderedDict([ + ('node1', {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'}), + ('node3', {'ansible_host': '10.90.0.4', + 'ip': '10.90.0.4', + 'access_ip': '10.90.0.4'})]) + result = self.inv.build_hostnames(changed_hosts) + self.assertEqual(expected, result) + + def test_build_hostnames_add_one(self): + changed_hosts = ['10.90.0.2'] + expected = OrderedDict([('node1', + {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'})]) + result = self.inv.build_hostnames(changed_hosts) + self.assertEqual(expected, result) + + def test_build_hostnames_delete_first(self): + changed_hosts = ['-10.90.0.2'] + existing_hosts = OrderedDict([ + ('node1', {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + self.inv.yaml_config['all']['hosts'] = existing_hosts + expected = OrderedDict([ + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + result = self.inv.build_hostnames(changed_hosts, True) + self.assertEqual(expected, result) + + def test_build_hostnames_delete_by_hostname(self): + changed_hosts = ['-node1'] + existing_hosts = OrderedDict([ + ('node1', {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + self.inv.yaml_config['all']['hosts'] = existing_hosts + expected = OrderedDict([ + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + result = self.inv.build_hostnames(changed_hosts, True) + self.assertEqual(expected, result) + + def test_exists_hostname_positive(self): + hostname = 'node1' + expected = True + existing_hosts = OrderedDict([ + ('node1', {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + result = self.inv.exists_hostname(existing_hosts, hostname) + self.assertEqual(expected, result) + + def test_exists_hostname_negative(self): + hostname = 'node99' + expected = False + existing_hosts = OrderedDict([ + ('node1', {'ansible_host': 
'10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + result = self.inv.exists_hostname(existing_hosts, hostname) + self.assertEqual(expected, result) + + def test_exists_ip_positive(self): + ip = '10.90.0.2' + expected = True + existing_hosts = OrderedDict([ + ('node1', {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + result = self.inv.exists_ip(existing_hosts, ip) + self.assertEqual(expected, result) + + def test_exists_ip_negative(self): + ip = '10.90.0.200' + expected = False + existing_hosts = OrderedDict([ + ('node1', {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + result = self.inv.exists_ip(existing_hosts, ip) + self.assertEqual(expected, result) + + def test_delete_host_by_ip_positive(self): + ip = '10.90.0.2' + expected = OrderedDict([ + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + existing_hosts = OrderedDict([ + ('node1', {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + self.inv.delete_host_by_ip(existing_hosts, ip) + self.assertEqual(expected, existing_hosts) + + def test_delete_host_by_ip_negative(self): + ip = '10.90.0.200' + existing_hosts = OrderedDict([ + ('node1', {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'})]) + self.assertRaisesRegex(ValueError, "Unable to find host", + self.inv.delete_host_by_ip, existing_hosts, ip) + + def test_purge_invalid_hosts(self): + proper_hostnames = ['node1', 'node2'] + bad_host = 'doesnotbelong2' + existing_hosts = OrderedDict([ + ('node1', {'ansible_host': '10.90.0.2', + 'ip': '10.90.0.2', + 'access_ip': '10.90.0.2'}), + ('node2', {'ansible_host': '10.90.0.3', + 'ip': '10.90.0.3', + 'access_ip': '10.90.0.3'}), + ('doesnotbelong2', {'whateveropts=ilike'})]) + self.inv.yaml_config['all']['hosts'] = existing_hosts + self.inv.purge_invalid_hosts(proper_hostnames) + self.assertNotIn( + bad_host, self.inv.yaml_config['all']['hosts'].keys()) + + def test_add_host_to_group(self): + group = 'etcd' + host = 'node1' + opts = {'ip': '10.90.0.2'} + + self.inv.add_host_to_group(group, host, opts) + self.assertEqual( + self.inv.yaml_config['all']['children'][group]['hosts'].get(host), + None) + + def test_set_kube_control_plane(self): + group = 'kube_control_plane' + host = 'node1' + + self.inv.set_kube_control_plane([host]) + self.assertIn( + host, self.inv.yaml_config['all']['children'][group]['hosts']) + + def test_set_all(self): + hosts = OrderedDict([ + ('node1', 'opt1'), + ('node2', 'opt2')]) + + self.inv.set_all(hosts) + for host, opt in hosts.items(): + self.assertEqual( + self.inv.yaml_config['all']['hosts'].get(host), opt) + + def test_set_k8s_cluster(self): + group = 'k8s_cluster' + expected_hosts = ['kube_node', 'kube_control_plane'] + + self.inv.set_k8s_cluster() + for host in expected_hosts: + self.assertIn( + host, + self.inv.yaml_config['all']['children'][group]['children']) + + def test_set_kube_node(self): + group = 'kube_node' + host = 'node1' + + self.inv.set_kube_node([host]) + 
self.assertIn( + host, self.inv.yaml_config['all']['children'][group]['hosts']) + + def test_set_etcd(self): + group = 'etcd' + host = 'node1' + + self.inv.set_etcd([host]) + self.assertIn( + host, self.inv.yaml_config['all']['children'][group]['hosts']) + + def test_scale_scenario_one(self): + num_nodes = 50 + hosts = OrderedDict() + + for hostid in range(1, num_nodes+1): + hosts["node" + str(hostid)] = "" + + self.inv.set_all(hosts) + self.inv.set_etcd(list(hosts.keys())[0:3]) + self.inv.set_kube_control_plane(list(hosts.keys())[0:2]) + self.inv.set_kube_node(hosts.keys()) + for h in range(3): + self.assertFalse( + list(hosts.keys())[h] in + self.inv.yaml_config['all']['children']['kube_node']['hosts']) + + def test_scale_scenario_two(self): + num_nodes = 500 + hosts = OrderedDict() + + for hostid in range(1, num_nodes+1): + hosts["node" + str(hostid)] = "" + + self.inv.set_all(hosts) + self.inv.set_etcd(list(hosts.keys())[0:3]) + self.inv.set_kube_control_plane(list(hosts.keys())[3:5]) + self.inv.set_kube_node(hosts.keys()) + for h in range(5): + self.assertFalse( + list(hosts.keys())[h] in + self.inv.yaml_config['all']['children']['kube_node']['hosts']) + + def test_range2ips_range(self): + changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8'] + expected = ['10.90.0.2', + '10.90.0.4', + '10.90.0.5', + '10.90.0.6', + '10.90.0.8'] + result = self.inv.range2ips(changed_hosts) + self.assertEqual(expected, result) + + def test_range2ips_incorrect_range(self): + host_range = ['10.90.0.4-a.9b.c.e'] + self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid", + self.inv.range2ips, host_range) + + def test_build_hostnames_create_with_one_different_ips(self): + changed_hosts = ['10.90.0.2,192.168.0.2'] + expected = OrderedDict([('node1', + {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'})]) + result = self.inv.build_hostnames(changed_hosts) + self.assertEqual(expected, result) + + def test_build_hostnames_create_with_two_different_ips(self): + changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3'] + expected = OrderedDict([ + ('node1', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node2', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'})]) + result = self.inv.build_hostnames(changed_hosts) + self.assertEqual(expected, result) + + def test_build_hostnames_create_with_three_different_ips(self): + changed_hosts = ['10.90.0.2,192.168.0.2', + '10.90.0.3,192.168.0.3', + '10.90.0.4,192.168.0.4'] + expected = OrderedDict([ + ('node1', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node2', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'}), + ('node3', {'ansible_host': '192.168.0.4', + 'ip': '10.90.0.4', + 'access_ip': '192.168.0.4'})]) + result = self.inv.build_hostnames(changed_hosts) + self.assertEqual(expected, result) + + def test_build_hostnames_overwrite_one_with_different_ips(self): + changed_hosts = ['10.90.0.2,192.168.0.2'] + expected = OrderedDict([('node1', + {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'})]) + existing = OrderedDict([('node5', + {'ansible_host': '192.168.0.5', + 'ip': '10.90.0.5', + 'access_ip': '192.168.0.5'})]) + self.inv.yaml_config['all']['hosts'] = existing + result = self.inv.build_hostnames(changed_hosts) + self.assertEqual(expected, result) + + def test_build_hostnames_overwrite_three_with_different_ips(self): + 
changed_hosts = ['10.90.0.2,192.168.0.2'] + expected = OrderedDict([('node1', + {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'})]) + existing = OrderedDict([ + ('node3', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'}), + ('node4', {'ansible_host': '192.168.0.4', + 'ip': '10.90.0.4', + 'access_ip': '192.168.0.4'}), + ('node5', {'ansible_host': '192.168.0.5', + 'ip': '10.90.0.5', + 'access_ip': '192.168.0.5'})]) + self.inv.yaml_config['all']['hosts'] = existing + result = self.inv.build_hostnames(changed_hosts) + self.assertEqual(expected, result) + + def test_build_hostnames_different_ips_add_duplicate(self): + changed_hosts = ['10.90.0.2,192.168.0.2'] + expected = OrderedDict([('node3', + {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'})]) + existing = expected + self.inv.yaml_config['all']['hosts'] = existing + result = self.inv.build_hostnames(changed_hosts, True) + self.assertEqual(expected, result) + + def test_build_hostnames_add_two_different_ips_into_one_existing(self): + changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4'] + expected = OrderedDict([ + ('node2', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node3', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'}), + ('node4', {'ansible_host': '192.168.0.4', + 'ip': '10.90.0.4', + 'access_ip': '192.168.0.4'})]) + + existing = OrderedDict([ + ('node2', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'})]) + self.inv.yaml_config['all']['hosts'] = existing + result = self.inv.build_hostnames(changed_hosts, True) + self.assertEqual(expected, result) + + def test_build_hostnames_add_two_different_ips_into_two_existing(self): + changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5'] + expected = OrderedDict([ + ('node2', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node3', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'}), + ('node4', {'ansible_host': '192.168.0.4', + 'ip': '10.90.0.4', + 'access_ip': '192.168.0.4'}), + ('node5', {'ansible_host': '192.168.0.5', + 'ip': '10.90.0.5', + 'access_ip': '192.168.0.5'})]) + + existing = OrderedDict([ + ('node2', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node3', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'})]) + self.inv.yaml_config['all']['hosts'] = existing + result = self.inv.build_hostnames(changed_hosts, True) + self.assertEqual(expected, result) + + def test_build_hostnames_add_two_different_ips_into_three_existing(self): + changed_hosts = ['10.90.0.5,192.168.0.5', '10.90.0.6,192.168.0.6'] + expected = OrderedDict([ + ('node2', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node3', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'}), + ('node4', {'ansible_host': '192.168.0.4', + 'ip': '10.90.0.4', + 'access_ip': '192.168.0.4'}), + ('node5', {'ansible_host': '192.168.0.5', + 'ip': '10.90.0.5', + 'access_ip': '192.168.0.5'}), + ('node6', {'ansible_host': '192.168.0.6', + 'ip': '10.90.0.6', + 'access_ip': '192.168.0.6'})]) + + existing = OrderedDict([ + ('node2', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node3', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': 
'192.168.0.3'}), + ('node4', {'ansible_host': '192.168.0.4', + 'ip': '10.90.0.4', + 'access_ip': '192.168.0.4'})]) + self.inv.yaml_config['all']['hosts'] = existing + result = self.inv.build_hostnames(changed_hosts, True) + self.assertEqual(expected, result) + + # Add two IP addresses into a config that has + # three already defined IP addresses. One of the IP addresses + # is a duplicate. + def test_build_hostnames_add_two_duplicate_one_overlap(self): + changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5'] + expected = OrderedDict([ + ('node2', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node3', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'}), + ('node4', {'ansible_host': '192.168.0.4', + 'ip': '10.90.0.4', + 'access_ip': '192.168.0.4'}), + ('node5', {'ansible_host': '192.168.0.5', + 'ip': '10.90.0.5', + 'access_ip': '192.168.0.5'})]) + + existing = OrderedDict([ + ('node2', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node3', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'}), + ('node4', {'ansible_host': '192.168.0.4', + 'ip': '10.90.0.4', + 'access_ip': '192.168.0.4'})]) + self.inv.yaml_config['all']['hosts'] = existing + result = self.inv.build_hostnames(changed_hosts, True) + self.assertEqual(expected, result) + + # Add two duplicate IP addresses into a config that has + # three already defined IP addresses + def test_build_hostnames_add_two_duplicate_two_overlap(self): + changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4'] + expected = OrderedDict([ + ('node2', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node3', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'}), + ('node4', {'ansible_host': '192.168.0.4', + 'ip': '10.90.0.4', + 'access_ip': '192.168.0.4'})]) + + existing = OrderedDict([ + ('node2', {'ansible_host': '192.168.0.2', + 'ip': '10.90.0.2', + 'access_ip': '192.168.0.2'}), + ('node3', {'ansible_host': '192.168.0.3', + 'ip': '10.90.0.3', + 'access_ip': '192.168.0.3'}), + ('node4', {'ansible_host': '192.168.0.4', + 'ip': '10.90.0.4', + 'access_ip': '192.168.0.4'})]) + self.inv.yaml_config['all']['hosts'] = existing + result = self.inv.build_hostnames(changed_hosts, True) + self.assertEqual(expected, result) diff --git a/kubespray/contrib/inventory_builder/tox.ini b/kubespray/contrib/inventory_builder/tox.ini new file mode 100644 index 0000000..889fe78 --- /dev/null +++ b/kubespray/contrib/inventory_builder/tox.ini @@ -0,0 +1,28 @@ +[tox] +minversion = 1.6 +skipsdist = True +envlist = pep8, py33 + +[testenv] +whitelist_externals = py.test +usedevelop = True +deps = + -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = VIRTUAL_ENV={envdir} +passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY +commands = pytest -vv #{posargs:./tests} + +[testenv:pep8] +usedevelop = False +whitelist_externals = bash +commands = + bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8" + +[testenv:venv] +commands = {posargs} + +[flake8] +show-source = true +builtins = _ +exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg diff --git a/kubespray/contrib/kvm-setup/README.md b/kubespray/contrib/kvm-setup/README.md new file mode 100644 index 0000000..559bc65 --- /dev/null +++ b/kubespray/contrib/kvm-setup/README.md @@ -0,0 +1,11 @@ +# Kubespray on KVM Virtual Machines 
hypervisor preparation + +A simple playbook to ensure your system has the right settings to enable Kubespray +deployment on VMs. + +This playbook does not create Virtual Machines, nor does it run Kubespray itself. + +## User creation + +If you want to create a user for running Kubespray deployment, you should specify +both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`. diff --git a/kubespray/contrib/kvm-setup/group_vars/all b/kubespray/contrib/kvm-setup/group_vars/all new file mode 100644 index 0000000..6edfd8f --- /dev/null +++ b/kubespray/contrib/kvm-setup/group_vars/all @@ -0,0 +1,3 @@ +#k8s_deployment_user: kubespray +#k8s_deployment_user_pkey_path: /tmp/ssh_rsa + diff --git a/kubespray/contrib/kvm-setup/kvm-setup.yml b/kubespray/contrib/kvm-setup/kvm-setup.yml new file mode 100644 index 0000000..18b7206 --- /dev/null +++ b/kubespray/contrib/kvm-setup/kvm-setup.yml @@ -0,0 +1,8 @@ +--- +- hosts: localhost + gather_facts: False + become: yes + vars: + - bootstrap_os: none + roles: + - kvm-setup diff --git a/kubespray/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml b/kubespray/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml new file mode 100644 index 0000000..a033c4e --- /dev/null +++ b/kubespray/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml @@ -0,0 +1,30 @@ +--- + +- name: Install required packages + package: + name: "{{ item }}" + state: present + with_items: + - bind-utils + - ntp + when: ansible_os_family == "RedHat" + +- name: Install required packages + apt: + upgrade: yes + update_cache: yes + cache_valid_time: 3600 + name: "{{ item }}" + state: present + install_recommends: no + with_items: + - dnsutils + - ntp + when: ansible_os_family == "Debian" + +# Create deployment user if required +- include: user.yml + when: k8s_deployment_user is defined + +# Set proper sysctl values +- include: sysctl.yml diff --git a/kubespray/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml b/kubespray/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml new file mode 100644 index 0000000..d991b10 --- /dev/null +++ b/kubespray/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml @@ -0,0 +1,46 @@ +--- +- name: Load br_netfilter module + modprobe: + name: br_netfilter + state: present + register: br_netfilter + +- name: Add br_netfilter into /etc/modules + lineinfile: + dest: /etc/modules + state: present + line: 'br_netfilter' + when: br_netfilter is defined and ansible_os_family == 'Debian' + +- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf + copy: + dest: /etc/modules-load.d/kubespray.conf + content: |- + ### This file is managed by Ansible + br-netfilter + owner: root + group: root + mode: 0644 + when: br_netfilter is defined + + +- name: Enable net.ipv4.ip_forward in sysctl + sysctl: + name: net.ipv4.ip_forward + value: 1 + sysctl_file: "{{ sysctl_file_path }}" + state: present + reload: yes + +- name: Set bridge-nf-call-{arptables,iptables} to 0 + sysctl: + name: "{{ item }}" + state: present + value: 0 + sysctl_file: "{{ sysctl_file_path }}" + reload: yes + with_items: + - net.bridge.bridge-nf-call-arptables + - net.bridge.bridge-nf-call-ip6tables + - net.bridge.bridge-nf-call-iptables + when: br_netfilter is defined diff --git a/kubespray/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml b/kubespray/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml new file mode 100644 index 0000000..c2d3123 --- /dev/null +++ b/kubespray/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml @@ -0,0 +1,47 @@ +--- +- name: Create user {{ k8s_deployment_user }} + user: + name: "{{ 
k8s_deployment_user }}" + groups: adm + shell: /bin/bash + +- name: Ensure that .ssh exists + file: + path: "/home/{{ k8s_deployment_user }}/.ssh" + state: directory + owner: "{{ k8s_deployment_user }}" + group: "{{ k8s_deployment_user }}" + mode: 0700 + +- name: Configure sudo for deployment user + copy: + content: | + %{{ k8s_deployment_user }} ALL=(ALL) NOPASSWD: ALL + dest: "/etc/sudoers.d/55-k8s-deployment" + owner: root + group: root + mode: 0644 + +- name: Write private SSH key + copy: + src: "{{ k8s_deployment_user_pkey_path }}" + dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa" + mode: 0400 + owner: "{{ k8s_deployment_user }}" + group: "{{ k8s_deployment_user }}" + when: k8s_deployment_user_pkey_path is defined + +- name: Write public SSH key + shell: "ssh-keygen -y -f /home/{{ k8s_deployment_user }}/.ssh/id_rsa \ + > /home/{{ k8s_deployment_user }}/.ssh/authorized_keys" + args: + creates: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys" + when: k8s_deployment_user_pkey_path is defined + +- name: Fix ssh-pub-key permissions + file: + path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys" + mode: 0600 + owner: "{{ k8s_deployment_user }}" + group: "{{ k8s_deployment_user }}" + when: k8s_deployment_user_pkey_path is defined diff --git a/kubespray/contrib/misc/clusteradmin-rbac.yml b/kubespray/contrib/misc/clusteradmin-rbac.yml new file mode 100644 index 0000000..c02322f --- /dev/null +++ b/kubespray/contrib/misc/clusteradmin-rbac.yml @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard + labels: + k8s-app: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system diff --git a/kubespray/contrib/mitogen/mitogen.yml b/kubespray/contrib/mitogen/mitogen.yml new file mode 100644 index 0000000..4dbd0fb --- /dev/null +++ b/kubespray/contrib/mitogen/mitogen.yml @@ -0,0 +1,49 @@ +--- +- name: Check ansible version + import_playbook: ansible_version.yml + +- hosts: localhost + strategy: linear + vars: + mitogen_version: 0.3.2 + mitogen_url: https://github.com/mitogen-hq/mitogen/archive/refs/tags/v{{ mitogen_version }}.tar.gz + ansible_connection: local + tasks: + - name: Create mitogen plugin dir + file: + path: "{{ item }}" + state: directory + mode: 0755 + become: false + loop: + - "{{ playbook_dir }}/plugins/mitogen" + - "{{ playbook_dir }}/dist" + + - name: download mitogen release + get_url: + url: "{{ mitogen_url }}" + dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz" + validate_certs: true + + - name: extract archive + unarchive: + src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz" + dest: "{{ playbook_dir }}/dist/" + + - name: copy plugin + synchronize: + src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/" + dest: "{{ playbook_dir }}/plugins/mitogen" + + - name: add strategy to ansible.cfg + ini_file: + path: ansible.cfg + mode: 0644 + section: "{{ item.section | d('defaults') }}" + option: "{{ item.option }}" + value: "{{ item.value }}" + with_items: + - option: strategy + value: mitogen_linear + - option: strategy_plugins + value: plugins/mitogen/ansible_mitogen/plugins/strategy diff --git a/kubespray/contrib/network-storage/glusterfs/README.md b/kubespray/contrib/network-storage/glusterfs/README.md new file mode 100644 index 0000000..bfe0a4d --- /dev/null +++ 
b/kubespray/contrib/network-storage/glusterfs/README.md
@@ -0,0 +1,92 @@
+# Deploying a Kubespray Kubernetes Cluster with GlusterFS
+
+You can either deploy using Ansible on its own by supplying your own inventory file, or use Terraform to create the VMs and then provide a dynamic inventory to Ansible. The following two sections are self-contained; you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built Ansible inventory, you can skip the **Using Terraform and Ansible** section.
+
+## Using an Ansible inventory
+
+In the same directory as this README you should find a file named `inventory.example` which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
+
+Change that file to reflect your local setup (adding or removing machines and setting the appropriate IP addresses), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (assuming the machines are all running Ubuntu):
+
+```shell
+ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
+```
+
+This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, execute from the same directory:
+
+```shell
+ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
+```
+
+If your machines are not running Ubuntu, you need to change `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines use one OS and your GlusterFS machines another, you can instead specify the `ansible_ssh_user=` variable in the inventory file that you just created, for each machine/VM:
+
+```shell
+k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
+k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
+k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
+```
+
+## Using Terraform and Ansible
+
+The first step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster.
+An example with all required variables would look like:
+
+```ini
+cluster_name = "cluster1"
+number_of_k8s_masters = "1"
+number_of_k8s_masters_no_floating_ip = "2"
+number_of_k8s_nodes_no_floating_ip = "0"
+number_of_k8s_nodes = "0"
+public_key_path = "~/.ssh/my-desired-key.pub"
+image = "Ubuntu 16.04"
+ssh_user = "ubuntu"
+flavor_k8s_node = "node-flavor-id-in-your-openstack"
+flavor_k8s_master = "master-flavor-id-in-your-openstack"
+network_name = "k8s-network"
+floatingip_pool = "net_external"
+
+# GlusterFS variables
+flavor_gfs_node = "gluster-flavor-id-in-your-openstack"
+image_gfs = "Ubuntu 16.04"
+number_of_gfs_nodes_no_floating_ip = "3"
+gfs_volume_size_in_gb = "50"
+ssh_user_gfs = "ubuntu"
+```
+
+As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your SSH key to the ssh-agent, and set up the environment variables for Terraform:
+
+```shell
+$ source ~/.stackrc
+$ eval $(ssh-agent -s)
+$ ssh-add ~/.ssh/my-desired-key
+$ echo Setting up Terraform creds && \
+  export TF_VAR_username=${OS_USERNAME} && \
+  export TF_VAR_password=${OS_PASSWORD} && \
+  export TF_VAR_tenant=${OS_TENANT_NAME} && \
+  export TF_VAR_auth_url=${OS_AUTH_URL}
+```
+
+Then, from the kubespray directory (the root of the Git checkout), issue the following Terraform command to create the VMs for the cluster:
+
+```shell
+terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
+```
+
+This will create both your Kubernetes and Gluster VMs. Make sure that the Ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any Ansible variables that you want to set (for instance, the type of machine used for bootstrapping); an illustrative sketch follows.
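+
+For illustration only, such overrides might look like the sketch below. The variable names follow Kubespray's sample group_vars (`upstream_dns_servers` appears in the sample `all.yml`; `bootstrap_os` is used by the bootstrap-os role, if your Kubespray version still relies on it); the values are placeholders to adapt, not a recommended configuration:
+
+```yaml
+# Illustrative example only -- adjust or remove these for your own deployment
+bootstrap_os: ubuntu
+upstream_dns_servers:
+  - 8.8.8.8
+  - 8.8.4.4
+```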
+
+Then, provision your Kubernetes (kubespray) cluster with the following Ansible call:
+
+```shell
+ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
+```
+
+Finally, provision the GlusterFS nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following Ansible call:
+
+```shell
+ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
+```
+
+If you need to destroy the cluster, you can run:
+
+```shell
+terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
+```
diff --git a/kubespray/contrib/network-storage/glusterfs/glusterfs.yml b/kubespray/contrib/network-storage/glusterfs/glusterfs.yml
new file mode 100644
index 0000000..79fc3ae
--- /dev/null
+++ b/kubespray/contrib/network-storage/glusterfs/glusterfs.yml
@@ -0,0 +1,24 @@
+---
+- hosts: gfs-cluster
+  gather_facts: false
+  vars:
+    ansible_ssh_pipelining: false
+  roles:
+    - { role: bootstrap-os, tags: bootstrap-os}
+
+- hosts: all
+  gather_facts: true
+
+- hosts: gfs-cluster
+  vars:
+    ansible_ssh_pipelining: true
+  roles:
+    - { role: glusterfs/server }
+
+- hosts: k8s_cluster
+  roles:
+    - { role: glusterfs/client }
+
+- hosts: kube_control_plane[0]
+  roles:
+    - { role: kubernetes-pv }
diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/all.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/all.yml
new file mode 100644
index 0000000..b9639a8
--- /dev/null
+++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/all.yml
@@ -0,0 +1,140 @@
+---
+## Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+# access_ip: 1.1.1.1
+
+
+## External LB example config
+## apiserver_loadbalancer_domain_name: "elb.some.domain"
+# loadbalancer_apiserver:
+#   address: 1.2.3.4
+#   port: 1234
+
+## Internal loadbalancers for apiservers
+# loadbalancer_apiserver_localhost: true
+# valid options are "nginx" or "haproxy"
+# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"
+
+## If Cilium is going to be used in strict mode, we can use the
+## localhost connection and not use the external LB. If this parameter is
+## not specified, the first node to connect to kubeapi will be used.
+# use_localhost_as_kubeapi_loadbalancer: true
+
+## Local loadbalancer should use this port
+## And must be set to port 6443
+loadbalancer_apiserver_port: 6443
+
+## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
+loadbalancer_apiserver_healthcheck_port: 8081
+
+### OTHER OPTIONAL VARIABLES
+
+## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers to nameserverentries.
+## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it still uses the nameservers in the dns_early stage to make sure the cluster installs safely.
+## Use this option with caution; you may need to define your DNS servers. Otherwise, outbound queries such as www.google.com may fail.
+# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. 
+# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. +ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/aws.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/azure.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. 
+## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/containerd.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. 
+# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/coreos.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Whether CoreOS should auto-upgrade; default is true +# coreos_auto_upgrade: true diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/cri-o.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/docker.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## You must define a disk path in docker_container_storage_setup_devs, +## otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self-hosted registries. +## Each entry can be an IP address or a domain name. +## Example: 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registries, for example a China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value.
+## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/etcd.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/gcp.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/hcloud.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/oci.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# 
oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. +# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/offline.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo 
}}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Flannel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, the base and extras repos must be available; for EL8, baseos and appstream +### By default we enable those repos automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{
yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/openstack.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# 
external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use the Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/upcloud.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpCloud's csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpCloud's CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/all/vsphere.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## vSphere version where the VMs are located +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use the vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/etcd.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++
b/kubespray/contrib/network-storage/glusterfs/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. +# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/addons.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. 
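+# Example (assumed, not an upstream default): to deploy the dashboard, uncomment the line below and set it to true; access then still requires an RBAC service account and token as described in the doc referenced above.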
+# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment 
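+# Example (illustrative values only, not upstream defaults): a minimal way to expose the controller directly on the nodes would be to set ingress_nginx_enabled: true together with ingress_nginx_host_network: true; both variables appear below.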
+ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: "layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API 
server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# that Kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you have enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns the cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you additionally have to deploy an OpenID Provider (e.g. Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network.
When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each node for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period has to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical pods to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). +# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. 
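+# Example (assumed): if the control plane is only reachable through the address stored in ansible_host, you could set kubeconfig_localhost: true together with the flag below.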
+# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. +# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to set up a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvidia_gpu_nodes; leave empty or commented if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. +# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites.
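+# Example (illustrative only): to restrict the API server to modern AEAD suites, you could uncomment the list below and keep only the *_GCM_* and *_CHACHA20_POLY1305 entries.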
+# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distribute routes with border routers of the datacenter. +## Warning: enabling router peering will disable calico's default behavior ('node mesh'). +## The subnets of each node will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults to kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned ASNs where the global AS number does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels.
+# - If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enables BGP routing, required for ipip and no-encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN are mutually exclusive modes. +# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use a non-default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host's interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tuning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication.
+# If left blank, then the interface is chosen using the node's +# default route. +# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. +# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags, upon their +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all connected clusters and +# in the range of 1 to 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This makes it possible to deploy cilium alongside another CNI to replace kube-proxy.
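+# Example (assumed, not an upstream default): with kube_network_plugin: calico you could still set cilium_deploy_additionally: true to run Cilium purely as a kube-proxy replacement.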
+# cilium_deploy_additionally: false + +# Auto direct node routes can be used to advertise pod routes in your cluster +# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`). +# This works only if you have L2 connectivity between all your nodes. +# You will also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setup. +# cilium_auto_direct_node_routes: false + +# Allows explicitly specifying the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +# cilium_native_routing_cidr: "" + +# Allows explicitly specifying the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fall back on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
+# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. +# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. 
+# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. +# cilium_disable_cnp_status_updates: true diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic to support container network can be a nic name or a group of regex separated by comma e.g: 'enp6s0f0,eth.*', if empty will use the nic that the default route use. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bind to the physical port. 
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works +# only with Weave IPAM (default). 
+# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/contrib/network-storage/glusterfs/inventory.example b/kubespray/contrib/network-storage/glusterfs/inventory.example new file mode 100644 index 0000000..84dd022 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/inventory.example @@ -0,0 +1,44 @@ +# ## Configure 'ip' variable to bind kubernetes services on a +# ## different ip than the default iface +# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1 +# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2 +# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3 +# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4 +# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5 +# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6 +# +# ## GlusterFS nodes +# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default). +# ## As in the previous case, you can set ip to give direct communication on internal IPs +# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7 +# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8 +# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9 + +# [kube_control_plane] +# node1 +# node2 + +# [etcd] +# node1 +# node2 +# node3 + +# [kube_node] +# node2 +# node3 +# node4 +# node5 +# node6 + +# [k8s_cluster:children] +# kube_node +# kube_control_plane + +# [gfs-cluster] +# gfs_node1 +# gfs_node2 +# gfs_node3 + +# [network-storage:children] +# gfs-cluster + diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/defaults/main.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/defaults/main.yml new file mode 100644 index 0000000..9b31456 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/defaults/main.yml @@ -0,0 +1,32 @@ +--- +## CentOS/RHEL/AlmaLinux specific variables +# Use the fastestmirror yum plugin +centos_fastestmirror_enabled: false + +## Flatcar Container Linux specific variables +# Disable locksmithd or leave it in its current state +coreos_locksmithd_disable: false + +## Oracle Linux specific variables +# Install public repo on Oracle Linux +use_oracle_public_repo: true + +fedora_coreos_packages: + - python + - python3-libselinux + - ethtool # required in kubeadm preflight phase for verifying the environment + - ipset # required in kubeadm preflight phase for verifying the environment + - conntrack-tools # required by kube-proxy + +## General +# Set the hostname to inventory_hostname +override_system_hostname: true + +is_fedora_coreos: false + +skip_http_proxy_on_os_packages: false + +# If this is true, debug information will be displayed but +# may contain some private data, so it is recommended to set it to false +# in the production environment. 
+unsafe_show_logs: false diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/files/bootstrap.sh b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/files/bootstrap.sh new file mode 100755 index 0000000..69b7b75 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/files/bootstrap.sh @@ -0,0 +1,42 @@ +#!/bin/bash +set -e + +BINDIR="/opt/bin" +if [[ -e $BINDIR/.bootstrapped ]]; then + exit 0 +fi + +ARCH=$(uname -m) +case $ARCH in + "x86_64") + PYPY_ARCH=linux64 + PYPI_HASH=46818cb3d74b96b34787548343d266e2562b531ddbaf330383ba930ff1930ed5 + ;; + "aarch64") + PYPY_ARCH=aarch64 + PYPI_HASH=2e1ae193d98bc51439642a7618d521ea019f45b8fb226940f7e334c548d2b4b9 + ;; + *) + echo "Unsupported Architecture: ${ARCH}" + exit 1 +esac + +PYTHON_VERSION=3.9 +PYPY_VERSION=7.3.9 +PYPY_FILENAME="pypy${PYTHON_VERSION}-v${PYPY_VERSION}-${PYPY_ARCH}" +PYPI_URL="https://downloads.python.org/pypy/${PYPY_FILENAME}.tar.bz2" + +mkdir -p $BINDIR + +cd $BINDIR + +TAR_FILE=pyp.tar.bz2 +wget -O "${TAR_FILE}" "${PYPI_URL}" +echo "${PYPI_HASH} ${TAR_FILE}" | sha256sum -c - +tar -xjf "${TAR_FILE}" && rm "${TAR_FILE}" +mv -n "${PYPY_FILENAME}" pypy3 + +ln -s ./pypy3/bin/pypy3 python +$BINDIR/python --version + +touch $BINDIR/.bootstrapped diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/handlers/main.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/handlers/main.yml new file mode 100644 index 0000000..7c8c4fe --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: RHEL auto-attach subscription + command: /sbin/subscription-manager attach --auto + become: true diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/molecule/default/converge.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/molecule/default/converge.yml new file mode 100644 index 0000000..1f44ec9 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/molecule/default/converge.yml @@ -0,0 +1,6 @@ +--- +- name: Converge + hosts: all + gather_facts: no + roles: + - role: bootstrap-os diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/molecule/default/molecule.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/molecule/default/molecule.yml new file mode 100644 index 0000000..8413baa --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/molecule/default/molecule.yml @@ -0,0 +1,57 @@ +--- +dependency: + name: galaxy +lint: | + set -e + yamllint -c ../../.yamllint . 
+driver: + name: vagrant + provider: + name: libvirt +platforms: + - name: ubuntu16 + box: generic/ubuntu1604 + cpus: 1 + memory: 512 + - name: ubuntu18 + box: generic/ubuntu1804 + cpus: 1 + memory: 512 + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 512 + - name: centos7 + box: centos/7 + cpus: 1 + memory: 512 + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 512 + - name: debian9 + box: generic/debian9 + cpus: 1 + memory: 512 + - name: debian10 + box: generic/debian10 + cpus: 1 + memory: 512 +provisioner: + name: ansible + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + inventory: + group_vars: + all: + user: + name: foo + comment: My test comment +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/molecule/default/tests/test_default.py b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/molecule/default/tests/test_default.py new file mode 100644 index 0000000..64c59dd --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/molecule/default/tests/test_default.py @@ -0,0 +1,11 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE'] +).get_hosts('all') + + +def test_python(host): + assert host.exists('python3') or host.exists('python') diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-amazon.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-amazon.yml new file mode 100644 index 0000000..2b4d665 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-amazon.yml @@ -0,0 +1,13 @@ +--- +- name: Enable EPEL repo for Amazon Linux + yum_repository: + name: epel + file: epel + description: Extra Packages for Enterprise Linux 7 - $basearch + baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch + gpgcheck: yes + gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 + skip_if_unavailable: yes + enabled: yes + repo_gpgcheck: no + when: epel_enabled diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-centos.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-centos.yml new file mode 100644 index 0000000..007fdce --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-centos.yml @@ -0,0 +1,117 @@ +--- +- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version + setup: + gather_subset: '!all' + filter: ansible_distribution_*version + +- name: Add proxy to yum.conf or dnf.conf if http_proxy is defined + ini_file: + path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}" + section: main + option: proxy + value: "{{ http_proxy | default(omit) }}" + state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" + no_extra_spaces: true + mode: 0644 + become: true + when: not skip_http_proxy_on_os_packages + +# For Oracle Linux install public repo +- name: Download Oracle Linux public yum repo + get_url: + url: https://yum.oracle.com/public-yum-ol7.repo + dest: /etc/yum.repos.d/public-yum-ol7.repo + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) < 7.6 + 
environment: "{{ proxy_env }}" + +- name: Enable Oracle Linux repo + ini_file: + dest: /etc/yum.repos.d/public-yum-ol7.repo + section: "{{ item }}" + option: enabled + value: "1" + mode: 0644 + with_items: + - ol7_latest + - ol7_addons + - ol7_developer_EPEL + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) < 7.6 + +- name: Install EPEL for Oracle Linux repo package + package: + name: "oracle-epel-release-el{{ ansible_distribution_major_version }}" + state: present + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) >= 7.6 + +- name: Enable Oracle Linux repo + ini_file: + dest: "/etc/yum.repos.d/oracle-linux-ol{{ ansible_distribution_major_version }}.repo" + section: "ol{{ ansible_distribution_major_version }}_addons" + option: "{{ item.option }}" + value: "{{ item.value }}" + mode: 0644 + with_items: + - { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" } + - { option: "enabled", value: "1" } + - { option: "baseurl", value: "http://yum.oracle.com/repo/OracleLinux/OL{{ ansible_distribution_major_version }}/addons/$basearch/" } + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) >= 7.6 + +- name: Enable Centos extra repo for Oracle Linux + ini_file: + dest: "/etc/yum.repos.d/centos-extras.repo" + section: "extras" + option: "{{ item.option }}" + value: "{{ item.value }}" + mode: 0644 + with_items: + - { option: "name", value: "CentOS-{{ ansible_distribution_major_version }} - Extras" } + - { option: "enabled", value: "1" } + - { option: "gpgcheck", value: "0" } + - { option: "baseurl", value: "http://mirror.centos.org/centos/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version|int > 7 %}os/{% endif %}" } + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) >= 7.6 + - (ansible_distribution_version | float) < 9 + +# CentOS ships with python installed + +- name: Check presence of fastestmirror.conf + stat: + path: /etc/yum/pluginconf.d/fastestmirror.conf + get_attributes: no + get_checksum: no + get_mime: no + register: fastestmirror + +# the fastestmirror plugin can actually slow down Ansible deployments +- name: Disable fastestmirror plugin if requested + lineinfile: + dest: /etc/yum/pluginconf.d/fastestmirror.conf + regexp: "^enabled=.*" + line: "enabled=0" + state: present + become: true + when: + - fastestmirror.stat.exists + - not centos_fastestmirror_enabled + +# libselinux-python is required on SELinux enabled hosts +# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements +- name: Install libselinux python package + package: + name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + state: present + become: true diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-clearlinux.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-clearlinux.yml new file mode 100644 index 0000000..de42e3c --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-clearlinux.yml @@ -0,0 +1,16 @@ +--- +# ClearLinux ships with Python installed + +- name: Install basic package 
to run containers + package: + name: containers-basic + state: present + +- name: Make sure docker service is enabled + systemd: + name: docker + masked: false + enabled: true + daemon_reload: true + state: started + become: true diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-coreos.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-coreos.yml new file mode 100644 index 0000000..737a7ec --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-coreos.yml @@ -0,0 +1,37 @@ +--- +# CoreOS ships without Python installed + +- name: Check if bootstrap is needed + raw: stat /opt/bin/.bootstrapped + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Force binaries directory for Container Linux by CoreOS and Flatcar + set_fact: + bin_dir: "/opt/bin" + tags: + - facts + +- name: Run bootstrap.sh + script: bootstrap.sh + become: true + environment: "{{ proxy_env }}" + when: + - need_bootstrap.rc != 0 + +- name: Set the ansible_python_interpreter fact + set_fact: + ansible_python_interpreter: "{{ bin_dir }}/python" + tags: + - facts + +- name: Disable auto-upgrade + systemd: + name: locksmithd.service + masked: true + state: stopped + when: + - coreos_locksmithd_disable diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-debian.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-debian.yml new file mode 100644 index 0000000..47bad20 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-debian.yml @@ -0,0 +1,76 @@ +--- +# Some Debian based distros ship without Python installed + +- name: Check if bootstrap is needed + raw: which python3 + register: need_bootstrap + failed_when: false + changed_when: false + # This command should always run, even in check mode + check_mode: false + tags: + - facts + +- name: Check http::proxy in apt configuration files + raw: apt-config dump | grep -qsi 'Acquire::http::proxy' + register: need_http_proxy + failed_when: false + changed_when: false + # This command should always run, even in check mode + check_mode: false + +- name: Add http_proxy to /etc/apt/apt.conf if http_proxy is defined + raw: echo 'Acquire::http::proxy "{{ http_proxy }}";' >> /etc/apt/apt.conf + become: true + when: + - http_proxy is defined + - need_http_proxy.rc != 0 + - not skip_http_proxy_on_os_packages + +- name: Check https::proxy in apt configuration files + raw: apt-config dump | grep -qsi 'Acquire::https::proxy' + register: need_https_proxy + failed_when: false + changed_when: false + # This command should always run, even in check mode + check_mode: false + +- name: Add https_proxy to /etc/apt/apt.conf if https_proxy is defined + raw: echo 'Acquire::https::proxy "{{ https_proxy }}";' >> /etc/apt/apt.conf + become: true + when: + - https_proxy is defined + - need_https_proxy.rc != 0 + - not skip_http_proxy_on_os_packages + +- name: Install python3 + raw: + apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y python3-minimal + become: true + when: + - need_bootstrap.rc != 0 + +- name: Update Apt cache + raw: apt-get update --allow-releaseinfo-change + become: true + when: + - '''ID=debian'' in os_release.stdout_lines' + - '''VERSION_ID="10"'' in os_release.stdout_lines or ''VERSION_ID="11"'' in os_release.stdout_lines' + register: bootstrap_update_apt_result + changed_when: + - '"changed its" 
+in bootstrap_update_apt_result.stdout' + - '"value from" in bootstrap_update_apt_result.stdout' + ignore_errors: true + +- name: Set the ansible_python_interpreter fact + set_fact: + ansible_python_interpreter: "/usr/bin/python3" + +# Workaround for https://github.com/ansible/ansible/issues/25543 +- name: Install dbus for the hostname module + package: + name: dbus + state: present + use: apt + become: true diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml new file mode 100644 index 0000000..d3fd1c9 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml @@ -0,0 +1,46 @@ +--- + +- name: Check if bootstrap is needed + raw: which python + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Remove podman network cni + raw: "podman network rm podman" + become: true + ignore_errors: true # noqa ignore-errors + when: need_bootstrap.rc != 0 + +- name: Clean up possible pending packages on Fedora CoreOS + raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree cleanup -p" + become: true + when: need_bootstrap.rc != 0 + +- name: Install required packages on Fedora CoreOS + raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree install --allow-inactive {{ fedora_coreos_packages|join(' ') }}" + become: true + when: need_bootstrap.rc != 0 + +- name: Reboot immediately for updated ostree + raw: "nohup bash -c 'sleep 5s && shutdown -r now'" + become: true + ignore_errors: true # noqa ignore-errors + ignore_unreachable: yes + when: need_bootstrap.rc != 0 + +- name: Wait for the reboot to complete + wait_for_connection: + timeout: 240 + connect_timeout: 20 + delay: 5 + sleep: 5 + when: need_bootstrap.rc != 0 + +- name: Store the fact if this is a Fedora CoreOS host + set_fact: + is_fedora_coreos: True + tags: + - facts diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-fedora.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-fedora.yml new file mode 100644 index 0000000..1613173 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-fedora.yml @@ -0,0 +1,36 @@ +--- +# Some Fedora-based distros ship without Python installed + +- name: Check if bootstrap is needed + raw: which python + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Add proxy to dnf.conf if http_proxy is defined + ini_file: + path: "/etc/dnf/dnf.conf" + section: main + option: proxy + value: "{{ http_proxy | default(omit) }}" + state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" + no_extra_spaces: true + mode: 0644 + become: true + when: not skip_http_proxy_on_os_packages + +- name: Install python3 on Fedora + raw: "dnf install --assumeyes --quiet python3" + become: true + when: + - need_bootstrap.rc != 0 + +# libselinux-python3 is required on SELinux enabled hosts +# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements +- name: Install libselinux-python3 + package: + name: libselinux-python3 + state: present + become: true diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-flatcar.yml
b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-flatcar.yml new file mode 100644 index 0000000..b0f3a9e --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-flatcar.yml @@ -0,0 +1,37 @@ +--- +# Flatcar Container Linux ships without Python installed + +- name: Check if bootstrap is needed + raw: stat /opt/bin/.bootstrapped + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Force binaries directory for Flatcar Container Linux by Kinvolk + set_fact: + bin_dir: "/opt/bin" + tags: + - facts + +- name: Run bootstrap.sh + script: bootstrap.sh + become: true + environment: "{{ proxy_env }}" + when: + - need_bootstrap.rc != 0 + +- name: Set the ansible_python_interpreter fact + set_fact: + ansible_python_interpreter: "{{ bin_dir }}/python" + tags: + - facts + +- name: Disable auto-upgrade + systemd: + name: locksmithd.service + masked: true + state: stopped + when: + - coreos_locksmithd_disable diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-opensuse.yml new file mode 100644 index 0000000..c833bfd --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-opensuse.yml @@ -0,0 +1,85 @@ +--- +# OpenSUSE ships with Python installed +- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version + setup: + gather_subset: '!all' + filter: ansible_distribution_*version + +- name: Check that /etc/sysconfig/proxy file exists + stat: + path: /etc/sysconfig/proxy + get_attributes: no + get_checksum: no + get_mime: no + register: stat_result + +- name: Create the /etc/sysconfig/proxy empty file + file: # noqa risky-file-permissions + path: /etc/sysconfig/proxy + state: touch + when: + - http_proxy is defined or https_proxy is defined + - not stat_result.stat.exists + +- name: Set the http_proxy in /etc/sysconfig/proxy + lineinfile: + path: /etc/sysconfig/proxy + regexp: '^HTTP_PROXY=' + line: 'HTTP_PROXY="{{ http_proxy }}"' + become: true + when: + - http_proxy is defined + +- name: Set the https_proxy in /etc/sysconfig/proxy + lineinfile: + path: /etc/sysconfig/proxy + regexp: '^HTTPS_PROXY=' + line: 'HTTPS_PROXY="{{ https_proxy }}"' + become: true + when: + - https_proxy is defined + +- name: Enable proxies + lineinfile: + path: /etc/sysconfig/proxy + regexp: '^PROXY_ENABLED=' + line: 'PROXY_ENABLED="yes"' + become: true + when: + - http_proxy is defined or https_proxy is defined + +# Required for zypper module +- name: Install python-xml + shell: zypper refresh && zypper --non-interactive install python-xml + changed_when: false + become: true + tags: + - facts + +# Without this package, the get_url module fails when trying to handle https +- name: Install python-cryptography + zypper: + name: python-cryptography + state: present + update_cache: true + become: true + when: + - ansible_distribution_version is version('15.4', '<') + +- name: Install python3-cryptography + zypper: + name: python3-cryptography + state: present + update_cache: true + become: true + when: + - ansible_distribution_version is version('15.4', '>=') + +# Nerdctl needs some basic packages to get an environment up +- name: Install basic dependencies + zypper: + name: + - iptables + - apparmor-parser + state: present + become: true diff --git 
a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-redhat.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-redhat.yml new file mode 100644 index 0000000..8f32388 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/bootstrap-redhat.yml @@ -0,0 +1,121 @@ +--- +- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version + setup: + gather_subset: '!all' + filter: ansible_distribution_*version + +- name: Add proxy to yum.conf or dnf.conf if http_proxy is defined + ini_file: + path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}" + section: main + option: proxy + value: "{{ http_proxy | default(omit) }}" + state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" + no_extra_spaces: true + mode: 0644 + become: true + when: not skip_http_proxy_on_os_packages + +- name: Add proxy to RHEL subscription-manager if http_proxy is defined + command: /sbin/subscription-manager config --server.proxy_hostname={{ http_proxy | regex_replace(':\d+$') }} --server.proxy_port={{ http_proxy | regex_replace('^.*:') }} + become: true + when: + - not skip_http_proxy_on_os_packages + - http_proxy is defined + +- name: Check RHEL subscription-manager status + command: /sbin/subscription-manager status + register: rh_subscription_status + changed_when: "rh_subscription_status.rc != 0" + ignore_errors: true # noqa ignore-errors + become: true + +- name: RHEL subscription Organization ID/Activation Key registration + redhat_subscription: + state: present + org_id: "{{ rh_subscription_org_id }}" + activationkey: "{{ rh_subscription_activation_key }}" + auto_attach: true + force_register: true + syspurpose: + usage: "{{ rh_subscription_usage }}" + role: "{{ rh_subscription_role }}" + service_level_agreement: "{{ rh_subscription_sla }}" + sync: true + notify: RHEL auto-attach subscription + ignore_errors: true # noqa ignore-errors + become: true + when: + - rh_subscription_org_id is defined + - rh_subscription_status.changed + +# this task has no_log set to prevent logging security-sensitive information such as subscription passwords +- name: RHEL subscription Username/Password registration + redhat_subscription: + state: present + username: "{{ rh_subscription_username }}" + password: "{{ rh_subscription_password }}" + auto_attach: true + force_register: true + syspurpose: + usage: "{{ rh_subscription_usage }}" + role: "{{ rh_subscription_role }}" + service_level_agreement: "{{ rh_subscription_sla }}" + sync: true + notify: RHEL auto-attach subscription + ignore_errors: true # noqa ignore-errors + become: true + no_log: "{{ not (unsafe_show_logs|bool) }}" + when: + - rh_subscription_username is defined + - rh_subscription_status.changed + +# container-selinux is in extras repo +- name: Enable RHEL 7 repos + rhsm_repository: + name: + - "rhel-7-server-rpms" + - "rhel-7-server-extras-rpms" + state: enabled + when: + - rhel_enable_repos | default(True) | bool + - ansible_distribution_major_version == "7" + +# container-selinux is in appstream repo +- name: Enable RHEL 8 repos + rhsm_repository: + name: + - "rhel-8-for-*-baseos-rpms" + - "rhel-8-for-*-appstream-rpms" + state: enabled + when: + - rhel_enable_repos | default(True) | bool + - ansible_distribution_major_version == "8" + +- name: Check presence of fastestmirror.conf + stat: + path: /etc/yum/pluginconf.d/fastestmirror.conf + get_attributes: no + get_checksum:
no + get_mime: no + register: fastestmirror + +# the fastestmirror plugin can actually slow down Ansible deployments +- name: Disable fastestmirror plugin if requested + lineinfile: + dest: /etc/yum/pluginconf.d/fastestmirror.conf + regexp: "^enabled=.*" + line: "enabled=0" + state: present + become: true + when: + - fastestmirror.stat.exists + - not centos_fastestmirror_enabled + +# libselinux-python is required on SELinux enabled hosts +# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements +- name: Install libselinux python package + package: + name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + state: present + become: true diff --git a/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/main.yml b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/main.yml new file mode 100644 index 0000000..853ce09 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/bootstrap-os/tasks/main.yml @@ -0,0 +1,100 @@ +--- +- name: Fetch /etc/os-release + raw: cat /etc/os-release + register: os_release + changed_when: false + # This command should always run, even in check mode + check_mode: false + +- include_tasks: bootstrap-centos.yml + when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines or ''ID="uos"'' in os_release.stdout_lines or ''ID="openEuler"'' in os_release.stdout_lines' + +- include_tasks: bootstrap-amazon.yml + when: '''ID="amzn"'' in os_release.stdout_lines' + +- include_tasks: bootstrap-redhat.yml + when: '''ID="rhel"'' in os_release.stdout_lines' + +- include_tasks: bootstrap-clearlinux.yml + when: '''ID=clear-linux-os'' in os_release.stdout_lines' + +# Fedora CoreOS +- include_tasks: bootstrap-fedora-coreos.yml + when: + - '''ID=fedora'' in os_release.stdout_lines' + - '''VARIANT_ID=coreos'' in os_release.stdout_lines' + +- include_tasks: bootstrap-flatcar.yml + when: '''ID=flatcar'' in os_release.stdout_lines' + +- include_tasks: bootstrap-debian.yml + when: '''ID=debian'' in os_release.stdout_lines or ''ID=ubuntu'' in os_release.stdout_lines' + +# Fedora "classic" +- include_tasks: bootstrap-fedora.yml + when: + - '''ID=fedora'' in os_release.stdout_lines' + - '''VARIANT_ID=coreos'' not in os_release.stdout_lines' + +- include_tasks: bootstrap-opensuse.yml + when: '''ID="opensuse-leap"'' in os_release.stdout_lines or ''ID="opensuse-tumbleweed"'' in os_release.stdout_lines' + +- name: Create remote_tmp for it is used by another module + file: + path: "{{ ansible_remote_tmp | default('~/.ansible/tmp') }}" + state: directory + mode: 0700 + +# Workaround for https://github.com/ansible/ansible/issues/42726 +# (1/3) +- name: Gather host facts to get ansible_os_family + setup: + gather_subset: '!all' + filter: ansible_* + +- name: Assign inventory name to unconfigured hostnames (non-CoreOS, non-Flatcar, Suse and ClearLinux, non-Fedora) + hostname: + name: "{{ inventory_hostname }}" + when: + - override_system_hostname + - ansible_os_family not in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux'] + - not ansible_distribution == "Fedora" + - not is_fedora_coreos + +# (2/3) +- name: Assign inventory name to unconfigured hostnames (CoreOS, Flatcar, Suse, ClearLinux and Fedora only) + command: "hostnamectl set-hostname {{ 
inventory_hostname }}" + register: hostname_changed + become: true + changed_when: false + when: > + override_system_hostname + and (ansible_os_family in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux'] + or is_fedora_coreos + or ansible_distribution == "Fedora") + +# (3/3) +- name: Update hostname fact (CoreOS, Flatcar, Suse, ClearLinux and Fedora only) + setup: + gather_subset: '!all' + filter: ansible_hostname + when: > + override_system_hostname + and (ansible_os_family in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux'] + or is_fedora_coreos + or ansible_distribution == "Fedora") + +- name: Install ceph-commmon package + package: + name: + - ceph-common + state: present + when: rbd_provisioner_enabled|default(false) + +- name: Ensure bash_completion.d folder exists + file: + name: /etc/bash_completion.d/ + state: directory + owner: root + group: root + mode: 0755 diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/README.md b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/README.md new file mode 100644 index 0000000..dda243d --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/README.md @@ -0,0 +1,50 @@ +# Ansible Role: GlusterFS + +[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-glusterfs.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-glusterfs) + +Installs and configures GlusterFS on Linux. + +## Requirements + +For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role). + +This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes. + +## Role Variables + +Available variables are listed below, along with default values (see `defaults/main.yml`): + +```yaml +glusterfs_default_release: "" +``` + +You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy). + +```yaml +glusterfs_ppa_use: yes +glusterfs_ppa_version: "3.5" +``` + +For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info. + +## Dependencies + +None. + +## Example Playbook + +```yaml + - hosts: server + roles: + - geerlingguy.glusterfs +``` + +For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/). 
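The README above defers the brick/volume step to "a series of plays in your own playbook", using the `gluster_volume` module it references. The play below is a minimal sketch of that follow-up step and is not part of this patch: the `gfs-cluster` group name and the volume name are assumptions, and the brick path simply mirrors the server-role defaults added elsewhere in this change.

```yaml
# Hypothetical follow-up play, run after the glusterfs role has installed the packages.
- hosts: gfs-cluster
  become: true
  tasks:
    - name: Create a replicated volume across the (assumed) gfs-cluster group
      gluster_volume:
        state: present                         # create the volume; use 'started' to also ensure it is running
        name: gluster                          # illustrative volume name
        brick: /mnt/xfs-drive-gluster/brick    # assumed brick path, matches the server role defaults in this patch
        replicas: "{{ groups['gfs-cluster'] | length }}"
        cluster: "{{ groups['gfs-cluster'] | join(',') }}"   # peers by inventory name; substitute node IPs if names do not resolve
        host: "{{ inventory_hostname }}"
        force: yes
      run_once: true
```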
+ +## License + +MIT / BSD + +## Author Information + +This role was created in 2015 by [Jeff Geerling](http://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/). diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml new file mode 100644 index 0000000..b9f0d2d --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml @@ -0,0 +1,11 @@ +--- +# For Ubuntu. +glusterfs_default_release: "" +glusterfs_ppa_use: yes +glusterfs_ppa_version: "4.1" + +# Gluster configuration. +gluster_mount_dir: /mnt/gluster +gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster +gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick" +gluster_brick_name: gluster diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml new file mode 100644 index 0000000..8d3513f --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml @@ -0,0 +1,30 @@ +--- +dependencies: [] + +galaxy_info: + author: geerlingguy + description: GlusterFS installation for Linux. + company: "Midwestern Mac, LLC" + license: "license (BSD, MIT)" + min_ansible_version: 2.0 + platforms: + - name: EL + versions: + - 6 + - 7 + - name: Ubuntu + versions: + - precise + - trusty + - xenial + - name: Debian + versions: + - wheezy + - jessie + galaxy_tags: + - system + - networking + - cloud + - clustering + - files + - sharing diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml new file mode 100644 index 0000000..e6c3dac --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# This is meant for Ubuntu and RedHat installations, where apparently the glusterfs-client is not used from inside +# hyperkube and needs to be installed as part of the system. + +# Setup/install tasks. +- include: setup-RedHat.yml + when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined + +- include: setup-Debian.yml + when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined + +- name: Ensure Gluster mount directories exist. + file: "path={{ item }} state=directory mode=0775" + with_items: + - "{{ gluster_mount_dir }}" + when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml new file mode 100644 index 0000000..2865b10 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml @@ -0,0 +1,24 @@ +--- +- name: Add PPA for GlusterFS. + apt_repository: + repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}' + state: present + update_cache: yes + register: glusterfs_ppa_added + when: glusterfs_ppa_use + +- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503 + apt: + name: "{{ item }}" + state: absent + with_items: + - glusterfs-client + when: glusterfs_ppa_added.changed + +- name: Ensure GlusterFS client is installed. 
+ apt: + name: "{{ item }}" + state: present + default_release: "{{ glusterfs_default_release }}" + with_items: + - glusterfs-client diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml new file mode 100644 index 0000000..86827ef --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml @@ -0,0 +1,10 @@ +--- +- name: Install Prerequisites + package: name={{ item }} state=present + with_items: + - "centos-release-gluster{{ glusterfs_default_release }}" + +- name: Install Packages + package: name={{ item }} state=present + with_items: + - glusterfs-client diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml new file mode 100644 index 0000000..ef9a71e --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml @@ -0,0 +1,13 @@ +--- +# For Ubuntu. +glusterfs_default_release: "" +glusterfs_ppa_use: yes +glusterfs_ppa_version: "3.12" + +# Gluster configuration. +gluster_mount_dir: /mnt/gluster +gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster +gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick" +gluster_brick_name: gluster +# Default device to mount for xfs formatting, terraform overrides this by setting the variable in the inventory. +disk_volume_device_1: /dev/vdb diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml new file mode 100644 index 0000000..8d3513f --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml @@ -0,0 +1,30 @@ +--- +dependencies: [] + +galaxy_info: + author: geerlingguy + description: GlusterFS installation for Linux. + company: "Midwestern Mac, LLC" + license: "license (BSD, MIT)" + min_ansible_version: 2.0 + platforms: + - name: EL + versions: + - 6 + - 7 + - name: Ubuntu + versions: + - precise + - trusty + - xenial + - name: Debian + versions: + - wheezy + - jessie + galaxy_tags: + - system + - networking + - cloud + - clustering + - files + - sharing diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml new file mode 100644 index 0000000..0a58598 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml @@ -0,0 +1,94 @@ +--- +# Include variables and define needed variables. +- name: Include OS-specific variables. + include_vars: "{{ ansible_os_family }}.yml" + +# Install xfs package +- name: install xfs Debian + apt: name=xfsprogs state=present + when: ansible_os_family == "Debian" + +- name: install xfs RedHat + package: name=xfsprogs state=present + when: ansible_os_family == "RedHat" + +# Format external volumes in xfs +- name: Format volumes in xfs + filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}" + +# Mount external volumes +- name: mounting new xfs filesystem + mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted" + +# Setup/install tasks. 
+- include: setup-RedHat.yml + when: ansible_os_family == 'RedHat' + +- include: setup-Debian.yml + when: ansible_os_family == 'Debian' + +- name: Ensure GlusterFS is started and enabled at boot. + service: "name={{ glusterfs_daemon }} state=started enabled=yes" + +- name: Ensure Gluster brick and mount directories exist. + file: "path={{ item }} state=directory mode=0775" + with_items: + - "{{ gluster_brick_dir }}" + - "{{ gluster_mount_dir }}" + +- name: Configure Gluster volume with replicas + gluster_volume: + state: present + name: "{{ gluster_brick_name }}" + brick: "{{ gluster_brick_dir }}" + replicas: "{{ groups['gfs-cluster'] | length }}" + cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}" + host: "{{ inventory_hostname }}" + force: yes + run_once: true + when: groups['gfs-cluster']|length > 1 + +- name: Configure Gluster volume without replicas + gluster_volume: + state: present + name: "{{ gluster_brick_name }}" + brick: "{{ gluster_brick_dir }}" + cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}" + host: "{{ inventory_hostname }}" + force: yes + run_once: true + when: groups['gfs-cluster']|length <= 1 + +- name: Mount glusterfs to retrieve disk size + mount: + name: "{{ gluster_mount_dir }}" + src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster" + fstype: glusterfs + opts: "defaults,_netdev" + state: mounted + when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] + +- name: Get Gluster disk size + setup: filter=ansible_mounts + register: mounts_data + when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] + +- name: Set Gluster disk size to variable + set_fact: + gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}" + when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] + +- name: Create file on GlusterFS + template: + dest: "{{ gluster_mount_dir }}/.test-file.txt" + src: test-file.txt + mode: 0644 + when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] + +- name: Unmount glusterfs + mount: + name: "{{ gluster_mount_dir }}" + fstype: glusterfs + src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster" + state: unmounted + when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml new file mode 100644 index 0000000..855fe36 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml @@ -0,0 +1,26 @@ +--- +- name: Add PPA for GlusterFS. + apt_repository: + repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}' + state: present + update_cache: yes + register: glusterfs_ppa_added + when: glusterfs_ppa_use + +- name: Ensure GlusterFS will reinstall if the PPA was just added. 
# noqa 503 + apt: + name: "{{ item }}" + state: absent + with_items: + - glusterfs-server + - glusterfs-client + when: glusterfs_ppa_added.changed + +- name: Ensure GlusterFS is installed. + apt: + name: "{{ item }}" + state: present + default_release: "{{ glusterfs_default_release }}" + with_items: + - glusterfs-server + - glusterfs-client diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml new file mode 100644 index 0000000..9dc8f0b --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml @@ -0,0 +1,11 @@ +--- +- name: Install Prerequisites + package: name={{ item }} state=present + with_items: + - "centos-release-gluster{{ glusterfs_default_release }}" + +- name: Install Packages + package: name={{ item }} state=present + with_items: + - glusterfs-server + - glusterfs-client diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/templates/test-file.txt b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/templates/test-file.txt new file mode 100644 index 0000000..16b14f5 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/templates/test-file.txt @@ -0,0 +1 @@ +test file diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml new file mode 100644 index 0000000..e931068 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml @@ -0,0 +1,2 @@ +--- +glusterfs_daemon: glusterd diff --git a/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/RedHat.yml b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/RedHat.yml new file mode 100644 index 0000000..e931068 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/RedHat.yml @@ -0,0 +1,2 @@ +--- +glusterfs_daemon: glusterd diff --git a/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml new file mode 100644 index 0000000..82b0acb --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml @@ -0,0 +1,23 @@ +--- +- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV + template: + src: "{{ item.file }}" + dest: "{{ kube_config_dir }}/{{ item.dest }}" + mode: 0644 + with_items: + - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json} + - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml} + - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json} + register: gluster_pv + when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined + +- name: Kubernetes Apps | Set GlusterFS endpoint and PV + kube: + name: glusterfs + namespace: default + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.dest }}" + state: "{{ item.changed | ternary('latest','present') }}" + with_items: "{{ gluster_pv.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] 
and groups['gfs-cluster'] is defined diff --git a/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-endpoint-svc.json.j2 b/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-endpoint-svc.json.j2 new file mode 100644 index 0000000..3cb5118 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-endpoint-svc.json.j2 @@ -0,0 +1,12 @@ +{ + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "glusterfs" + }, + "spec": { + "ports": [ + {"port": 1} + ] + } +} diff --git a/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-endpoint.json.j2 b/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-endpoint.json.j2 new file mode 100644 index 0000000..866c09f --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-endpoint.json.j2 @@ -0,0 +1,24 @@ +{ + "kind": "Endpoints", + "apiVersion": "v1", + "metadata": { + "name": "glusterfs" + }, + "subsets": [ + {% for host in groups['gfs-cluster'] %} + { + "addresses": [ + { + "ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}" + } + ], + "ports": [ + { + "port": 1 + } + ] + }{%- if not loop.last %}, {% endif -%} + {% endfor %} + ] +} + diff --git a/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-pv.yml.j2 b/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-pv.yml.j2 new file mode 100644 index 0000000..f6ba435 --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-pv.yml.j2 @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: glusterfs +spec: + capacity: + storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi" + accessModes: + - ReadWriteMany + glusterfs: + endpoints: glusterfs + path: gluster + readOnly: false + persistentVolumeReclaimPolicy: Retain diff --git a/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/meta/main.yaml b/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/meta/main.yaml new file mode 100644 index 0000000..a4ab33f --- /dev/null +++ b/kubespray/contrib/network-storage/glusterfs/roles/kubernetes-pv/meta/main.yaml @@ -0,0 +1,3 @@ +--- +dependencies: + - {role: kubernetes-pv/ansible, tags: apps} diff --git a/kubespray/contrib/network-storage/heketi/README.md b/kubespray/contrib/network-storage/heketi/README.md new file mode 100644 index 0000000..d5491d3 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/README.md @@ -0,0 +1,27 @@ +# Deploy Heketi/Glusterfs into Kubespray/Kubernetes + +This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass. + +## Important notice + +> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice) + +## Client Setup + +Heketi provides a CLI that provides users with a means to administer the deployment and configuration of GlusterFS in Kubernetes. 
[Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine. + +## Install + +Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup. + +```shell +ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml +``` + +## Tear down + +```shell +ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml +``` + +Add `--extra-vars "heketi_remove_lvm=true"` to the command above to remove LVM packages from the system diff --git a/kubespray/contrib/network-storage/heketi/heketi-tear-down.yml b/kubespray/contrib/network-storage/heketi/heketi-tear-down.yml new file mode 100644 index 0000000..9e2d1f4 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/heketi-tear-down.yml @@ -0,0 +1,9 @@ +--- +- hosts: kube_control_plane[0] + roles: + - { role: tear-down } + +- hosts: heketi-node + become: yes + roles: + - { role: tear-down-disks } diff --git a/kubespray/contrib/network-storage/heketi/heketi.yml b/kubespray/contrib/network-storage/heketi/heketi.yml new file mode 100644 index 0000000..2309267 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/heketi.yml @@ -0,0 +1,10 @@ +--- +- hosts: heketi-node + roles: + - { role: prepare } + +- hosts: kube_control_plane[0] + tags: + - "provision" + roles: + - { role: provision } diff --git a/kubespray/contrib/network-storage/heketi/inventory.yml.sample b/kubespray/contrib/network-storage/heketi/inventory.yml.sample new file mode 100644 index 0000000..467788a --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/inventory.yml.sample @@ -0,0 +1,33 @@ +all: + vars: + heketi_admin_key: "11elfeinhundertundelf" + heketi_user_key: "!!einseinseins" + glusterfs_daemonset: + readiness_probe: + timeout_seconds: 3 + initial_delay_seconds: 3 + liveness_probe: + timeout_seconds: 3 + initial_delay_seconds: 10 + children: + k8s_cluster: + vars: + kubelet_fail_swap_on: false + children: + kube_control_plane: + hosts: + node1: + etcd: + hosts: + node2: + kube_node: + hosts: &kube_nodes + node1: + node2: + node3: + node4: + heketi-node: + vars: + disk_volume_device_1: "/dev/vdb" + hosts: + <<: *kube_nodes diff --git a/kubespray/contrib/network-storage/heketi/requirements.txt b/kubespray/contrib/network-storage/heketi/requirements.txt new file mode 100644 index 0000000..45c1e03 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/requirements.txt @@ -0,0 +1 @@ +jmespath diff --git a/kubespray/contrib/network-storage/heketi/roles/prepare/tasks/main.yml b/kubespray/contrib/network-storage/heketi/roles/prepare/tasks/main.yml new file mode 100644 index 0000000..dad3bae --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/prepare/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: "Load lvm kernel modules" + become: true + with_items: + - "dm_snapshot" + - "dm_mirror" + - "dm_thin_pool" + modprobe: + name: "{{ item }}" + state: "present" + +- name: "Install glusterfs mount utils (RedHat)" + become: true + package: + name: "glusterfs-fuse" + state: "present" + when: "ansible_os_family == 'RedHat'" + +- name: "Install glusterfs mount utils (Debian)" + become: true + apt: + name: "glusterfs-client" + state: "present" + when: "ansible_os_family == 'Debian'" diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/defaults/main.yml b/kubespray/contrib/network-storage/heketi/roles/provision/defaults/main.yml new 
file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/handlers/main.yml b/kubespray/contrib/network-storage/heketi/roles/provision/handlers/main.yml new file mode 100644 index 0000000..9e876de --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: "stop port forwarding" + command: "killall " diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml new file mode 100644 index 0000000..f0111ce --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml @@ -0,0 +1,64 @@ +--- +# Bootstrap heketi +- name: "Get state of heketi service, deployment and pods." + register: "initial_heketi_state" + changed_when: false + command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json" + +- name: "Bootstrap heketi." + when: + - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0" + - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0" + - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0" + include_tasks: "bootstrap/deploy.yml" + +# Prepare heketi topology +- name: "Get heketi initial pod state." + register: "initial_heketi_pod" + command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json" + changed_when: false + +- name: "Ensure heketi bootstrap pod is up." + assert: + that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1" + +- name: Store the initial heketi pod name + set_fact: + initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}" + +- name: "Test heketi topology." + changed_when: false + register: "heketi_topology" + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" + +- name: "Load heketi topology." + when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0" + include_tasks: "bootstrap/topology.yml" + +# Provision heketi database volume +- name: "Prepare heketi volumes." + include_tasks: "bootstrap/volumes.yml" + +# Remove bootstrap heketi +- name: "Tear down bootstrap." + include_tasks: "bootstrap/tear-down.yml" + +# Prepare heketi storage +- name: "Test heketi storage." + command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json" + changed_when: false + register: "heketi_storage_state" + +# ensure endpoints actually exist before trying to move database data to it +- name: "Create heketi storage." 
+ include_tasks: "bootstrap/storage.yml" + vars: + secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']" + endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']" + service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']" + job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']" + when: + - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0" diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml new file mode 100644 index 0000000..8d03ffc --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml @@ -0,0 +1,27 @@ +--- +- name: "Kubernetes Apps | Lay Down Heketi Bootstrap" + become: true + template: + src: "heketi-bootstrap.json.j2" + dest: "{{ kube_config_dir }}/heketi-bootstrap.json" + mode: 0640 + register: "rendering" +- name: "Kubernetes Apps | Install and configure Heketi Bootstrap" + kube: + name: "GlusterFS" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/heketi-bootstrap.json" + state: "{{ rendering.changed | ternary('latest', 'present') }}" +- name: "Wait for heketi bootstrap to complete." + changed_when: false + register: "initial_heketi_state" + vars: + initial_heketi_state: { stdout: "{}" } + pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]" + deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]" + command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json" + until: + - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'" + - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'" + retries: 60 + delay: 5 diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml new file mode 100644 index 0000000..63a475a --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml @@ -0,0 +1,33 @@ +--- +- name: "Test heketi storage." + command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json" + changed_when: false + register: "heketi_storage_state" +- name: "Create heketi storage." 
+ kube: + name: "GlusterFS" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json" + state: "present" + vars: + secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']" + endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']" + service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']" + job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']" + when: + - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0" + register: "heketi_storage_result" +- name: "Get state of heketi database copy job." + command: "{{ bin_dir }}/kubectl get jobs --output=json" + changed_when: false + register: "heketi_storage_state" + vars: + heketi_storage_state: { stdout: "{}" } + job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]" + until: + - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1" + retries: 60 + delay: 5 diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml new file mode 100644 index 0000000..e6b16e5 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml @@ -0,0 +1,14 @@ +--- +- name: "Get existing Heketi deploy resources." + command: "{{ bin_dir }}/kubectl get all --selector=\"deploy-heketi\" -o=json" + register: "heketi_resources" + changed_when: false +- name: "Delete bootstrap Heketi." + command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\"" + when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0" +- name: "Ensure there is nothing left over." # noqa 301 + command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json" + register: "heketi_result" + until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" + retries: 60 + delay: 5 diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml new file mode 100644 index 0000000..4c6dc13 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml @@ -0,0 +1,27 @@ +--- +- name: "Get heketi topology." + changed_when: false + register: "heketi_topology" + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" +- name: "Render heketi topology template." + become: true + vars: { nodes: "{{ groups['heketi-node'] }}" } + register: "render" + template: + src: "topology.json.j2" + dest: "{{ kube_config_dir }}/topology.json" + mode: 0644 +- name: "Copy topology configuration into container." + changed_when: false + command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json" +- name: "Load heketi topology." 
# noqa 503 + when: "render.changed" + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json" + register: "load_heketi" +- name: "Get heketi topology." + changed_when: false + register: "heketi_topology" + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" + until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length" + retries: 60 + delay: 5 diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml new file mode 100644 index 0000000..dc93d78 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml @@ -0,0 +1,41 @@ +--- +- name: "Get heketi volume ids." + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json" + changed_when: false + register: "heketi_volumes" +- name: "Get heketi volumes." + changed_when: false + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json" + with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}" + loop_control: { loop_var: "volume_id" } + register: "volumes_information" +- name: "Test heketi database volume." + set_fact: { heketi_database_volume_exists: true } + with_items: "{{ volumes_information.results }}" + loop_control: { loop_var: "volume_information" } + vars: { volume: "{{ volume_information.stdout|from_json }}" } + when: "volume.name == 'heketidbstorage'" +- name: "Provision database volume." + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage" + when: "heketi_database_volume_exists is undefined" +- name: "Copy configuration from pod." # noqa 301 + become: true + command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json" +- name: "Get heketi volume ids." + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json" + changed_when: false + register: "heketi_volumes" +- name: "Get heketi volumes." + changed_when: false + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json" + with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}" + loop_control: { loop_var: "volume_id" } + register: "volumes_information" +- name: "Test heketi database volume." + set_fact: { heketi_database_volume_created: true } + with_items: "{{ volumes_information.results }}" + loop_control: { loop_var: "volume_information" } + vars: { volume: "{{ volume_information.stdout|from_json }}" } + when: "volume.name == 'heketidbstorage'" +- name: "Ensure heketi database volume exists." + assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." 
} diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/cleanup.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/cleanup.yml new file mode 100644 index 0000000..238f29b --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/cleanup.yml @@ -0,0 +1,4 @@ +--- +- name: "Clean up left over jobs." + command: "{{ bin_dir }}/kubectl delete jobs,pods --selector=\"deploy-heketi\"" + changed_when: false diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml new file mode 100644 index 0000000..3409cf9 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml @@ -0,0 +1,44 @@ +--- +- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset" + template: + src: "glusterfs-daemonset.json.j2" + dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" + mode: 0644 + become: true + register: "rendering" +- name: "Kubernetes Apps | Install and configure GlusterFS daemonset" + kube: + name: "GlusterFS" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/glusterfs-daemonset.json" + state: "{{ rendering.changed | ternary('latest', 'present') }}" +- name: "Kubernetes Apps | Label GlusterFS nodes" + include_tasks: "glusterfs/label.yml" + with_items: "{{ groups['heketi-node'] }}" + loop_control: + loop_var: "node" +- name: "Kubernetes Apps | Wait for daemonset to become available." + register: "daemonset_state" + command: "{{ bin_dir }}/kubectl get daemonset glusterfs --output=json --ignore-not-found=true" + changed_when: false + vars: + daemonset_state: { stdout: "{}" } + ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}" + desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}" + until: "ready | int >= 3" + retries: 60 + delay: 5 + +- name: "Kubernetes Apps | Lay Down Heketi Service Account" + template: + src: "heketi-service-account.json.j2" + dest: "{{ kube_config_dir }}/heketi-service-account.json" + mode: 0644 + become: true + register: "rendering" +- name: "Kubernetes Apps | Install and configure Heketi Service Account" + kube: + name: "GlusterFS" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/heketi-service-account.json" + state: "{{ rendering.changed | ternary('latest', 'present') }}" diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml new file mode 100644 index 0000000..ae598c3 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml @@ -0,0 +1,19 @@ +--- +- name: Get storage nodes + register: "label_present" + command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true" + changed_when: false + +- name: "Assign storage label" + when: "label_present.stdout_lines|length == 0" + command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs" + +- name: Get storage nodes again + register: "label_present" + command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true" + changed_when: false + +- name: Ensure the label has been set + assert: + that: "label_present|length > 0" + msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." 
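For debugging a single node, the checks and label assignment that the glusterfs/label.yml tasks above perform can also be run by hand. A minimal sketch, assuming `kubectl` is configured for the cluster and `node1` is only a placeholder for a host in the `heketi-node` group:

```shell
# Placeholder node name; substitute a host from the heketi-node group.
NODE=node1

# Does the node already carry the storage label the GlusterFS daemonset selects on?
kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname=${NODE} --ignore-not-found=true

# If the query above returned nothing, assign the label, as the role does.
kubectl label node ${NODE} storagenode=glusterfs

# The glusterfs daemonset should then schedule a pod onto the node.
kubectl get daemonset glusterfs -o wide
```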
diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml new file mode 100644 index 0000000..9a6ce55 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml @@ -0,0 +1,34 @@ +--- +- name: "Kubernetes Apps | Lay Down Heketi" + become: true + template: + src: "heketi-deployment.json.j2" + dest: "{{ kube_config_dir }}/heketi-deployment.json" + mode: 0644 + register: "rendering" + +- name: "Kubernetes Apps | Install and configure Heketi" + kube: + name: "GlusterFS" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/heketi-deployment.json" + state: "{{ rendering.changed | ternary('latest', 'present') }}" + +- name: "Ensure heketi is up and running." + changed_when: false + register: "heketi_state" + vars: + heketi_state: + stdout: "{}" + pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]" + deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]" + command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json" + until: + - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'" + - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'" + retries: 60 + delay: 5 + +- name: Set the Heketi pod name + set_fact: + heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}" diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/main.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/main.yml new file mode 100644 index 0000000..1feb27d --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: "Kubernetes Apps | GlusterFS" + include_tasks: "glusterfs.yml" + +- name: "Kubernetes Apps | Heketi Secrets" + include_tasks: "secret.yml" + +- name: "Kubernetes Apps | Test Heketi" + register: "heketi_service_state" + command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true" + changed_when: false + +- name: "Kubernetes Apps | Bootstrap Heketi" + when: "heketi_service_state.stdout == \"\"" + include_tasks: "bootstrap.yml" + +- name: "Kubernetes Apps | Heketi" + include_tasks: "heketi.yml" + +- name: "Kubernetes Apps | Heketi Topology" + include_tasks: "topology.yml" + +- name: "Kubernetes Apps | Heketi Storage" + include_tasks: "storage.yml" + +- name: "Kubernetes Apps | Storage Class" + include_tasks: "storageclass.yml" + +- name: "Clean up" + include_tasks: "cleanup.yml" diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/secret.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/secret.yml new file mode 100644 index 0000000..c455b6f --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/secret.yml @@ -0,0 +1,45 @@ +--- +- name: Get clusterrolebindings + register: "clusterrolebinding_state" + command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true" + changed_when: false + +- name: "Kubernetes Apps | Deploy cluster role binding." 
+ when: "clusterrolebinding_state.stdout | length == 0" + command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account" + +- name: Get clusterrolebindings again + register: "clusterrolebinding_state" + command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true" + changed_when: false + +- name: Make sure that clusterrolebindings are present now + assert: + that: "clusterrolebinding_state.stdout | length > 0" + msg: "Cluster role binding is not present." + +- name: Get the heketi-config-secret secret + register: "secret_state" + command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true" + changed_when: false + +- name: "Render Heketi secret configuration." + become: true + template: + src: "heketi.json.j2" + dest: "{{ kube_config_dir }}/heketi.json" + mode: 0644 + +- name: "Deploy Heketi config secret" + when: "secret_state.stdout | length == 0" + command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json" + +- name: Get the heketi-config-secret secret again + register: "secret_state" + command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true" + changed_when: false + +- name: Make sure the heketi-config-secret secret exists now + assert: + that: "secret_state.stdout | length > 0" + msg: "Heketi config secret is not present." diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/storage.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/storage.yml new file mode 100644 index 0000000..055e179 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/storage.yml @@ -0,0 +1,15 @@ +--- +- name: "Kubernetes Apps | Lay Down Heketi Storage" + become: true + vars: { nodes: "{{ groups['heketi-node'] }}" } + template: + src: "heketi-storage.json.j2" + dest: "{{ kube_config_dir }}/heketi-storage.json" + mode: 0644 + register: "rendering" +- name: "Kubernetes Apps | Install and configure Heketi Storage" + kube: + name: "GlusterFS" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/heketi-storage.json" + state: "{{ rendering.changed | ternary('latest', 'present') }}" diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml new file mode 100644 index 0000000..3380a61 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml @@ -0,0 +1,26 @@ +--- +- name: "Test storage class." + command: "{{ bin_dir }}/kubectl get storageclass gluster --ignore-not-found=true --output=json" + register: "storageclass" + changed_when: false +- name: "Test heketi service." + command: "{{ bin_dir }}/kubectl get service heketi --ignore-not-found=true --output=json" + register: "heketi_service" + changed_when: false +- name: "Ensure heketi service is available." + assert: { that: "heketi_service.stdout != \"\"" } +- name: "Render storage class configuration." 
+ become: true + vars: + endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}" + template: + src: "storageclass.yml.j2" + dest: "{{ kube_config_dir }}/storageclass.yml" + mode: 0644 + register: "rendering" +- name: "Kubernetes Apps | Install and configure Storace Class" + kube: + name: "GlusterFS" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/storageclass.yml" + state: "{{ rendering.changed | ternary('latest', 'present') }}" diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/topology.yml new file mode 100644 index 0000000..f20af1f --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/tasks/topology.yml @@ -0,0 +1,26 @@ +--- +- name: "Get heketi topology." + register: "heketi_topology" + changed_when: false + command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" +- name: "Render heketi topology template." + become: true + vars: { nodes: "{{ groups['heketi-node'] }}" } + register: "rendering" + template: + src: "topology.json.j2" + dest: "{{ kube_config_dir }}/topology.json" + mode: 0644 +- name: "Copy topology configuration into container." # noqa 503 + when: "rendering.changed" + command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json" +- name: "Load heketi topology." # noqa 503 + when: "rendering.changed" + command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json" +- name: "Get heketi topology." + register: "heketi_topology" + changed_when: false + command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" + until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length" + retries: 60 + delay: 5 diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 b/kubespray/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 new file mode 100644 index 0000000..a14b31c --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 @@ -0,0 +1,149 @@ +{ + "kind": "DaemonSet", + "apiVersion": "apps/v1", + "metadata": { + "name": "glusterfs", + "labels": { + "glusterfs": "deployment" + }, + "annotations": { + "description": "GlusterFS Daemon Set", + "tags": "glusterfs" + } + }, + "spec": { + "selector": { + "matchLabels": { + "glusterfs-node": "daemonset" + } + }, + "template": { + "metadata": { + "name": "glusterfs", + "labels": { + "glusterfs-node": "daemonset" + } + }, + "spec": { + "nodeSelector": { + "storagenode" : "glusterfs" + }, + "hostNetwork": true, + "containers": [ + { + "image": "gluster/gluster-centos:gluster4u0_centos7", + "imagePullPolicy": "IfNotPresent", + "name": "glusterfs", + "volumeMounts": [ + { + "name": "glusterfs-heketi", + "mountPath": "/var/lib/heketi" + }, + { + "name": "glusterfs-run", + "mountPath": "/run" + }, + { + "name": "glusterfs-lvm", + "mountPath": "/run/lvm" + }, + { + "name": "glusterfs-etc", + "mountPath": "/etc/glusterfs" + }, + { + "name": "glusterfs-logs", + "mountPath": "/var/log/glusterfs" + }, + { + "name": "glusterfs-config", + "mountPath": 
"/var/lib/glusterd" + }, + { + "name": "glusterfs-dev", + "mountPath": "/dev" + }, + { + "name": "glusterfs-cgroup", + "mountPath": "/sys/fs/cgroup" + } + ], + "securityContext": { + "capabilities": {}, + "privileged": true + }, + "readinessProbe": { + "timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }}, + "initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }}, + "exec": { + "command": [ + "/bin/bash", + "-c", + "systemctl status glusterd.service" + ] + } + }, + "livenessProbe": { + "timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }}, + "initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }}, + "exec": { + "command": [ + "/bin/bash", + "-c", + "systemctl status glusterd.service" + ] + } + } + } + ], + "volumes": [ + { + "name": "glusterfs-heketi", + "hostPath": { + "path": "/var/lib/heketi" + } + }, + { + "name": "glusterfs-run" + }, + { + "name": "glusterfs-lvm", + "hostPath": { + "path": "/run/lvm" + } + }, + { + "name": "glusterfs-etc", + "hostPath": { + "path": "/etc/glusterfs" + } + }, + { + "name": "glusterfs-logs", + "hostPath": { + "path": "/var/log/glusterfs" + } + }, + { + "name": "glusterfs-config", + "hostPath": { + "path": "/var/lib/glusterd" + } + }, + { + "name": "glusterfs-dev", + "hostPath": { + "path": "/dev" + } + }, + { + "name": "glusterfs-cgroup", + "hostPath": { + "path": "/sys/fs/cgroup" + } + } + ] + } + } + } +} diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 b/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 new file mode 100644 index 0000000..7a932d0 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 @@ -0,0 +1,138 @@ +{ + "kind": "List", + "apiVersion": "v1", + "items": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "deploy-heketi", + "labels": { + "glusterfs": "heketi-service", + "deploy-heketi": "support" + }, + "annotations": { + "description": "Exposes Heketi Service" + } + }, + "spec": { + "selector": { + "name": "deploy-heketi" + }, + "ports": [ + { + "name": "deploy-heketi", + "port": 8080, + "targetPort": 8080 + } + ] + } + }, + { + "kind": "Deployment", + "apiVersion": "apps/v1", + "metadata": { + "name": "deploy-heketi", + "labels": { + "glusterfs": "heketi-deployment", + "deploy-heketi": "deployment" + }, + "annotations": { + "description": "Defines how to deploy Heketi" + } + }, + "spec": { + "selector": { + "matchLabels": { + "name": "deploy-heketi" + } + }, + "replicas": 1, + "template": { + "metadata": { + "name": "deploy-heketi", + "labels": { + "name": "deploy-heketi", + "glusterfs": "heketi-pod", + "deploy-heketi": "pod" + } + }, + "spec": { + "serviceAccountName": "heketi-service-account", + "containers": [ + { + "image": "heketi/heketi:9", + "imagePullPolicy": "Always", + "name": "deploy-heketi", + "env": [ + { + "name": "HEKETI_EXECUTOR", + "value": "kubernetes" + }, + { + "name": "HEKETI_DB_PATH", + "value": "/var/lib/heketi/heketi.db" + }, + { + "name": "HEKETI_FSTAB", + "value": "/var/lib/heketi/fstab" + }, + { + "name": "HEKETI_SNAPSHOT_LIMIT", + "value": "14" + }, + { + "name": "HEKETI_KUBE_GLUSTER_DAEMONSET", + "value": "y" + } + ], + "ports": [ + { + "containerPort": 8080 + } + ], + "volumeMounts": [ + { + "name": "db", + "mountPath": "/var/lib/heketi" + }, + { + "name": "config", + "mountPath": "/etc/heketi" + } + ], + "readinessProbe": { + 
"timeoutSeconds": 3, + "initialDelaySeconds": 3, + "httpGet": { + "path": "/hello", + "port": 8080 + } + }, + "livenessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 10, + "httpGet": { + "path": "/hello", + "port": 8080 + } + } + } + ], + "volumes": [ + { + "name": "db" + }, + { + "name": "config", + "secret": { + "secretName": "heketi-config-secret" + } + } + ] + } + } + } + } + ] +} diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 b/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 new file mode 100644 index 0000000..8e09ce8 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 @@ -0,0 +1,164 @@ +{ + "kind": "List", + "apiVersion": "v1", + "items": [ + { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "heketi-db-backup", + "labels": { + "glusterfs": "heketi-db", + "heketi": "db" + } + }, + "data": { + }, + "type": "Opaque" + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "heketi", + "labels": { + "glusterfs": "heketi-service", + "deploy-heketi": "support" + }, + "annotations": { + "description": "Exposes Heketi Service" + } + }, + "spec": { + "selector": { + "name": "heketi" + }, + "ports": [ + { + "name": "heketi", + "port": 8080, + "targetPort": 8080 + } + ] + } + }, + { + "kind": "Deployment", + "apiVersion": "apps/v1", + "metadata": { + "name": "heketi", + "labels": { + "glusterfs": "heketi-deployment" + }, + "annotations": { + "description": "Defines how to deploy Heketi" + } + }, + "spec": { + "selector": { + "matchLabels": { + "name": "heketi" + } + }, + "replicas": 1, + "template": { + "metadata": { + "name": "heketi", + "labels": { + "name": "heketi", + "glusterfs": "heketi-pod" + } + }, + "spec": { + "serviceAccountName": "heketi-service-account", + "containers": [ + { + "image": "heketi/heketi:9", + "imagePullPolicy": "Always", + "name": "heketi", + "env": [ + { + "name": "HEKETI_EXECUTOR", + "value": "kubernetes" + }, + { + "name": "HEKETI_DB_PATH", + "value": "/var/lib/heketi/heketi.db" + }, + { + "name": "HEKETI_FSTAB", + "value": "/var/lib/heketi/fstab" + }, + { + "name": "HEKETI_SNAPSHOT_LIMIT", + "value": "14" + }, + { + "name": "HEKETI_KUBE_GLUSTER_DAEMONSET", + "value": "y" + } + ], + "ports": [ + { + "containerPort": 8080 + } + ], + "volumeMounts": [ + { + "mountPath": "/backupdb", + "name": "heketi-db-secret" + }, + { + "name": "db", + "mountPath": "/var/lib/heketi" + }, + { + "name": "config", + "mountPath": "/etc/heketi" + } + ], + "readinessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 3, + "httpGet": { + "path": "/hello", + "port": 8080 + } + }, + "livenessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 10, + "httpGet": { + "path": "/hello", + "port": 8080 + } + } + } + ], + "volumes": [ + { + "name": "db", + "glusterfs": { + "endpoints": "heketi-storage-endpoints", + "path": "heketidbstorage" + } + }, + { + "name": "heketi-db-secret", + "secret": { + "secretName": "heketi-db-backup" + } + }, + { + "name": "config", + "secret": { + "secretName": "heketi-config-secret" + } + } + ] + } + } + } + } + ] +} diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-service-account.json.j2 b/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-service-account.json.j2 new file mode 100644 index 0000000..1dbcb9e --- /dev/null +++ 
b/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-service-account.json.j2 @@ -0,0 +1,7 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "metadata": { + "name": "heketi-service-account" + } +} diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-storage.json.j2 b/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-storage.json.j2 new file mode 100644 index 0000000..e985d25 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi-storage.json.j2 @@ -0,0 +1,54 @@ +{ + "apiVersion": "v1", + "kind": "List", + "items": [ + { + "kind": "Endpoints", + "apiVersion": "v1", + "metadata": { + "name": "heketi-storage-endpoints", + "creationTimestamp": null + }, + "subsets": [ +{% set nodeblocks = [] %} +{% for node in nodes %} +{% set nodeblock %} + { + "addresses": [ + { + "ip": "{{ hostvars[node].ip }}" + } + ], + "ports": [ + { + "port": 1 + } + ] + } +{% endset %} +{% if nodeblocks.append(nodeblock) %}{% endif %} +{% endfor %} +{{ nodeblocks|join(',') }} + ] + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "heketi-storage-endpoints", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "port": 1, + "targetPort": 0 + } + ] + }, + "status": { + "loadBalancer": {} + } + } + ] +} diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi.json.j2 b/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi.json.j2 new file mode 100644 index 0000000..5861b68 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/templates/heketi.json.j2 @@ -0,0 +1,44 @@ +{ + "_port_comment": "Heketi Server Port Number", + "port": "8080", + + "_use_auth": "Enable JWT authorization. Please enable for deployment", + "use_auth": true, + + "_jwt": "Private keys for access", + "jwt": { + "_admin": "Admin has access to all APIs", + "admin": { + "key": "{{ heketi_admin_key }}" + }, + "_user": "User only has access to /volumes endpoint", + "user": { + "key": "{{ heketi_user_key }}" + } + }, + + "_glusterfs_comment": "GlusterFS Configuration", + "glusterfs": { + "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh", + "executor": "kubernetes", + + "_db_comment": "Database file name", + "db": "/var/lib/heketi/heketi.db", + + "kubeexec": { + "rebalance_on_expansion": true + }, + + "sshexec": { + "rebalance_on_expansion": true, + "keyfile": "/etc/heketi/private_key", + "fstab": "/etc/fstab", + "port": "22", + "user": "root", + "sudo": false + } + }, + + "_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. 
Default is off.", + "backup_db_to_kube_secret": false +} diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/templates/storageclass.yml.j2 b/kubespray/contrib/network-storage/heketi/roles/provision/templates/storageclass.yml.j2 new file mode 100644 index 0000000..c2b64cf --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/templates/storageclass.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gluster + annotations: + storageclass.beta.kubernetes.io/is-default-class: "true" +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://{{ endpoint_address }}:8080" + restuser: "admin" + restuserkey: "{{ heketi_admin_key }}" diff --git a/kubespray/contrib/network-storage/heketi/roles/provision/templates/topology.json.j2 b/kubespray/contrib/network-storage/heketi/roles/provision/templates/topology.json.j2 new file mode 100644 index 0000000..c19ce32 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/provision/templates/topology.json.j2 @@ -0,0 +1,34 @@ +{ + "clusters": [ + { + "nodes": [ +{% set nodeblocks = [] %} +{% for node in nodes %} +{% set nodeblock %} + { + "node": { + "hostnames": { + "manage": [ + "{{ node }}" + ], + "storage": [ + "{{ hostvars[node].ip }}" + ] + }, + "zone": 1 + }, + "devices": [ + { + "name": "{{ hostvars[node]['disk_volume_device_1'] }}", + "destroydata": false + } + ] + } +{% endset %} +{% if nodeblocks.append(nodeblock) %}{% endif %} +{% endfor %} +{{ nodeblocks|join(',') }} + ] + } + ] +} diff --git a/kubespray/contrib/network-storage/heketi/roles/tear-down-disks/defaults/main.yml b/kubespray/contrib/network-storage/heketi/roles/tear-down-disks/defaults/main.yml new file mode 100644 index 0000000..c07ba2d --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/tear-down-disks/defaults/main.yml @@ -0,0 +1,2 @@ +--- +heketi_remove_lvm: false diff --git a/kubespray/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml b/kubespray/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml new file mode 100644 index 0000000..ae98bd8 --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- name: "Install lvm utils (RedHat)" + become: true + package: + name: "lvm2" + state: "present" + when: "ansible_os_family == 'RedHat'" + +- name: "Install lvm utils (Debian)" + become: true + apt: + name: "lvm2" + state: "present" + when: "ansible_os_family == 'Debian'" + +- name: "Get volume group information." + environment: + PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management + become: true + shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2" + register: "volume_groups" + ignore_errors: true # noqa ignore-errors + changed_when: false + +- name: "Remove volume groups." # noqa 301 + environment: + PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management + become: true + command: "vgremove {{ volume_group }} --yes" + with_items: "{{ volume_groups.stdout_lines }}" + loop_control: { loop_var: "volume_group" } + +- name: "Remove physical volume from cluster disks." 
# noqa 301 + environment: + PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management + become: true + command: "pvremove {{ disk_volume_device_1 }} --yes" + ignore_errors: true # noqa ignore-errors + +- name: "Remove lvm utils (RedHat)" + become: true + package: + name: "lvm2" + state: "absent" + when: "ansible_os_family == 'RedHat' and heketi_remove_lvm" + +- name: "Remove lvm utils (Debian)" + become: true + apt: + name: "lvm2" + state: "absent" + when: "ansible_os_family == 'Debian' and heketi_remove_lvm" diff --git a/kubespray/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/kubespray/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml new file mode 100644 index 0000000..608b25d --- /dev/null +++ b/kubespray/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml @@ -0,0 +1,51 @@ +--- +- name: Remove storage class. # noqa 301 + command: "{{ bin_dir }}/kubectl delete storageclass gluster" + ignore_errors: true # noqa ignore-errors +- name: Tear down heketi. # noqa 301 + command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\"" + ignore_errors: true # noqa ignore-errors +- name: Tear down heketi. # noqa 301 + command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\"" + ignore_errors: true # noqa ignore-errors +- name: Tear down bootstrap. + include_tasks: "../../provision/tasks/bootstrap/tear-down.yml" +- name: Ensure there is nothing left over. # noqa 301 + command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json" + register: "heketi_result" + until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" + retries: 60 + delay: 5 +- name: Ensure there is nothing left over. # noqa 301 + command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json" + register: "heketi_result" + until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" + retries: 60 + delay: 5 +- name: Tear down glusterfs. # noqa 301 + command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs" + ignore_errors: true # noqa ignore-errors +- name: Remove heketi storage service. 
# noqa 301 + command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints" + ignore_errors: true # noqa ignore-errors +- name: Remove heketi gluster role binding # noqa 301 + command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin" + ignore_errors: true # noqa ignore-errors +- name: Remove heketi config secret # noqa 301 + command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret" + ignore_errors: true # noqa ignore-errors +- name: Remove heketi db backup # noqa 301 + command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup" + ignore_errors: true # noqa ignore-errors +- name: Remove heketi service account # noqa 301 + command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account" + ignore_errors: true # noqa ignore-errors +- name: Get secrets + command: "{{ bin_dir }}/kubectl get secrets --output=\"json\"" + register: "secrets" + changed_when: false +- name: Remove heketi storage secret + vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" } + command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}" + when: "storage_query is defined" + ignore_errors: true # noqa ignore-errors diff --git a/kubespray/contrib/offline/README.md b/kubespray/contrib/offline/README.md new file mode 100644 index 0000000..6ea7885 --- /dev/null +++ b/kubespray/contrib/offline/README.md @@ -0,0 +1,65 @@ +# Offline deployment + +## manage-offline-container-images.sh + +Container image collecting script for offline deployment. + +This script has two features: +(1) Get container images from an environment which is deployed online. +(2) Deploy a local container registry and register the container images to the registry. + +Step(1) should be done on the online site as preparation; then bring the collected images +to the target offline environment. If the images come from a private registry, +you need to set the `PRIVATE_REGISTRY` environment variable. +Then run step(2) to register the images in the local registry. + +Step(1) can be operated with: + +```shell +manage-offline-container-images.sh create +``` + +Step(2) can be operated with: + +```shell +manage-offline-container-images.sh register +``` + +## generate_list.sh + +This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main.yml` file. + +Running this script executes the `generate_list.yml` playbook in the kubespray root directory and generates four files: +the URLs of all downloaded files in files.list, all container images in images.list, and the jinja2 templates in *.template. + +```shell +./generate_list.sh +tree temp +temp +├── files.list +├── files.list.template +├── images.list +└── images.list.template +0 directories, 4 files +``` + +In some cases you may want to update a component version; you can declare version variables in the ansible inventory file or group_vars, +then run `./generate_list.sh -i [inventory_file]` to update files.list and images.list. + +## manage-offline-files.sh + +This script downloads all files according to `temp/files.list` and runs an nginx container to provide offline file downloads. + +Step(1): generate `files.list` + +```shell +./generate_list.sh +``` + +Step(2): download the files and run the nginx container + +```shell +./manage-offline-files.sh +``` + +When the nginx container is running, the mirrored files can be downloaded from it over HTTP on port 8080 (the `NGINX_PORT` set in the script).
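As a quick sanity check after running these scripts, one can confirm the local file mirror is answering. A minimal sketch, assuming both scripts were run on this host, `temp/files.list` exists, and the script's default `NGINX_PORT` of 8080 is in use:

```shell
# Number of artifacts that were supposed to be mirrored.
wc -l temp/files.list

# The nginx container started by manage-offline-files.sh should respond here
# (8080 is the NGINX_PORT default in the script; adjust if you changed it).
curl -I http://localhost:8080/
```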
diff --git a/kubespray/contrib/offline/docker-daemon.json b/kubespray/contrib/offline/docker-daemon.json new file mode 100644 index 0000000..84ddb60 --- /dev/null +++ b/kubespray/contrib/offline/docker-daemon.json @@ -0,0 +1 @@ +{ "insecure-registries":["HOSTNAME:5000"] } diff --git a/kubespray/contrib/offline/generate_list.sh b/kubespray/contrib/offline/generate_list.sh new file mode 100755 index 0000000..acbd9fc --- /dev/null +++ b/kubespray/contrib/offline/generate_list.sh @@ -0,0 +1,33 @@ +#!/bin/bash +set -eo pipefail + +CURRENT_DIR=$(cd $(dirname $0); pwd) +TEMP_DIR="${CURRENT_DIR}/temp" +REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}" + +: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"} + +mkdir -p ${TEMP_DIR} + +# generate all download files url template +grep 'download_url:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \ + | sed 's/^.*_url: //g;s/\"//g' > ${TEMP_DIR}/files.list.template + +# generate all images list template +sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \ + | sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' \ + | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template + +# add kube-* images to images list template +# Those container images are downloaded by kubeadm, then roles/download/defaults/main.yml +# doesn't contain those images. That is reason why here needs to put those images into the +# list separately. +KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy" +for i in $KUBE_IMAGES; do + echo "{{ kube_image_repo }}/$i:{{ kube_version }}" >> ${TEMP_DIR}/images.list.template +done + +# run ansible to expand templates +/bin/cp ${CURRENT_DIR}/generate_list.yml ${REPO_ROOT_DIR} + +(cd ${REPO_ROOT_DIR} && ansible-playbook $* generate_list.yml && /bin/rm generate_list.yml) || exit 1 diff --git a/kubespray/contrib/offline/generate_list.yml b/kubespray/contrib/offline/generate_list.yml new file mode 100644 index 0000000..c3458e6 --- /dev/null +++ b/kubespray/contrib/offline/generate_list.yml @@ -0,0 +1,19 @@ +--- +- hosts: localhost + become: no + + roles: + # Just load default variables from roles. + - role: kubespray-defaults + when: false + - role: download + when: false + + tasks: + # Generate files.list and images.list files from templates. + - template: + src: ./contrib/offline/temp/{{ item }}.list.template + dest: ./contrib/offline/temp/{{ item }}.list + with_items: + - files + - images diff --git a/kubespray/contrib/offline/manage-offline-container-images.sh b/kubespray/contrib/offline/manage-offline-container-images.sh new file mode 100755 index 0000000..40ff2c2 --- /dev/null +++ b/kubespray/contrib/offline/manage-offline-container-images.sh @@ -0,0 +1,172 @@ +#!/bin/bash + +OPTION=$1 +CURRENT_DIR=$(cd $(dirname $0); pwd) +TEMP_DIR="${CURRENT_DIR}/temp" + +IMAGE_TAR_FILE="${CURRENT_DIR}/container-images.tar.gz" +IMAGE_DIR="${CURRENT_DIR}/container-images" +IMAGE_LIST="${IMAGE_DIR}/container-images.txt" +RETRY_COUNT=5 + +function create_container_image_tar() { + set -e + + IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq) + # NOTE: etcd and pause cannot be seen as pods. + # The pause image is used for --pod-infra-container-image option of kubelet. 
+ EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g) + IMAGES="${IMAGES} ${EXT_IMAGES}" + + rm -f ${IMAGE_TAR_FILE} + rm -rf ${IMAGE_DIR} + mkdir ${IMAGE_DIR} + cd ${IMAGE_DIR} + + sudo docker pull registry:latest + sudo docker save -o registry-latest.tar registry:latest + + for image in ${IMAGES} + do + FILE_NAME="$(echo ${image} | sed s@"/"@"-"@g | sed s/":"/"-"/g)".tar + set +e + for step in $(seq 1 ${RETRY_COUNT}) + do + sudo docker pull ${image} + if [ $? -eq 0 ]; then + break + fi + echo "Failed to pull ${image} at step ${step}" + if [ ${step} -eq ${RETRY_COUNT} ]; then + exit 1 + fi + done + set -e + sudo docker save -o ${FILE_NAME} ${image} + + # NOTE: Here removes the following repo parts from each image + # so that these parts will be replaced with Kubespray. + # - kube_image_repo: "registry.k8s.io" + # - gcr_image_repo: "gcr.io" + # - docker_image_repo: "docker.io" + # - quay_image_repo: "quay.io" + FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}') + if [ "${FIRST_PART}" = "registry.k8s.io" ] || + [ "${FIRST_PART}" = "gcr.io" ] || + [ "${FIRST_PART}" = "docker.io" ] || + [ "${FIRST_PART}" = "quay.io" ] || + [ "${FIRST_PART}" = "${PRIVATE_REGISTRY}" ]; then + image=$(echo ${image} | sed s@"${FIRST_PART}/"@@) + fi + echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST} + done + + cd .. + sudo chown ${USER} ${IMAGE_DIR}/* + tar -zcvf ${IMAGE_TAR_FILE} ./container-images + rm -rf ${IMAGE_DIR} + + echo "" + echo "${IMAGE_TAR_FILE} is created to contain your container images." + echo "Please keep this file and bring it to your offline environment." +} + +function register_container_images() { + if [ ! -f ${IMAGE_TAR_FILE} ]; then + echo "${IMAGE_TAR_FILE} should exist." + exit 1 + fi + if [ ! -d ${TEMP_DIR} ]; then + mkdir ${TEMP_DIR} + fi + + # To avoid "http: server gave http response to https client" error. + LOCALHOST_NAME=$(hostname) + if [ -d /etc/docker/ ]; then + set -e + # Ubuntu18.04, RHEL7/CentOS7 + cp ${CURRENT_DIR}/docker-daemon.json ${TEMP_DIR}/docker-daemon.json + sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/docker-daemon.json + sudo cp ${TEMP_DIR}/docker-daemon.json /etc/docker/daemon.json + elif [ -d /etc/containers/ ]; then + set -e + # RHEL8/CentOS8 + cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf + sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/registries.conf + sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf + else + echo "docker package(docker-ce, etc.) should be installed" + exit 1 + fi + + tar -zxvf ${IMAGE_TAR_FILE} + sudo docker load -i ${IMAGE_DIR}/registry-latest.tar + set +e + sudo docker container inspect registry >/dev/null 2>&1 + if [ $? 
-ne 0 ]; then + sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest + fi + set -e + + while read -r line; do + file_name=$(echo ${line} | awk '{print $1}') + raw_image=$(echo ${line} | awk '{print $2}') + new_image="${LOCALHOST_NAME}:5000/${raw_image}" + org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}') + image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//) + if [ -z "${file_name}" ]; then + echo "Failed to get file_name for line ${line}" + exit 1 + fi + if [ -z "${raw_image}" ]; then + echo "Failed to get raw_image for line ${line}" + exit 1 + fi + if [ -z "${org_image}" ]; then + echo "Failed to get org_image for line ${line}" + exit 1 + fi + if [ -z "${image_id}" ]; then + echo "Failed to get image_id for file ${file_name}" + exit 1 + fi + sudo docker load -i ${IMAGE_DIR}/${file_name} + sudo docker tag ${image_id} ${new_image} + sudo docker push ${new_image} + done <<< "$(cat ${IMAGE_LIST})" + + echo "Succeeded to register container images to local registry." + echo "Please specify ${LOCALHOST_NAME}:5000 for the following options in your inventry:" + echo "- kube_image_repo" + echo "- gcr_image_repo" + echo "- docker_image_repo" + echo "- quay_image_repo" +} + +if [ "${OPTION}" == "create" ]; then + create_container_image_tar +elif [ "${OPTION}" == "register" ]; then + register_container_images +else + echo "This script has two features:" + echo "(1) Get container images from an environment which is deployed online." + echo "(2) Deploy local container registry and register the container images to the registry." + echo "" + echo "Step(1) should be done online site as a preparation, then we bring" + echo "the gotten images to the target offline environment. if images are from" + echo "a private registry, you need to set PRIVATE_REGISTRY environment variable." + echo "Then we will run step(2) for registering the images to local registry." + echo "" + echo "${IMAGE_TAR_FILE} is created to contain your container images." + echo "Please keep this file and bring it to your offline environment." + echo "" + echo "Step(1) can be operated with:" + echo " $ ./manage-offline-container-images.sh create" + echo "" + echo "Step(2) can be operated with:" + echo " $ ./manage-offline-container-images.sh register" + echo "" + echo "Please specify 'create' or 'register'." + echo "" + exit 1 +fi diff --git a/kubespray/contrib/offline/manage-offline-files.sh b/kubespray/contrib/offline/manage-offline-files.sh new file mode 100755 index 0000000..e949c70 --- /dev/null +++ b/kubespray/contrib/offline/manage-offline-files.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +CURRENT_DIR=$( dirname "$(readlink -f "$0")" ) +OFFLINE_FILES_DIR_NAME="offline-files" +OFFLINE_FILES_DIR="${CURRENT_DIR}/${OFFLINE_FILES_DIR_NAME}" +OFFLINE_FILES_ARCHIVE="${CURRENT_DIR}/offline-files.tar.gz" +FILES_LIST=${FILES_LIST:-"${CURRENT_DIR}/temp/files.list"} +NGINX_PORT=8080 + +# download files +if [ ! -f "${FILES_LIST}" ]; then + echo "${FILES_LIST} should exist, run ./generate_list.sh first." 
+ exit 1 +fi + +rm -rf "${OFFLINE_FILES_DIR}" +rm "${OFFLINE_FILES_ARCHIVE}" +mkdir "${OFFLINE_FILES_DIR}" + +wget -x -P "${OFFLINE_FILES_DIR}" -i "${FILES_LIST}" +tar -czvf "${OFFLINE_FILES_ARCHIVE}" "${OFFLINE_FILES_DIR_NAME}" + +[ -n "$NO_HTTP_SERVER" ] && echo "skip to run nginx" && exit 0 + +# run nginx container server +if command -v nerdctl 1>/dev/null 2>&1; then + runtime="nerdctl" +elif command -v podman 1>/dev/null 2>&1; then + runtime="podman" +elif command -v docker 1>/dev/null 2>&1; then + runtime="docker" +else + echo "No supported container runtime found" + exit 1 +fi + +sudo "${runtime}" container inspect nginx >/dev/null 2>&1 +if [ $? -ne 0 ]; then + sudo "${runtime}" run \ + --restart=always -d -p ${NGINX_PORT}:80 \ + --volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \ + --volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \ + --name nginx nginx:alpine +fi diff --git a/kubespray/contrib/offline/nginx.conf b/kubespray/contrib/offline/nginx.conf new file mode 100644 index 0000000..a6fd5eb --- /dev/null +++ b/kubespray/contrib/offline/nginx.conf @@ -0,0 +1,39 @@ +user nginx; +worker_processes auto; +error_log /var/log/nginx/error.log; +pid /run/nginx.pid; +include /usr/share/nginx/modules/*.conf; +events { + worker_connections 1024; +} +http { + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + access_log /var/log/nginx/access.log main; + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + default_type application/octet-stream; + include /etc/nginx/conf.d/*.conf; + server { + listen 80 default_server; + listen [::]:80 default_server; + server_name _; + include /etc/nginx/default.d/*.conf; + location / { + root /usr/share/nginx/html/download; + autoindex on; + autoindex_exact_size off; + autoindex_localtime on; + } + error_page 404 /404.html; + location = /40x.html { + } + error_page 500 502 503 504 /50x.html; + location = /50x.html { + } + } +} diff --git a/kubespray/contrib/offline/registries.conf b/kubespray/contrib/offline/registries.conf new file mode 100644 index 0000000..6852aee --- /dev/null +++ b/kubespray/contrib/offline/registries.conf @@ -0,0 +1,8 @@ +[registries.search] +registries = ['registry.access.redhat.com', 'registry.redhat.io', 'docker.io'] + +[registries.insecure] +registries = ['HOSTNAME:5000'] + +[registries.block] +registries = [] diff --git a/kubespray/contrib/os-services/os-services.yml b/kubespray/contrib/os-services/os-services.yml new file mode 100644 index 0000000..34c9d8c --- /dev/null +++ b/kubespray/contrib/os-services/os-services.yml @@ -0,0 +1,4 @@ +--- +- hosts: all + roles: + - { role: prepare } diff --git a/kubespray/contrib/os-services/roles/prepare/defaults/main.yml b/kubespray/contrib/os-services/roles/prepare/defaults/main.yml new file mode 100644 index 0000000..9c4a149 --- /dev/null +++ b/kubespray/contrib/os-services/roles/prepare/defaults/main.yml @@ -0,0 +1,2 @@ +--- +disable_service_firewall: false diff --git a/kubespray/contrib/os-services/roles/prepare/tasks/main.yml b/kubespray/contrib/os-services/roles/prepare/tasks/main.yml new file mode 100644 index 0000000..cf72622 --- /dev/null +++ b/kubespray/contrib/os-services/roles/prepare/tasks/main.yml @@ -0,0 +1,23 @@ +--- +- block: + - name: List services + service_facts: + + - name: Disable service firewalld + systemd: + name: firewalld + state: stopped + enabled: no + when: + "'firewalld.service' in 
services" + + - name: Disable service ufw + systemd: + name: ufw + state: stopped + enabled: no + when: + "'ufw.service' in services" + + when: + - disable_service_firewall is defined and disable_service_firewall diff --git a/kubespray/contrib/terraform/OWNERS b/kubespray/contrib/terraform/OWNERS new file mode 100644 index 0000000..b58878d --- /dev/null +++ b/kubespray/contrib/terraform/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - holmsten + - miouge1 diff --git a/kubespray/contrib/terraform/aws/.gitignore b/kubespray/contrib/terraform/aws/.gitignore new file mode 100644 index 0000000..373687b --- /dev/null +++ b/kubespray/contrib/terraform/aws/.gitignore @@ -0,0 +1,3 @@ +*.tfstate* +.terraform.lock.hcl +.terraform diff --git a/kubespray/contrib/terraform/aws/README.md b/kubespray/contrib/terraform/aws/README.md new file mode 100644 index 0000000..7e3428d --- /dev/null +++ b/kubespray/contrib/terraform/aws/README.md @@ -0,0 +1,162 @@ +# Kubernetes on AWS with Terraform + +## Overview + +This project will create: + +- VPC with Public and Private Subnets in # Availability Zones +- Bastion Hosts and NAT Gateways in the Public Subnet +- A dynamic number of masters, etcd, and worker nodes in the Private Subnet + - even distributed over the # of Availability Zones +- AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet + +## Requirements + +- Terraform 0.12.0 or newer + +## How to Use + +- Export the variables for your AWS credentials or edit `credentials.tfvars`: + +```commandline +export TF_VAR_AWS_ACCESS_KEY_ID="www" +export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx" +export TF_VAR_AWS_SSH_KEY_NAME="yyy" +export TF_VAR_AWS_DEFAULT_REGION="zzz" +``` + +- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use Ubuntu 18.04 LTS (Bionic) as base image. If you want to change this behaviour, see note "Using other distrib than Ubuntu" below. +- Create an AWS EC2 SSH Key +- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials + +Example: + +```commandline +terraform apply -var-file=credentials.tfvars +``` + +- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory` +- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated `ssh-bastion.conf`. Ansible automatically detects bastion and changes `ssh_args` + +```commandline +ssh -F ./ssh-bastion.conf user@$ip +``` + +- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag. + +Example (this one assumes you are using Ubuntu) + +```commandline +ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache +``` + +***Using other distrib than Ubuntu*** +If you want to use another distribution than Ubuntu 18.04 (Bionic) LTS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf. 
+ +For example, to use: + +- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with + +```ini +data "aws_ami" "distro" { + most_recent = true + + filter { + name = "name" + values = ["debian-jessie-amd64-hvm-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + owners = ["379101102735"] +} +``` + +- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with + +```ini +data "aws_ami" "distro" { + most_recent = true + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + owners = ["099720109477"] +} +``` + +- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with + +```ini +data "aws_ami" "distro" { + most_recent = true + + filter { + name = "name" + values = ["dcos-centos7-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + owners = ["688023202711"] +} +``` + +## Connecting to Kubernetes + +You can use the following set of commands to get the kubeconfig file from your newly created cluster. Before running the commands, make sure you are in the project's root folder. + +```commandline +# Get the controller's IP address. +CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1) +CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2) + +# Get the hostname of the load balancer. +LB_HOST=$(cat inventory/hosts | grep apiserver_loadbalancer_domain_name | cut -d'"' -f2) + +# Get the controller's SSH fingerprint. +ssh-keygen -R $CONTROLLER_IP > /dev/null 2>&1 +ssh-keyscan -H $CONTROLLER_IP >> ~/.ssh/known_hosts 2>/dev/null + +# Get the kubeconfig from the controller. +mkdir -p ~/.kube +ssh -F ssh-bastion.conf centos@$CONTROLLER_IP "sudo chmod 644 /etc/kubernetes/admin.conf" +scp -F ssh-bastion.conf centos@$CONTROLLER_IP:/etc/kubernetes/admin.conf ~/.kube/config +sed -i "s^server:.*^server: https://$LB_HOST:6443^" ~/.kube/config +kubectl get nodes +``` + +## Troubleshooting + +### Remaining AWS IAM Instance Profile + +If the cluster was destroyed without using Terraform it is possible that +the AWS IAM Instance Profiles still remain. To delete them you can use +the `AWS CLI` with the following command: + +```commandline +aws iam delete-instance-profile --region --instance-profile-name +``` + +### Ansible Inventory doesn't get created + +It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts`in the directory `inventory` and paste the inventory into the file. + +## Architecture + +Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones. 
+ +![AWS Infrastructure with Terraform ](docs/aws_kubespray.png) diff --git a/kubespray/contrib/terraform/aws/create-infrastructure.tf b/kubespray/contrib/terraform/aws/create-infrastructure.tf new file mode 100644 index 0000000..0a38844 --- /dev/null +++ b/kubespray/contrib/terraform/aws/create-infrastructure.tf @@ -0,0 +1,179 @@ +terraform { + required_version = ">= 0.12.0" +} + +provider "aws" { + access_key = var.AWS_ACCESS_KEY_ID + secret_key = var.AWS_SECRET_ACCESS_KEY + region = var.AWS_DEFAULT_REGION +} + +data "aws_availability_zones" "available" {} + +/* +* Calling modules who create the initial AWS VPC / AWS ELB +* and AWS IAM Roles for Kubernetes Deployment +*/ + +module "aws-vpc" { + source = "./modules/vpc" + + aws_cluster_name = var.aws_cluster_name + aws_vpc_cidr_block = var.aws_vpc_cidr_block + aws_avail_zones = data.aws_availability_zones.available.names + aws_cidr_subnets_private = var.aws_cidr_subnets_private + aws_cidr_subnets_public = var.aws_cidr_subnets_public + default_tags = var.default_tags +} + +module "aws-nlb" { + source = "./modules/nlb" + + aws_cluster_name = var.aws_cluster_name + aws_vpc_id = module.aws-vpc.aws_vpc_id + aws_avail_zones = data.aws_availability_zones.available.names + aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public + aws_nlb_api_port = var.aws_nlb_api_port + k8s_secure_api_port = var.k8s_secure_api_port + default_tags = var.default_tags +} + +module "aws-iam" { + source = "./modules/iam" + + aws_cluster_name = var.aws_cluster_name +} + +/* +* Create Bastion Instances in AWS +* +*/ + +resource "aws_instance" "bastion-server" { + ami = data.aws_ami.distro.id + instance_type = var.aws_bastion_size + count = var.aws_bastion_num + associate_public_ip_address = true + subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index) + + vpc_security_group_ids = module.aws-vpc.aws_security_group + + key_name = var.AWS_SSH_KEY_NAME + + tags = merge(var.default_tags, tomap({ + Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}" + Cluster = var.aws_cluster_name + Role = "bastion-${var.aws_cluster_name}-${count.index}" + })) +} + +/* +* Create K8s Master and worker nodes and etcd instances +* +*/ + +resource "aws_instance" "k8s-master" { + ami = data.aws_ami.distro.id + instance_type = var.aws_kube_master_size + + count = var.aws_kube_master_num + + subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) + + vpc_security_group_ids = module.aws-vpc.aws_security_group + + root_block_device { + volume_size = var.aws_kube_master_disk_size + } + + iam_instance_profile = module.aws-iam.kube_control_plane-profile + key_name = var.AWS_SSH_KEY_NAME + + tags = merge(var.default_tags, tomap({ + Name = "kubernetes-${var.aws_cluster_name}-master${count.index}" + "kubernetes.io/cluster/${var.aws_cluster_name}" = "member" + Role = "master" + })) +} + +resource "aws_lb_target_group_attachment" "tg-attach_master_nodes" { + count = var.aws_kube_master_num + target_group_arn = module.aws-nlb.aws_nlb_api_tg_arn + target_id = element(aws_instance.k8s-master.*.private_ip, count.index) +} + +resource "aws_instance" "k8s-etcd" { + ami = data.aws_ami.distro.id + instance_type = var.aws_etcd_size + + count = var.aws_etcd_num + + subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) + + vpc_security_group_ids = module.aws-vpc.aws_security_group + + root_block_device { + volume_size = var.aws_etcd_disk_size + } + + key_name = var.AWS_SSH_KEY_NAME + + tags = merge(var.default_tags, tomap({ + Name = 
"kubernetes-${var.aws_cluster_name}-etcd${count.index}" + "kubernetes.io/cluster/${var.aws_cluster_name}" = "member" + Role = "etcd" + })) +} + +resource "aws_instance" "k8s-worker" { + ami = data.aws_ami.distro.id + instance_type = var.aws_kube_worker_size + + count = var.aws_kube_worker_num + + subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) + + vpc_security_group_ids = module.aws-vpc.aws_security_group + + root_block_device { + volume_size = var.aws_kube_worker_disk_size + } + + iam_instance_profile = module.aws-iam.kube-worker-profile + key_name = var.AWS_SSH_KEY_NAME + + tags = merge(var.default_tags, tomap({ + Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}" + "kubernetes.io/cluster/${var.aws_cluster_name}" = "member" + Role = "worker" + })) +} + +/* +* Create Kubespray Inventory File +* +*/ +data "template_file" "inventory" { + template = file("${path.module}/templates/inventory.tpl") + + vars = { + public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip)) + connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip)) + connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip)) + list_master = join("\n", aws_instance.k8s-master.*.private_dns) + list_node = join("\n", aws_instance.k8s-worker.*.private_dns) + connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip)) + list_etcd = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns))) + nlb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-nlb.aws_nlb_api_fqdn}\"" + } +} + +resource "null_resource" "inventories" { + provisioner "local-exec" { + command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}" + } + + triggers = { + template = data.template_file.inventory.rendered + } +} diff --git a/kubespray/contrib/terraform/aws/credentials.tfvars.example b/kubespray/contrib/terraform/aws/credentials.tfvars.example new file mode 100644 index 0000000..19420c5 --- /dev/null +++ b/kubespray/contrib/terraform/aws/credentials.tfvars.example @@ -0,0 +1,8 @@ +#AWS Access Key +AWS_ACCESS_KEY_ID = "" +#AWS Secret Key +AWS_SECRET_ACCESS_KEY = "" +#EC2 SSH Key Name +AWS_SSH_KEY_NAME = "" +#AWS Region +AWS_DEFAULT_REGION = "eu-central-1" diff --git a/kubespray/contrib/terraform/aws/docs/aws_kubespray.png b/kubespray/contrib/terraform/aws/docs/aws_kubespray.png new file mode 100644 index 0000000..40245b8 Binary files /dev/null and b/kubespray/contrib/terraform/aws/docs/aws_kubespray.png differ diff --git a/kubespray/contrib/terraform/aws/modules/iam/main.tf b/kubespray/contrib/terraform/aws/modules/iam/main.tf new file mode 100644 index 0000000..a35afc7 --- /dev/null +++ b/kubespray/contrib/terraform/aws/modules/iam/main.tf @@ -0,0 +1,141 @@ +#Add AWS Roles for Kubernetes + +resource "aws_iam_role" "kube_control_plane" { + name = "kubernetes-${var.aws_cluster_name}-master" + + assume_role_policy = < 0) ? 
(aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip))) +} + +output "aws_nlb_api_fqdn" { + value = "${module.aws-nlb.aws_nlb_api_fqdn}:${var.aws_nlb_api_port}" +} + +output "inventory" { + value = data.template_file.inventory.rendered +} + +output "default_tags" { + value = var.default_tags +} diff --git a/kubespray/contrib/terraform/aws/sample-inventory/cluster.tfvars b/kubespray/contrib/terraform/aws/sample-inventory/cluster.tfvars new file mode 100644 index 0000000..8aca219 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/cluster.tfvars @@ -0,0 +1,59 @@ +#Global Vars +aws_cluster_name = "devtest" + +#VPC Vars +aws_vpc_cidr_block = "10.250.192.0/18" + +aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"] + +aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"] + +#Bastion Host +aws_bastion_num = 1 + +aws_bastion_size = "t2.medium" + +#Kubernetes Cluster + +aws_kube_master_num = 3 + +aws_kube_master_size = "t2.medium" + +aws_kube_master_disk_size = 50 + +aws_etcd_num = 3 + +aws_etcd_size = "t2.medium" + +aws_etcd_disk_size = 50 + +aws_kube_worker_num = 4 + +aws_kube_worker_size = "t2.medium" + +aws_kube_worker_disk_size = 50 + +#Settings AWS NLB + +aws_nlb_api_port = 6443 + +k8s_secure_api_port = 6443 + +default_tags = { + # Env = "devtest" # Product = "kubernetes" +} + +inventory_file = "../../../inventory/hosts" + +## Credentials +#AWS Access Key +AWS_ACCESS_KEY_ID = "" + +#AWS Secret Key +AWS_SECRET_ACCESS_KEY = "" + +#EC2 SSH Key Name +AWS_SSH_KEY_NAME = "" + +#AWS Region +AWS_DEFAULT_REGION = "eu-central-1" diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/all.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/all.yml new file mode 100644 index 0000000..b9639a8 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/all.yml @@ -0,0 +1,140 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If the cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## And must be set port 6443 +loadbalancer_apiserver_port: 6443 + +## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. 
However, It uses the nameserver to make sure cluster installed safely in dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. 
+# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. +ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/aws.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/azure.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. 
+## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/containerd.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. 
+# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/coreos.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/cri-o.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/docker.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registry,example China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value. 
+## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/etcd.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/gcp.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/hcloud.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/oci.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud 
Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. +# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/offline.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network 
plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Falnnel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repo must be available, for EL8, baseos and appstream +### By default we enable those repo automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ 
ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/openstack.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. 
+# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/upcloud.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/vsphere.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ 
b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/etcd.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. 
+# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/addons.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# 
rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: 
"layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... 
+# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. 
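+# For example (illustrative only, not an upstream default): setting
+# kube_proxy_nodeport_addresses_cidr: 10.250.192.0/18 would restrict NodePort
+# listeners to node addresses inside that block, via the template below.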
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). +# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. 
+## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. +# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. 
+# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). 
+## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. +# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enable BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN is mutualy exclusive modes. 
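+# For example (illustrative only, values are assumptions): calico_ipip_mode: 'CrossSubnet'
+# together with calico_vxlan_mode: 'Never' (and calico_network_backend: bird) only
+# encapsulates traffic between nodes on different subnets and routes natively otherwise.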
+# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want use the default route interface when you use multiple interface with dynamique route (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tunning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. +# If left blank, then the interface is choosing using the node's +# default route. +# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. 
+# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all conneted clusters and +# in the range of 1 and 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +# cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). +# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. 
+# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. 
+# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. 
+# cilium_disable_cnp_status_updates: true diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic to support container network can be a nic name or a group of regex separated by comma e.g: 'enp6s0f0,eth.*', if empty will use the nic that the default route use. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bind to the physical port. 
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/contrib/terraform/aws/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. 
The feature works +# only with Weave IPAM (default). +# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/contrib/terraform/aws/templates/inventory.tpl b/kubespray/contrib/terraform/aws/templates/inventory.tpl new file mode 100644 index 0000000..10a3995 --- /dev/null +++ b/kubespray/contrib/terraform/aws/templates/inventory.tpl @@ -0,0 +1,27 @@ +[all] +${connection_strings_master} +${connection_strings_node} +${connection_strings_etcd} +${public_ip_address_bastion} + +[bastion] +${public_ip_address_bastion} + +[kube_control_plane] +${list_master} + +[kube_node] +${list_node} + +[etcd] +${list_etcd} + +[calico_rr] + +[k8s_cluster:children] +kube_node +kube_control_plane +calico_rr + +[k8s_cluster:vars] +${nlb_api_fqdn} diff --git a/kubespray/contrib/terraform/aws/terraform.tfvars b/kubespray/contrib/terraform/aws/terraform.tfvars new file mode 100644 index 0000000..693fa9b --- /dev/null +++ b/kubespray/contrib/terraform/aws/terraform.tfvars @@ -0,0 +1,43 @@ +#Global Vars +aws_cluster_name = "devtest" + +#VPC Vars +aws_vpc_cidr_block = "10.250.192.0/18" +aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"] +aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"] + +# single AZ deployment +#aws_cidr_subnets_private = ["10.250.192.0/20"] +#aws_cidr_subnets_public = ["10.250.224.0/20"] + +# 3+ AZ deployment +#aws_cidr_subnets_private = ["10.250.192.0/24","10.250.193.0/24","10.250.194.0/24","10.250.195.0/24"] +#aws_cidr_subnets_public = ["10.250.224.0/24","10.250.225.0/24","10.250.226.0/24","10.250.227.0/24"] + +#Bastion Host +aws_bastion_num = 1 +aws_bastion_size = "t3.small" + +#Kubernetes Cluster +aws_kube_master_num = 3 +aws_kube_master_size = "t3.medium" +aws_kube_master_disk_size = 50 + +aws_etcd_num = 0 +aws_etcd_size = "t3.medium" +aws_etcd_disk_size = 50 + +aws_kube_worker_num = 4 +aws_kube_worker_size = "t3.medium" +aws_kube_worker_disk_size = 50 + +#Settings AWS ELB +aws_nlb_api_port = 6443 +k8s_secure_api_port = 6443 + +default_tags = { + # Env = "devtest" + # Product = "kubernetes" +} + +inventory_file = "../../../inventory/hosts" diff --git a/kubespray/contrib/terraform/aws/terraform.tfvars.example b/kubespray/contrib/terraform/aws/terraform.tfvars.example new file mode 100644 index 0000000..584b6a2 --- /dev/null +++ b/kubespray/contrib/terraform/aws/terraform.tfvars.example @@ -0,0 +1,33 @@ +#Global Vars +aws_cluster_name = "devtest" + +#VPC Vars +aws_vpc_cidr_block = "10.250.192.0/18" +aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"] +aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"] +aws_avail_zones = ["eu-central-1a","eu-central-1b"] + +#Bastion Host +aws_bastion_num = 1 +aws_bastion_size = "t3.small" + +#Kubernetes Cluster +aws_kube_master_num = 3 +aws_kube_master_size = "t3.medium" +aws_kube_master_disk_size = 50 + +aws_etcd_num = 3 +aws_etcd_size = "t3.medium" +aws_etcd_disk_size = 50 + +aws_kube_worker_num = 4 +aws_kube_worker_size = "t3.medium" +aws_kube_worker_disk_size = 50 + +#Settings AWS ELB +aws_nlb_api_port = 6443 +k8s_secure_api_port = 6443 + +default_tags = { } + +inventory_file = 
"../../../inventory/hosts" diff --git a/kubespray/contrib/terraform/aws/variables.tf b/kubespray/contrib/terraform/aws/variables.tf new file mode 100644 index 0000000..479629e --- /dev/null +++ b/kubespray/contrib/terraform/aws/variables.tf @@ -0,0 +1,125 @@ +variable "AWS_ACCESS_KEY_ID" { + description = "AWS Access Key" +} + +variable "AWS_SECRET_ACCESS_KEY" { + description = "AWS Secret Key" +} + +variable "AWS_SSH_KEY_NAME" { + description = "Name of the SSH keypair to use in AWS." +} + +variable "AWS_DEFAULT_REGION" { + description = "AWS Region" +} + +//General Cluster Settings + +variable "aws_cluster_name" { + description = "Name of AWS Cluster" +} + +data "aws_ami" "distro" { + most_recent = true + + filter { + name = "name" + values = ["debian-10-amd64-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + owners = ["136693071363"] # Debian-10 +} + +//AWS VPC Variables + +variable "aws_vpc_cidr_block" { + description = "CIDR Block for VPC" +} + +variable "aws_cidr_subnets_private" { + description = "CIDR Blocks for private subnets in Availability Zones" + type = list(string) +} + +variable "aws_cidr_subnets_public" { + description = "CIDR Blocks for public subnets in Availability Zones" + type = list(string) +} + +//AWS EC2 Settings + +variable "aws_bastion_size" { + description = "EC2 Instance Size of Bastion Host" +} + +/* +* AWS EC2 Settings +* The number should be divisable by the number of used +* AWS Availability Zones without an remainder. +*/ +variable "aws_bastion_num" { + description = "Number of Bastion Nodes" +} + +variable "aws_kube_master_num" { + description = "Number of Kubernetes Master Nodes" +} + +variable "aws_kube_master_disk_size" { + description = "Disk size for Kubernetes Master Nodes (in GiB)" +} + +variable "aws_kube_master_size" { + description = "Instance size of Kube Master Nodes" +} + +variable "aws_etcd_num" { + description = "Number of etcd Nodes" +} + +variable "aws_etcd_disk_size" { + description = "Disk size for etcd Nodes (in GiB)" +} + +variable "aws_etcd_size" { + description = "Instance size of etcd Nodes" +} + +variable "aws_kube_worker_num" { + description = "Number of Kubernetes Worker Nodes" +} + +variable "aws_kube_worker_disk_size" { + description = "Disk size for Kubernetes Worker Nodes (in GiB)" +} + +variable "aws_kube_worker_size" { + description = "Instance size of Kubernetes Worker Nodes" +} + +/* +* AWS NLB Settings +* +*/ +variable "aws_nlb_api_port" { + description = "Port for AWS NLB" +} + +variable "k8s_secure_api_port" { + description = "Secure Port of K8S API Server" +} + +variable "default_tags" { + description = "Default tags for all resources" + type = map(string) +} + +variable "inventory_file" { + description = "Where to store the generated inventory file" +} diff --git a/kubespray/contrib/terraform/exoscale/README.md b/kubespray/contrib/terraform/exoscale/README.md new file mode 100644 index 0000000..be451cc --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/README.md @@ -0,0 +1,152 @@ +# Kubernetes on Exoscale with Terraform + +Provision a Kubernetes cluster on [Exoscale](https://www.exoscale.com/) using Terraform and Kubespray + +## Overview + +The setup looks like following + +```text + Kubernetes cluster + +-----------------------+ ++---------------+ | +--------------+ | +| | | | +--------------+ | +| API server LB +---------> | | | | +| | | | | Master/etcd | | ++---------------+ | | | node(s) | | + | +-+ | | + | +--------------+ | + | ^ | + | | | + | v | ++---------------+ | 
+--------------+ | +| | | | +--------------+ | +| Ingress LB +---------> | | | | +| | | | | Worker | | ++---------------+ | | | node(s) | | + | +-+ | | + | +--------------+ | + +-----------------------+ +``` + +## Requirements + +* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files) + +## Quickstart + +NOTE: *Assumes you are at the root of the kubespray repo* + +Copy the sample inventory for your cluster and copy the default terraform variables. + +```bash +CLUSTER=my-exoscale-cluster +cp -r inventory/sample inventory/$CLUSTER +cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/ +cd inventory/$CLUSTER +``` + +Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`. + +```bash +# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc. +$EDITOR default.tfvars +``` + +For authentication you can use the credentials file `~/.cloudstack.ini` or `./cloudstack.ini`. +The file should look like something like this: + +```ini +[cloudstack] +key = +secret = +``` + +Follow the [Exoscale IAM Quick-start](https://community.exoscale.com/documentation/iam/quick-start/) to learn how to generate API keys. + +### Encrypted credentials + +To have the credentials encrypted at rest, you can use [sops](https://github.com/mozilla/sops) and only decrypt the credentials at runtime. + +```bash +cat << EOF > cloudstack.ini +[cloudstack] +key = +secret = +EOF +sops --encrypt --in-place --pgp cloudstack.ini +sops cloudstack.ini +``` + +Run terraform to create the infrastructure + +```bash +terraform init ../../contrib/terraform/exoscale +terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale +``` + +If your cloudstack credentials file is encrypted using sops, run the following: + +```bash +terraform init ../../contrib/terraform/exoscale +sops exec-file -no-fifo cloudstack.ini 'CLOUDSTACK_CONFIG={} terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale' +``` + +You should now have a inventory file named `inventory.ini` that you can use with kubespray. +You can now copy your inventory file and use it with kubespray to set up a cluster. +You can type `terraform output` to find out the IP addresses of the nodes, as well as control-plane and data-plane load-balancer. + +It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by: + +```bash +ansible -i inventory.ini -m ping all +``` + +Example to use this with the default sample inventory: + +```bash +ansible-playbook -i inventory.ini ../../cluster.yml -b -v +``` + +## Teardown + +The Kubernetes cluster cannot create any load-balancers or disks, hence, teardown is as simple as Terraform destroy: + +```bash +terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale +``` + +## Variables + +### Required + +* `ssh_public_keys`: List of public SSH keys to install on all machines +* `zone`: The zone where to run the cluster +* `machines`: Machines to provision. Key of this object will be used as the name of the machine + * `node_type`: The role of this node *(master|worker)* + * `size`: The size to use + * `boot_disk`: The boot disk to use + * `image_name`: Name of the image + * `root_partition_size`: Size *(in GB)* for the root partition + * `ceph_partition_size`: Size *(in GB)* for the partition for rook to use as ceph storage. 
*(Set to 0 to disable)* + * `node_local_partition_size`: Size *(in GB)* for the partition for node-local-storage. *(Set to 0 to disable)* +* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes +* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server +* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports) + +### Optional + +* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)* + +An example variables file can be found `default.tfvars` + +## Known limitations + +### Only single disk + +Since Exoscale doesn't support additional disks to be mounted onto an instance, this script has the ability to create partitions for [Rook](https://rook.io/) and [node-local-storage](https://kubernetes.io/docs/concepts/storage/volumes/#local). + +### No Kubernetes API + +The current solution doesn't use the [Exoscale Kubernetes cloud controller](https://github.com/exoscale/exoscale-cloud-controller-manager). +This means that we need to set up a HTTP(S) loadbalancer in front of all workers and set the Ingress controller to DaemonSet mode. diff --git a/kubespray/contrib/terraform/exoscale/default.tfvars b/kubespray/contrib/terraform/exoscale/default.tfvars new file mode 100644 index 0000000..2bcbef5 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/default.tfvars @@ -0,0 +1,65 @@ +prefix = "default" +zone = "ch-gva-2" + +inventory_file = "inventory.ini" + +ssh_public_keys = [ + # Put your public SSH key here + "ssh-rsa I-did-not-read-the-docs", + "ssh-rsa I-did-not-read-the-docs 2", +] + +machines = { + "master-0" : { + "node_type" : "master", + "size" : "Medium", + "boot_disk" : { + "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", + "root_partition_size" : 50, + "node_local_partition_size" : 0, + "ceph_partition_size" : 0 + } + }, + "worker-0" : { + "node_type" : "worker", + "size" : "Large", + "boot_disk" : { + "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", + "root_partition_size" : 50, + "node_local_partition_size" : 0, + "ceph_partition_size" : 0 + } + }, + "worker-1" : { + "node_type" : "worker", + "size" : "Large", + "boot_disk" : { + "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", + "root_partition_size" : 50, + "node_local_partition_size" : 0, + "ceph_partition_size" : 0 + } + }, + "worker-2" : { + "node_type" : "worker", + "size" : "Large", + "boot_disk" : { + "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", + "root_partition_size" : 50, + "node_local_partition_size" : 0, + "ceph_partition_size" : 0 + } + } +} + +nodeport_whitelist = [ + "0.0.0.0/0" +] + +ssh_whitelist = [ + "0.0.0.0/0" +] + +api_server_whitelist = [ + "0.0.0.0/0" +] diff --git a/kubespray/contrib/terraform/exoscale/main.tf b/kubespray/contrib/terraform/exoscale/main.tf new file mode 100644 index 0000000..eb9fcab --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/main.tf @@ -0,0 +1,49 @@ +provider "exoscale" {} + +module "kubernetes" { + source = "./modules/kubernetes-cluster" + + prefix = var.prefix + zone = var.zone + machines = var.machines + + ssh_public_keys = var.ssh_public_keys + + ssh_whitelist = var.ssh_whitelist + api_server_whitelist = var.api_server_whitelist + nodeport_whitelist = var.nodeport_whitelist +} + +# +# Generate ansible inventory +# + +data "template_file" "inventory" { + template = file("${path.module}/templates/inventory.tpl") + + vars = { + 
connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d", + keys(module.kubernetes.master_ip_addresses), + values(module.kubernetes.master_ip_addresses).*.public_ip, + values(module.kubernetes.master_ip_addresses).*.private_ip, + range(1, length(module.kubernetes.master_ip_addresses) + 1))) + connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s", + keys(module.kubernetes.worker_ip_addresses), + values(module.kubernetes.worker_ip_addresses).*.public_ip, + values(module.kubernetes.worker_ip_addresses).*.private_ip)) + + list_master = join("\n", keys(module.kubernetes.master_ip_addresses)) + list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses)) + api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address + } +} + +resource "null_resource" "inventories" { + provisioner "local-exec" { + command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}" + } + + triggers = { + template = data.template_file.inventory.rendered + } +} diff --git a/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf b/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf new file mode 100644 index 0000000..3171b00 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf @@ -0,0 +1,193 @@ +data "exoscale_compute_template" "os_image" { + for_each = var.machines + + zone = var.zone + name = each.value.boot_disk.image_name +} + +data "exoscale_compute" "master_nodes" { + for_each = exoscale_compute.master + + id = each.value.id + + # Since private IP address is not assigned until the nics are created we need this + depends_on = [exoscale_nic.master_private_network_nic] +} + +data "exoscale_compute" "worker_nodes" { + for_each = exoscale_compute.worker + + id = each.value.id + + # Since private IP address is not assigned until the nics are created we need this + depends_on = [exoscale_nic.worker_private_network_nic] +} + +resource "exoscale_network" "private_network" { + zone = var.zone + name = "${var.prefix}-network" + + start_ip = cidrhost(var.private_network_cidr, 1) + # cidr -1 = Broadcast address + # cidr -2 = DHCP server address (exoscale specific) + end_ip = cidrhost(var.private_network_cidr, -3) + netmask = cidrnetmask(var.private_network_cidr) +} + +resource "exoscale_compute" "master" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "master" + } + + display_name = "${var.prefix}-${each.key}" + template_id = data.exoscale_compute_template.os_image[each.key].id + size = each.value.size + disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size + state = "Running" + zone = var.zone + security_groups = [exoscale_security_group.master_sg.name] + + user_data = templatefile( + "${path.module}/templates/cloud-init.tmpl", + { + eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address + node_local_partition_size = each.value.boot_disk.node_local_partition_size + ceph_partition_size = each.value.boot_disk.ceph_partition_size + root_partition_size = each.value.boot_disk.root_partition_size + node_type = "master" + ssh_public_keys = var.ssh_public_keys + } + ) +} + +resource "exoscale_compute" "worker" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "worker" + } + + display_name = "${var.prefix}-${each.key}" + 
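# The boot disk is sized as the sum of the root, node-local and ceph partition sizes
+  # declared below; when those optional sizes are non-zero, the cloud-init template
+  # partitions the disk for Rook/Ceph and node-local storage. +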
template_id = data.exoscale_compute_template.os_image[each.key].id + size = each.value.size + disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size + state = "Running" + zone = var.zone + security_groups = [exoscale_security_group.worker_sg.name] + + user_data = templatefile( + "${path.module}/templates/cloud-init.tmpl", + { + eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address + node_local_partition_size = each.value.boot_disk.node_local_partition_size + ceph_partition_size = each.value.boot_disk.ceph_partition_size + root_partition_size = each.value.boot_disk.root_partition_size + node_type = "worker" + ssh_public_keys = var.ssh_public_keys + } + ) +} + +resource "exoscale_nic" "master_private_network_nic" { + for_each = exoscale_compute.master + + compute_id = each.value.id + network_id = exoscale_network.private_network.id +} + +resource "exoscale_nic" "worker_private_network_nic" { + for_each = exoscale_compute.worker + + compute_id = each.value.id + network_id = exoscale_network.private_network.id +} + +resource "exoscale_security_group" "master_sg" { + name = "${var.prefix}-master-sg" + description = "Security group for Kubernetes masters" +} + +resource "exoscale_security_group_rules" "master_sg_rules" { + security_group_id = exoscale_security_group.master_sg.id + + # SSH + ingress { + protocol = "TCP" + cidr_list = var.ssh_whitelist + ports = ["22"] + } + + # Kubernetes API + ingress { + protocol = "TCP" + cidr_list = var.api_server_whitelist + ports = ["6443"] + } +} + +resource "exoscale_security_group" "worker_sg" { + name = "${var.prefix}-worker-sg" + description = "security group for kubernetes worker nodes" +} + +resource "exoscale_security_group_rules" "worker_sg_rules" { + security_group_id = exoscale_security_group.worker_sg.id + + # SSH + ingress { + protocol = "TCP" + cidr_list = var.ssh_whitelist + ports = ["22"] + } + + # HTTP(S) + ingress { + protocol = "TCP" + cidr_list = ["0.0.0.0/0"] + ports = ["80", "443"] + } + + # Kubernetes Nodeport + ingress { + protocol = "TCP" + cidr_list = var.nodeport_whitelist + ports = ["30000-32767"] + } +} + +resource "exoscale_ipaddress" "ingress_controller_lb" { + zone = var.zone + healthcheck_mode = "http" + healthcheck_port = 80 + healthcheck_path = "/healthz" + healthcheck_interval = 10 + healthcheck_timeout = 2 + healthcheck_strikes_ok = 2 + healthcheck_strikes_fail = 3 +} + +resource "exoscale_secondary_ipaddress" "ingress_controller_lb" { + for_each = exoscale_compute.worker + + compute_id = each.value.id + ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address +} + +resource "exoscale_ipaddress" "control_plane_lb" { + zone = var.zone + healthcheck_mode = "tcp" + healthcheck_port = 6443 + healthcheck_interval = 10 + healthcheck_timeout = 2 + healthcheck_strikes_ok = 2 + healthcheck_strikes_fail = 3 +} + +resource "exoscale_secondary_ipaddress" "control_plane_lb" { + for_each = exoscale_compute.master + + compute_id = each.value.id + ip_address = exoscale_ipaddress.control_plane_lb.ip_address +} diff --git a/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/output.tf b/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/output.tf new file mode 100644 index 0000000..bb80b5b --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/output.tf @@ -0,0 +1,31 @@ +output "master_ip_addresses" { + value = { + for key, instance in exoscale_compute.master : + 
instance.name => { + "private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : "" + "public_ip" = exoscale_compute.master[key].ip_address + } + } +} + +output "worker_ip_addresses" { + value = { + for key, instance in exoscale_compute.worker : + instance.name => { + "private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : "" + "public_ip" = exoscale_compute.worker[key].ip_address + } + } +} + +output "cluster_private_network_cidr" { + value = var.private_network_cidr +} + +output "ingress_controller_lb_ip_address" { + value = exoscale_ipaddress.ingress_controller_lb.ip_address +} + +output "control_plane_lb_ip_address" { + value = exoscale_ipaddress.control_plane_lb.ip_address +} diff --git a/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl b/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl new file mode 100644 index 0000000..a81b8e3 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl @@ -0,0 +1,52 @@ +#cloud-config +%{ if ceph_partition_size > 0 || node_local_partition_size > 0} +bootcmd: +- [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, /dev/vda ] +%{ if node_local_partition_size > 0 } + # Create partition for node local storage +- [ cloud-init-per, once, create-node-local-part, parted, --script, /dev/vda, 'mkpart extended ext4 ${root_partition_size}GB %{ if ceph_partition_size == 0 }-1%{ else }${root_partition_size + node_local_partition_size}GB%{ endif }' ] +- [ cloud-init-per, once, create-fs-node-local-part, mkfs.ext4, /dev/vda2 ] +%{ endif } +%{ if ceph_partition_size > 0 } + # Create partition for rook to use for ceph +- [ cloud-init-per, once, create-ceph-part, parted, --script, /dev/vda, 'mkpart extended ${root_partition_size + node_local_partition_size}GB -1' ] +%{ endif } +%{ endif } + +ssh_authorized_keys: +%{ for ssh_public_key in ssh_public_keys ~} + - ${ssh_public_key} +%{ endfor ~} + +write_files: + - path: /etc/netplan/eth1.yaml + content: | + network: + version: 2 + ethernets: + eth1: + dhcp4: true +%{ if node_type == "worker" } + # TODO: When a VM is seen as healthy and is added to the EIP loadbalancer + # pool it no longer can send traffic back to itself via the EIP IP + # address. + # Remove this if it ever gets solved. + - path: /etc/netplan/20-eip-fix.yaml + content: | + network: + version: 2 + ethernets: + "lo:0": + match: + name: lo + dhcp4: false + addresses: + - ${eip_ip_address}/32 +%{ endif } +runcmd: + - netplan apply +%{ if node_local_partition_size > 0 } + - mkdir -p /mnt/disks/node-local-storage + - chown nobody:nogroup /mnt/disks/node-local-storage + - mount /dev/vda2 /mnt/disks/node-local-storage +%{ endif } diff --git a/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/variables.tf b/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/variables.tf new file mode 100644 index 0000000..c466abf --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/variables.tf @@ -0,0 +1,42 @@ +variable "zone" { + type = string + # This is currently the only zone that is supposed to be supporting + # so called "managed private networks". 
+ # See: https://www.exoscale.com/syslog/introducing-managed-private-networks + default = "ch-gva-2" +} + +variable "prefix" {} + +variable "machines" { + type = map(object({ + node_type = string + size = string + boot_disk = object({ + image_name = string + root_partition_size = number + ceph_partition_size = number + node_local_partition_size = number + }) + })) +} + +variable "ssh_public_keys" { + type = list(string) +} + +variable "ssh_whitelist" { + type = list(string) +} + +variable "api_server_whitelist" { + type = list(string) +} + +variable "nodeport_whitelist" { + type = list(string) +} + +variable "private_network_cidr" { + default = "172.0.10.0/24" +} diff --git a/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf b/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf new file mode 100644 index 0000000..6f60994 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + exoscale = { + source = "exoscale/exoscale" + version = ">= 0.21" + } + } + required_version = ">= 0.13" +} diff --git a/kubespray/contrib/terraform/exoscale/output.tf b/kubespray/contrib/terraform/exoscale/output.tf new file mode 100644 index 0000000..09bf7fa --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/output.tf @@ -0,0 +1,15 @@ +output "master_ips" { + value = module.kubernetes.master_ip_addresses +} + +output "worker_ips" { + value = module.kubernetes.worker_ip_addresses +} + +output "ingress_controller_lb_ip_address" { + value = module.kubernetes.ingress_controller_lb_ip_address +} + +output "control_plane_lb_ip_address" { + value = module.kubernetes.control_plane_lb_ip_address +} diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/cluster.tfvars b/kubespray/contrib/terraform/exoscale/sample-inventory/cluster.tfvars new file mode 100644 index 0000000..f615241 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/cluster.tfvars @@ -0,0 +1,65 @@ +prefix = "default" +zone = "ch-gva-2" + +inventory_file = "inventory.ini" + +ssh_public_keys = [ + # Put your public SSH key here + "ssh-rsa I-did-not-read-the-docs", + "ssh-rsa I-did-not-read-the-docs 2", +] + +machines = { + "master-0" : { + "node_type" : "master", + "size" : "Small", + "boot_disk" : { + "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", + "root_partition_size" : 50, + "node_local_partition_size" : 0, + "ceph_partition_size" : 0 + } + }, + "worker-0" : { + "node_type" : "worker", + "size" : "Large", + "boot_disk" : { + "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", + "root_partition_size" : 50, + "node_local_partition_size" : 0, + "ceph_partition_size" : 0 + } + }, + "worker-1" : { + "node_type" : "worker", + "size" : "Large", + "boot_disk" : { + "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", + "root_partition_size" : 50, + "node_local_partition_size" : 0, + "ceph_partition_size" : 0 + } + }, + "worker-2" : { + "node_type" : "worker", + "size" : "Large", + "boot_disk" : { + "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", + "root_partition_size" : 50, + "node_local_partition_size" : 0, + "ceph_partition_size" : 0 + } + } +} + +nodeport_whitelist = [ + "0.0.0.0/0" +] + +ssh_whitelist = [ + "0.0.0.0/0" +] + +api_server_whitelist = [ + "0.0.0.0/0" +] diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/all.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/all.yml new file mode 100644 index 0000000..b9639a8 --- 
/dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/all.yml @@ -0,0 +1,140 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If the cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## And must be set port 6443 +loadbalancer_apiserver_port: 6443 + +## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. 
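+## For example (illustrative values only, adjust to your environment):
+# additional_no_proxy: ".internal.example.com,192.168.0.0/16"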
+# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. 
+ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/aws.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/azure.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/containerd.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# 
containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/coreos.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/cri-o.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/docker.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. 
+# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registry,example China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value. +## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/etcd.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. 
+# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/gcp.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/hcloud.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/oci.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. 
+# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/offline.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ 
files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Falnnel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repo must be available, for EL8, baseos and appstream +### By default we enable those repo automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo 
}}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/openstack.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git 
a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/upcloud.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/vsphere.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/etcd.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. 
+# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. +# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/addons.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage 
snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. 
+# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: "layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. 
+kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. 
both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). 
+# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. 
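For example, enforcing node allocatable for pods while carving out headroom for the OS daemons could look like the following sketch, built from the commented defaults shown below (values are illustrative, not a recommendation shipped with this change):

```yaml
kubelet_enforce_node_allocatable: pods   # see the allocatable note above
system_reserved: true
system_memory_reserved: 512Mi            # memory held back for OS daemons
system_cpu_reserved: 500m                # CPU held back for OS daemons
```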
+# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. +# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. 
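As an illustration of the two TLS knobs above, a hardened profile might pin TLS 1.2+ and keep only a GCM (AEAD) subset of the suites listed below; this is a sketch, and the suite list should be matched to the clients actually in use:

```yaml
tls_min_version: VersionTLS12
tls_cipher_suites:
  - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
  - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
```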
+# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). +## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. 
+# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enable BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN is mutualy exclusive modes. +# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want use the default route interface when you use multiple interface with dynamique route (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tunning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. 
+# If left blank, then the interface is choosing using the node's +# default route. +# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. +# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all conneted clusters and +# in the range of 1 and 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. 
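A sketch of that side-by-side setup, assuming the primary CNI stays as configured in k8s-cluster.yml and Cilium is only added to take over the kube-proxy role (values illustrative):

```yaml
cilium_deploy_additionally: true
cilium_kube_proxy_replacement: strict   # replacement mode, see the strict/probe/partial note above
cilium_enable_prometheus: true          # optional: expose agent metrics
```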
+# cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). +# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. 
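Putting the ip-masq-agent variables above together, an illustrative configuration that only masquerades traffic leaving the RFC1918 ranges might look like this (a sketch; trim the CIDR list to your own network):

```yaml
cilium_ip_masq_agent_enable: true
cilium_non_masquerade_cidrs:
  - 10.0.0.0/8
  - 172.16.0.0/12
  - 192.168.0.0/16
cilium_masq_link_local: false        # false: 169.254.0.0/16 is appended to the non-masquerade list
cilium_ip_masq_resync_interval: 60s  # how often the agent reloads its config from disk
```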
+# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. +# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. 
+# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. +# cilium_disable_cnp_status_updates: true diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic to support container network can be a nic name or a group of regex separated by comma e.g: 'enp6s0f0,eth.*', if empty will use the nic that the default route use. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bind to the physical port. 
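For instance, pinning the container NIC and MTU and enabling hardware offload could be sketched as follows (the interface name and MTU are placeholders for whatever the hosts actually expose):

```yaml
kube_ovn_iface: eth1          # or a regex group such as 'enp6s0f0,eth.*'
kube_ovn_mtu: 1400            # default is the interface MTU minus 100
kube_ovn_hw_offload: true     # needs an IP address bound to the physical port
kube_ovn_traffic_mirror: false  # hw-offload requires traffic mirroring to be disabled
```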
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
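As a concrete illustration of the BGP-related variables above, peering every node with a single upstream router could be sketched as follows (the ASNs and peer address are documentation examples, not real infrastructure):

```yaml
kube_router_run_router: true            # advertise and learn pod routes via iBGP
kube_router_advertise_cluster_ip: true  # also announce Service cluster IPs
kube_router_cluster_asn: 64512
kube_router_peer_router_asns: 64513
kube_router_peer_router_ips: 192.0.2.1
```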
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. 
The feature works +# only with Weave IPAM (default). +# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/contrib/terraform/exoscale/templates/inventory.tpl b/kubespray/contrib/terraform/exoscale/templates/inventory.tpl new file mode 100644 index 0000000..85ed192 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/templates/inventory.tpl @@ -0,0 +1,19 @@ +[all] +${connection_strings_master} +${connection_strings_worker} + +[kube_control_plane] +${list_master} + +[kube_control_plane:vars] +supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ] + +[etcd] +${list_master} + +[kube_node] +${list_worker} + +[k8s_cluster:children] +kube_control_plane +kube_node diff --git a/kubespray/contrib/terraform/exoscale/variables.tf b/kubespray/contrib/terraform/exoscale/variables.tf new file mode 100644 index 0000000..14f8455 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/variables.tf @@ -0,0 +1,46 @@ +variable "zone" { + description = "The zone where to run the cluster" +} + +variable "prefix" { + description = "Prefix for resource names" + default = "default" +} + +variable "machines" { + description = "Cluster machines" + type = map(object({ + node_type = string + size = string + boot_disk = object({ + image_name = string + root_partition_size = number + ceph_partition_size = number + node_local_partition_size = number + }) + })) +} + +variable "ssh_public_keys" { + description = "List of public SSH keys which are injected into the VMs." 
+ type = list(string) +} + +variable "ssh_whitelist" { + description = "List of IP ranges (CIDR) to whitelist for ssh" + type = list(string) +} + +variable "api_server_whitelist" { + description = "List of IP ranges (CIDR) to whitelist for kubernetes api server" + type = list(string) +} + +variable "nodeport_whitelist" { + description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports" + type = list(string) +} + +variable "inventory_file" { + description = "Where to store the generated inventory file" +} diff --git a/kubespray/contrib/terraform/exoscale/versions.tf b/kubespray/contrib/terraform/exoscale/versions.tf new file mode 100644 index 0000000..0333b41 --- /dev/null +++ b/kubespray/contrib/terraform/exoscale/versions.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + exoscale = { + source = "exoscale/exoscale" + version = ">= 0.21" + } + null = { + source = "hashicorp/null" + } + template = { + source = "hashicorp/template" + } + } + required_version = ">= 0.13" +} diff --git a/kubespray/contrib/terraform/gcp/README.md b/kubespray/contrib/terraform/gcp/README.md new file mode 100644 index 0000000..c3e6eec --- /dev/null +++ b/kubespray/contrib/terraform/gcp/README.md @@ -0,0 +1,99 @@ +# Kubernetes on GCP with Terraform + +Provision a Kubernetes cluster on GCP using Terraform and Kubespray + +## Overview + +The setup looks like following + +```text + Kubernetes cluster + +-----------------------+ ++---------------+ | +--------------+ | +| | | | +--------------+ | +| API server LB +---------> | | | | +| | | | | Master/etcd | | ++---------------+ | | | node(s) | | + | +-+ | | + | +--------------+ | + | ^ | + | | | + | v | ++---------------+ | +--------------+ | +| | | | +--------------+ | +| Ingress LB +---------> | | | | +| | | | | Worker | | ++---------------+ | | | node(s) | | + | +-+ | | + | +--------------+ | + +-----------------------+ +``` + +## Requirements + +* Terraform 0.12.0 or newer + +## Quickstart + +To get a cluster up and running you'll need a JSON keyfile. +Set the path to the file in the `tfvars.json` file and run the following: + +```bash +terraform apply -var-file tfvars.json -state dev-cluster.tfstate -var gcp_project_id= -var keyfile_location= +``` + +To generate kubespray inventory based on the terraform state file you can run the following: + +```bash +./generate-inventory.sh dev-cluster.tfstate > inventory.ini +``` + +You should now have a inventory file named `inventory.ini` that you can use with kubespray, e.g. + +```bash +ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v +``` + +## Variables + +### Required + +* `keyfile_location`: Location to the keyfile to use as credentials for the google terraform provider +* `gcp_project_id`: ID of the GCP project to deploy the cluster in +* `ssh_pub_key`: Path to public ssh key to use for all machines +* `region`: The region where to run the cluster +* `machines`: Machines to provision. Key of this object will be used as the name of the machine + * `node_type`: The role of this node *(master|worker)* + * `size`: The size to use + * `zone`: The zone the machine should run in + * `additional_disks`: Extra disks to add to the machine. 
Key of this object will be used as the disk name + * `size`: Size of the disk (in GB) + * `boot_disk`: The boot disk to use + * `image_name`: Name of the image + * `size`: Size of the boot disk (in GB) +* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes +* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server +* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports) +* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443 + +### Optional + +* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)* +* `master_sa_email`: Service account email to use for the control plane nodes *(Defaults to `""`, auto generate one)* +* `master_sa_scopes`: Service account email to use for the control plane nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)* +* `master_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible) + for the control plane nodes *(Defaults to `false`)* +* `master_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types) + for extra disks added on the control plane nodes *(Defaults to `"pd-ssd"`)* +* `worker_sa_email`: Service account email to use for the worker nodes *(Defaults to `""`, auto generate one)* +* `worker_sa_scopes`: Service account email to use for the worker nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)* +* `worker_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible) + for the worker nodes *(Defaults to `false`)* +* `worker_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types) + for extra disks added on the worker nodes *(Defaults to `"pd-ssd"`)* + +An example variables file can be found `tfvars.json` + +## Known limitations + +This solution does not provide a solution to use a bastion host. Thus all the nodes must expose a public IP for kubespray to work. diff --git a/kubespray/contrib/terraform/gcp/generate-inventory.sh b/kubespray/contrib/terraform/gcp/generate-inventory.sh new file mode 100755 index 0000000..585a4f4 --- /dev/null +++ b/kubespray/contrib/terraform/gcp/generate-inventory.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +# +# Generates a inventory file based on the terraform output. +# After provisioning a cluster, simply run this command and supply the terraform state file +# Default state file is terraform.tfstate +# + +set -e + +usage () { + echo "Usage: $0 " >&2 + exit 1 +} + +if [[ $# -ne 1 ]]; then + usage +fi + +TF_STATE_FILE=${1} + +if [[ ! -f "${TF_STATE_FILE}" ]]; then + echo "ERROR: state file ${TF_STATE_FILE} doesn't exist" >&2 + usage +fi + +TF_OUT=$(terraform output -state "${TF_STATE_FILE}" -json) + +MASTERS=$(jq -r '.master_ips.value | to_entries[]' <(echo "${TF_OUT}")) +WORKERS=$(jq -r '.worker_ips.value | to_entries[]' <(echo "${TF_OUT}")) +mapfile -t MASTER_NAMES < <(jq -r '.key' <(echo "${MASTERS}")) +mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}")) + +API_LB=$(jq -r '.control_plane_lb_ip_address.value' <(echo "${TF_OUT}")) + +# Generate master hosts +i=1 +for name in "${MASTER_NAMES[@]}"; do + private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${MASTERS}")) + public_ip=$(jq -r '. 
| select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${MASTERS}")) + echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip} etcd_member_name=etcd${i}" + i=$(( i + 1 )) +done + +# Generate worker hosts +for name in "${WORKER_NAMES[@]}"; do + private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}")) + public_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${WORKERS}")) + echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip}" +done + +echo "" +echo "[kube_control_plane]" +for name in "${MASTER_NAMES[@]}"; do + echo "${name}" +done + +echo "" +echo "[kube_control_plane:vars]" +echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate +echo "" +echo "[etcd]" +for name in "${MASTER_NAMES[@]}"; do + echo "${name}" +done + +echo "" +echo "[kube_node]" +for name in "${WORKER_NAMES[@]}"; do + echo "${name}" +done + +echo "" +echo "[k8s_cluster:children]" +echo "kube_control_plane" +echo "kube_node" diff --git a/kubespray/contrib/terraform/gcp/main.tf b/kubespray/contrib/terraform/gcp/main.tf new file mode 100644 index 0000000..a908377 --- /dev/null +++ b/kubespray/contrib/terraform/gcp/main.tf @@ -0,0 +1,37 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google" + version = "~> 4.0" + } + } +} + +provider "google" { + credentials = file(var.keyfile_location) + region = var.region + project = var.gcp_project_id +} + +module "kubernetes" { + source = "./modules/kubernetes-cluster" + region = var.region + prefix = var.prefix + + machines = var.machines + ssh_pub_key = var.ssh_pub_key + + master_sa_email = var.master_sa_email + master_sa_scopes = var.master_sa_scopes + master_preemptible = var.master_preemptible + master_additional_disk_type = var.master_additional_disk_type + worker_sa_email = var.worker_sa_email + worker_sa_scopes = var.worker_sa_scopes + worker_preemptible = var.worker_preemptible + worker_additional_disk_type = var.worker_additional_disk_type + + ssh_whitelist = var.ssh_whitelist + api_server_whitelist = var.api_server_whitelist + nodeport_whitelist = var.nodeport_whitelist + ingress_whitelist = var.ingress_whitelist +} diff --git a/kubespray/contrib/terraform/gcp/modules/kubernetes-cluster/main.tf b/kubespray/contrib/terraform/gcp/modules/kubernetes-cluster/main.tf new file mode 100644 index 0000000..3ad64ca --- /dev/null +++ b/kubespray/contrib/terraform/gcp/modules/kubernetes-cluster/main.tf @@ -0,0 +1,400 @@ +################################################# +## +## General +## + +resource "google_compute_network" "main" { + name = "${var.prefix}-network" + + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "main" { + name = "${var.prefix}-subnet" + network = google_compute_network.main.name + ip_cidr_range = var.private_network_cidr + region = var.region +} + +resource "google_compute_firewall" "deny_all" { + name = "${var.prefix}-default-firewall" + network = google_compute_network.main.name + + priority = 1000 + + source_ranges = ["0.0.0.0/0"] + + deny { + protocol = "all" + } +} + +resource "google_compute_firewall" "allow_internal" { + name = "${var.prefix}-internal-firewall" + network = google_compute_network.main.name + + priority = 500 + + source_ranges = [var.private_network_cidr] + + allow { + protocol = "all" + } +} + +resource "google_compute_firewall" "ssh" { + count = length(var.ssh_whitelist) > 0 ? 
1 : 0 + + name = "${var.prefix}-ssh-firewall" + network = google_compute_network.main.name + + priority = 100 + + source_ranges = var.ssh_whitelist + + allow { + protocol = "tcp" + ports = ["22"] + } +} + +resource "google_compute_firewall" "api_server" { + count = length(var.api_server_whitelist) > 0 ? 1 : 0 + + name = "${var.prefix}-api-server-firewall" + network = google_compute_network.main.name + + priority = 100 + + source_ranges = var.api_server_whitelist + + allow { + protocol = "tcp" + ports = ["6443"] + } +} + +resource "google_compute_firewall" "nodeport" { + count = length(var.nodeport_whitelist) > 0 ? 1 : 0 + + name = "${var.prefix}-nodeport-firewall" + network = google_compute_network.main.name + + priority = 100 + + source_ranges = var.nodeport_whitelist + + allow { + protocol = "tcp" + ports = ["30000-32767"] + } +} + +resource "google_compute_firewall" "ingress_http" { + count = length(var.ingress_whitelist) > 0 ? 1 : 0 + + name = "${var.prefix}-http-ingress-firewall" + network = google_compute_network.main.name + + priority = 100 + + source_ranges = var.ingress_whitelist + + allow { + protocol = "tcp" + ports = ["80"] + } +} + +resource "google_compute_firewall" "ingress_https" { + count = length(var.ingress_whitelist) > 0 ? 1 : 0 + + name = "${var.prefix}-https-ingress-firewall" + network = google_compute_network.main.name + + priority = 100 + + source_ranges = var.ingress_whitelist + + allow { + protocol = "tcp" + ports = ["443"] + } +} + +################################################# +## +## Local variables +## + +locals { + master_target_list = [ + for name, machine in google_compute_instance.master : + "${machine.zone}/${machine.name}" + ] + + worker_target_list = [ + for name, machine in google_compute_instance.worker : + "${machine.zone}/${machine.name}" + ] + + master_disks = flatten([ + for machine_name, machine in var.machines : [ + for disk_name, disk in machine.additional_disks : { + "${machine_name}-${disk_name}" = { + "machine_name": machine_name, + "machine": machine, + "disk_size": disk.size, + "disk_name": disk_name + } + } + ] + if machine.node_type == "master" + ]) + + worker_disks = flatten([ + for machine_name, machine in var.machines : [ + for disk_name, disk in machine.additional_disks : { + "${machine_name}-${disk_name}" = { + "machine_name": machine_name, + "machine": machine, + "disk_size": disk.size, + "disk_name": disk_name + } + } + ] + if machine.node_type == "worker" + ]) +} + +################################################# +## +## Master +## + +resource "google_compute_address" "master" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "master" + } + + name = "${var.prefix}-${each.key}-pip" + address_type = "EXTERNAL" + region = var.region +} + +resource "google_compute_disk" "master" { + for_each = { + for item in local.master_disks : + keys(item)[0] => values(item)[0] + } + + name = "${var.prefix}-${each.key}" + type = var.master_additional_disk_type + zone = each.value.machine.zone + size = each.value.disk_size + + physical_block_size_bytes = 4096 +} + +resource "google_compute_attached_disk" "master" { + for_each = { + for item in local.master_disks : + keys(item)[0] => values(item)[0] + } + + disk = google_compute_disk.master[each.key].id + instance = google_compute_instance.master[each.value.machine_name].id +} + +resource "google_compute_instance" "master" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "master" + } + + name = 
"${var.prefix}-${each.key}" + machine_type = each.value.size + zone = each.value.zone + + tags = ["master"] + + boot_disk { + initialize_params { + image = each.value.boot_disk.image_name + size = each.value.boot_disk.size + } + } + + network_interface { + subnetwork = google_compute_subnetwork.main.name + + access_config { + nat_ip = google_compute_address.master[each.key].address + } + } + + metadata = { + ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}" + } + + service_account { + email = var.master_sa_email + scopes = var.master_sa_scopes + } + + # Since we use google_compute_attached_disk we need to ignore this + lifecycle { + ignore_changes = [attached_disk] + } + + scheduling { + preemptible = var.master_preemptible + automatic_restart = !var.master_preemptible + } +} + +resource "google_compute_forwarding_rule" "master_lb" { + count = length(var.api_server_whitelist) > 0 ? 1 : 0 + + name = "${var.prefix}-master-lb-forward-rule" + + port_range = "6443" + + target = google_compute_target_pool.master_lb[count.index].id +} + +resource "google_compute_target_pool" "master_lb" { + count = length(var.api_server_whitelist) > 0 ? 1 : 0 + + name = "${var.prefix}-master-lb-pool" + instances = local.master_target_list +} + +################################################# +## +## Worker +## + +resource "google_compute_disk" "worker" { + for_each = { + for item in local.worker_disks : + keys(item)[0] => values(item)[0] + } + + name = "${var.prefix}-${each.key}" + type = var.worker_additional_disk_type + zone = each.value.machine.zone + size = each.value.disk_size + + physical_block_size_bytes = 4096 +} + +resource "google_compute_attached_disk" "worker" { + for_each = { + for item in local.worker_disks : + keys(item)[0] => values(item)[0] + } + + disk = google_compute_disk.worker[each.key].id + instance = google_compute_instance.worker[each.value.machine_name].id +} + +resource "google_compute_address" "worker" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "worker" + } + + name = "${var.prefix}-${each.key}-pip" + address_type = "EXTERNAL" + region = var.region +} + +resource "google_compute_instance" "worker" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "worker" + } + + name = "${var.prefix}-${each.key}" + machine_type = each.value.size + zone = each.value.zone + + tags = ["worker"] + + boot_disk { + initialize_params { + image = each.value.boot_disk.image_name + size = each.value.boot_disk.size + } + } + + network_interface { + subnetwork = google_compute_subnetwork.main.name + + access_config { + nat_ip = google_compute_address.worker[each.key].address + } + } + + metadata = { + ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}" + } + + service_account { + email = var.worker_sa_email + scopes = var.worker_sa_scopes + } + + # Since we use google_compute_attached_disk we need to ignore this + lifecycle { + ignore_changes = [attached_disk] + } + + scheduling { + preemptible = var.worker_preemptible + automatic_restart = !var.worker_preemptible + } +} + +resource "google_compute_address" "worker_lb" { + count = length(var.ingress_whitelist) > 0 ? 1 : 0 + + name = "${var.prefix}-worker-lb-address" + address_type = "EXTERNAL" + region = var.region +} + +resource "google_compute_forwarding_rule" "worker_http_lb" { + count = length(var.ingress_whitelist) > 0 ? 
1 : 0 + + name = "${var.prefix}-worker-http-lb-forward-rule" + + ip_address = google_compute_address.worker_lb[count.index].address + port_range = "80" + + target = google_compute_target_pool.worker_lb[count.index].id +} + +resource "google_compute_forwarding_rule" "worker_https_lb" { + count = length(var.ingress_whitelist) > 0 ? 1 : 0 + + name = "${var.prefix}-worker-https-lb-forward-rule" + + ip_address = google_compute_address.worker_lb[count.index].address + port_range = "443" + + target = google_compute_target_pool.worker_lb[count.index].id +} + +resource "google_compute_target_pool" "worker_lb" { + count = length(var.ingress_whitelist) > 0 ? 1 : 0 + + name = "${var.prefix}-worker-lb-pool" + instances = local.worker_target_list +} diff --git a/kubespray/contrib/terraform/gcp/modules/kubernetes-cluster/output.tf b/kubespray/contrib/terraform/gcp/modules/kubernetes-cluster/output.tf new file mode 100644 index 0000000..d0ffaa9 --- /dev/null +++ b/kubespray/contrib/terraform/gcp/modules/kubernetes-cluster/output.tf @@ -0,0 +1,27 @@ +output "master_ip_addresses" { + value = { + for key, instance in google_compute_instance.master : + instance.name => { + "private_ip" = instance.network_interface.0.network_ip + "public_ip" = instance.network_interface.0.access_config.0.nat_ip + } + } +} + +output "worker_ip_addresses" { + value = { + for key, instance in google_compute_instance.worker : + instance.name => { + "private_ip" = instance.network_interface.0.network_ip + "public_ip" = instance.network_interface.0.access_config.0.nat_ip + } + } +} + +output "ingress_controller_lb_ip_address" { + value = length(var.ingress_whitelist) > 0 ? google_compute_address.worker_lb.0.address : "" +} + +output "control_plane_lb_ip_address" { + value = length(var.api_server_whitelist) > 0 ? 
google_compute_forwarding_rule.master_lb.0.ip_address : "" +} diff --git a/kubespray/contrib/terraform/gcp/modules/kubernetes-cluster/variables.tf b/kubespray/contrib/terraform/gcp/modules/kubernetes-cluster/variables.tf new file mode 100644 index 0000000..16e616a --- /dev/null +++ b/kubespray/contrib/terraform/gcp/modules/kubernetes-cluster/variables.tf @@ -0,0 +1,75 @@ +variable "region" { + type = string +} + +variable "prefix" {} + +variable "machines" { + type = map(object({ + node_type = string + size = string + zone = string + additional_disks = map(object({ + size = number + })) + boot_disk = object({ + image_name = string + size = number + }) + })) +} + +variable "master_sa_email" { + type = string +} + +variable "master_sa_scopes" { + type = list(string) +} + +variable "master_preemptible" { + type = bool +} + +variable "master_additional_disk_type" { + type = string +} + +variable "worker_sa_email" { + type = string +} + +variable "worker_sa_scopes" { + type = list(string) +} + +variable "worker_preemptible" { + type = bool +} + +variable "worker_additional_disk_type" { + type = string +} + +variable "ssh_pub_key" {} + +variable "ssh_whitelist" { + type = list(string) +} + +variable "api_server_whitelist" { + type = list(string) +} + +variable "nodeport_whitelist" { + type = list(string) +} + +variable "ingress_whitelist" { + type = list(string) + default = ["0.0.0.0/0"] +} + +variable "private_network_cidr" { + default = "10.0.10.0/24" +} diff --git a/kubespray/contrib/terraform/gcp/output.tf b/kubespray/contrib/terraform/gcp/output.tf new file mode 100644 index 0000000..09bf7fa --- /dev/null +++ b/kubespray/contrib/terraform/gcp/output.tf @@ -0,0 +1,15 @@ +output "master_ips" { + value = module.kubernetes.master_ip_addresses +} + +output "worker_ips" { + value = module.kubernetes.worker_ip_addresses +} + +output "ingress_controller_lb_ip_address" { + value = module.kubernetes.ingress_controller_lb_ip_address +} + +output "control_plane_lb_ip_address" { + value = module.kubernetes.control_plane_lb_ip_address +} diff --git a/kubespray/contrib/terraform/gcp/tfvars.json b/kubespray/contrib/terraform/gcp/tfvars.json new file mode 100644 index 0000000..056b8fe --- /dev/null +++ b/kubespray/contrib/terraform/gcp/tfvars.json @@ -0,0 +1,63 @@ +{ + "gcp_project_id": "GCP_PROJECT_ID", + "region": "us-central1", + "ssh_pub_key": "~/.ssh/id_rsa.pub", + + "keyfile_location": "service-account.json", + + "prefix": "development", + + "ssh_whitelist": [ + "1.2.3.4/32" + ], + "api_server_whitelist": [ + "1.2.3.4/32" + ], + "nodeport_whitelist": [ + "1.2.3.4/32" + ], + "ingress_whitelist": [ + "0.0.0.0/0" + ], + + "machines": { + "master-0": { + "node_type": "master", + "size": "n1-standard-2", + "zone": "us-central1-a", + "additional_disks": {}, + "boot_disk": { + "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118", + "size": 50 + } + }, + "worker-0": { + "node_type": "worker", + "size": "n1-standard-8", + "zone": "us-central1-a", + "additional_disks": { + "extra-disk-1": { + "size": 100 + } + }, + "boot_disk": { + "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118", + "size": 50 + } + }, + "worker-1": { + "node_type": "worker", + "size": "n1-standard-8", + "zone": "us-central1-a", + "additional_disks": { + "extra-disk-1": { + "size": 100 + } + }, + "boot_disk": { + "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118", + "size": 50 + } + } + } +} diff --git a/kubespray/contrib/terraform/gcp/variables.tf b/kubespray/contrib/terraform/gcp/variables.tf new file mode 100644 
index 0000000..3b7bd00 --- /dev/null +++ b/kubespray/contrib/terraform/gcp/variables.tf @@ -0,0 +1,97 @@ +variable keyfile_location { + description = "Location of the json keyfile to use with the google provider" + type = string +} + +variable region { + description = "Region of all resources" + type = string +} + +variable gcp_project_id { + description = "ID of the project" + type = string +} + +variable prefix { + description = "Prefix for resource names" + default = "default" +} + +variable machines { + description = "Cluster machines" + type = map(object({ + node_type = string + size = string + zone = string + additional_disks = map(object({ + size = number + })) + boot_disk = object({ + image_name = string + size = number + }) + })) +} + +variable "master_sa_email" { + type = string + default = "" +} + +variable "master_sa_scopes" { + type = list(string) + default = ["https://www.googleapis.com/auth/cloud-platform"] +} + +variable "master_preemptible" { + type = bool + default = false +} + +variable "master_additional_disk_type" { + type = string + default = "pd-ssd" +} + +variable "worker_sa_email" { + type = string + default = "" +} + +variable "worker_sa_scopes" { + type = list(string) + default = ["https://www.googleapis.com/auth/cloud-platform"] +} + +variable "worker_preemptible" { + type = bool + default = false +} + +variable "worker_additional_disk_type" { + type = string + default = "pd-ssd" +} + +variable ssh_pub_key { + description = "Path to public SSH key file which is injected into the VMs." + type = string +} + +variable ssh_whitelist { + type = list(string) +} + +variable api_server_whitelist { + type = list(string) +} + +variable nodeport_whitelist { + type = list(string) +} + +variable "ingress_whitelist" { + type = list(string) + default = ["0.0.0.0/0"] +} diff --git a/kubespray/contrib/terraform/group_vars/all/all.yml b/kubespray/contrib/terraform/group_vars/all/all.yml new file mode 100644 index 0000000..b9639a8 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/all.yml @@ -0,0 +1,140 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If the cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## And must be set port 6443 +loadbalancer_apiserver_port: 6443 + +## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. 
It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. 
+# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. +ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/contrib/terraform/group_vars/all/aws.yml b/kubespray/contrib/terraform/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/contrib/terraform/group_vars/all/azure.yml b/kubespray/contrib/terraform/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. 
+## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/contrib/terraform/group_vars/all/containerd.yml b/kubespray/contrib/terraform/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. 
+# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/group_vars/all/coreos.yml b/kubespray/contrib/terraform/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Whether CoreOS should auto-upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/contrib/terraform/group_vars/all/cri-o.yml b/kubespray/contrib/terraform/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/group_vars/all/docker.yml b/kubespray/contrib/terraform/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as the docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup; it will configure the devicemapper driver on CentOS 7 or RedHat 7. +docker_container_storage_setup: false + +## You must define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true, Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep the last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self-hosted registries. +## Can be an IP address or a domain name. +## For example, define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registry mirrors, for example a China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty, will override the default system MountFlags value. +## This option takes a mount propagation flag: shared, slave +## or private, which controls whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts.
Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/contrib/terraform/group_vars/all/etcd.yml b/kubespray/contrib/terraform/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/contrib/terraform/group_vars/all/gcp.yml b/kubespray/contrib/terraform/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/contrib/terraform/group_vars/all/hcloud.yml b/kubespray/contrib/terraform/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git a/kubespray/contrib/terraform/group_vars/all/oci.yml b/kubespray/contrib/terraform/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. 
+# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/contrib/terraform/group_vars/all/offline.yml b/kubespray/contrib/terraform/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + 
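# --- Illustrative note (editor's sketch, not part of the upstream offline.yml) ---
# Each download-URL override above is a plain Jinja2 template: Ansible substitutes the
# version and architecture facts and prefixes the upstream path with files_repo, so an
# offline mirror only needs to reproduce the upstream directory layout behind one host.
# As a hedged example, assuming files_repo is "http://myprivatehttpd", kubeadm_version
# resolves to "v1.25.5" (the kube_version set elsewhere in this commit) and image_arch
# is "amd64", the kubeadm override above would render to:
#   kubeadm_download_url: "http://myprivatehttpd/storage.googleapis.com/kubernetes-release/release/v1.25.5/bin/linux/amd64/kubeadm"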
+# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Falnnel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repo must be available, for EL8, baseos and appstream +### By default we enable those repo automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# 
containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/contrib/terraform/group_vars/all/openstack.yml b/kubespray/contrib/terraform/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/group_vars/all/upcloud.yml b/kubespray/contrib/terraform/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null 
+++ b/kubespray/contrib/terraform/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/contrib/terraform/group_vars/all/vsphere.yml b/kubespray/contrib/terraform/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/group_vars/etcd.yml b/kubespray/contrib/terraform/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. 
If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. +# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/contrib/terraform/group_vars/k8s_cluster/addons.yml b/kubespray/contrib/terraform/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# 
cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. 
+# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: "layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. 
+kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. 
both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). 
+# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. 
+# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. +# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. 
+# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). +## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. 
+# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enable BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN is mutualy exclusive modes. +# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want use the default route interface when you use multiple interface with dynamique route (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tunning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. +# If left blank, then the interface is choosing using the node's +# default route. 
+# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. +# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all conneted clusters and +# in the range of 1 and 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +# cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). 
+# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. 
+# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. +# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. 
+# cilium_disable_cnp_status_updates: true diff --git a/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic to support container network can be a nic name or a group of regex separated by comma e.g: 'enp6s0f0,eth.*', if empty will use the nic that the default route use. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bind to the physical port. 
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/contrib/terraform/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works +# only with Weave IPAM (default). 
+# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/contrib/terraform/hetzner/README.md b/kubespray/contrib/terraform/hetzner/README.md new file mode 100644 index 0000000..fdc43f9 --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/README.md @@ -0,0 +1,108 @@ +# Kubernetes on Hetzner with Terraform + +Provision a Kubernetes cluster on [Hetzner](https://www.hetzner.com/cloud) using Terraform and Kubespray + +## Overview + +The setup looks like following + +```text + Kubernetes cluster ++--------------------------+ +| +--------------+ | +| | +--------------+ | +| --> | | | | +| | | Master/etcd | | +| | | node(s) | | +| +-+ | | +| +--------------+ | +| ^ | +| | | +| v | +| +--------------+ | +| | +--------------+ | +| --> | | | | +| | | Worker | | +| | | node(s) | | +| +-+ | | +| +--------------+ | ++--------------------------+ +``` + +The nodes uses a private network for node to node communication and a public interface for all external communication. + +## Requirements + +* Terraform 0.14.0 or newer + +## Quickstart + +NOTE: Assumes you are at the root of the kubespray repo. + +For authentication in your cluster you can use the environment variables. + +```bash +export HCLOUD_TOKEN=api-token +``` + +Copy the cluster configuration file. + +```bash +CLUSTER=my-hetzner-cluster +cp -r inventory/sample inventory/$CLUSTER +cp contrib/terraform/hetzner/default.tfvars inventory/$CLUSTER/ +cd inventory/$CLUSTER +``` + +Edit `default.tfvars` to match your requirement. + +Run Terraform to create the infrastructure. + +```bash +terraform init ../../contrib/terraform/hetzner +terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/ +``` + +You should now have a inventory file named `inventory.ini` that you can use with kubespray. +You can use the inventory file with kubespray to set up a cluster. + +It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by: + +```bash +ansible -i inventory.ini -m ping all +``` + +You can setup Kubernetes with kubespray using the generated inventory: + +```bash +ansible-playbook -i inventory.ini ../../cluster.yml -b -v +``` + +## Cloud controller + +For better support with the cloud you can install the [hcloud cloud controller](https://github.com/hetznercloud/hcloud-cloud-controller-manager) and [CSI driver](https://github.com/hetznercloud/csi-driver). + +Please read the instructions in both repos on how to install it. + +## Teardown + +You can teardown your infrastructure using the following Terraform command: + +```bash +terraform destroy --var-file default.tfvars ../../contrib/terraform/hetzner +``` + +## Variables + +* `prefix`: Prefix to add to all resources, if set to "" don't set any prefix +* `ssh_public_keys`: List of public SSH keys to install on all machines +* `zone`: The zone where to run the cluster +* `network_zone`: the network zone where the cluster is running +* `machines`: Machines to provision. 
Key of this object will be used as the name of the machine + * `node_type`: The role of this node *(master|worker)* + * `size`: Size of the VM + * `image`: The image to use for the VM +* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes +* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server +* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports) +* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to kubernetes workers on port 80 and 443 diff --git a/kubespray/contrib/terraform/hetzner/default.tfvars b/kubespray/contrib/terraform/hetzner/default.tfvars new file mode 100644 index 0000000..957b2d5 --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/default.tfvars @@ -0,0 +1,44 @@ +prefix = "default" +zone = "hel1" +network_zone = "eu-central" +inventory_file = "inventory.ini" + +ssh_public_keys = [ + # Put your public SSH key here + "ssh-rsa I-did-not-read-the-docs", + "ssh-rsa I-did-not-read-the-docs 2", +] + +machines = { + "master-0" : { + "node_type" : "master", + "size" : "cx21", + "image" : "ubuntu-20.04", + }, + "worker-0" : { + "node_type" : "worker", + "size" : "cx21", + "image" : "ubuntu-20.04", + }, + "worker-1" : { + "node_type" : "worker", + "size" : "cx21", + "image" : "ubuntu-20.04", + } +} + +nodeport_whitelist = [ + "0.0.0.0/0" +] + +ingress_whitelist = [ + "0.0.0.0/0" +] + +ssh_whitelist = [ + "0.0.0.0/0" +] + +api_server_whitelist = [ + "0.0.0.0/0" +] diff --git a/kubespray/contrib/terraform/hetzner/main.tf b/kubespray/contrib/terraform/hetzner/main.tf new file mode 100644 index 0000000..805c7bf --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/main.tf @@ -0,0 +1,52 @@ +provider "hcloud" {} + +module "kubernetes" { + source = "./modules/kubernetes-cluster" + + prefix = var.prefix + + zone = var.zone + + machines = var.machines + + ssh_public_keys = var.ssh_public_keys + network_zone = var.network_zone + + ssh_whitelist = var.ssh_whitelist + api_server_whitelist = var.api_server_whitelist + nodeport_whitelist = var.nodeport_whitelist + ingress_whitelist = var.ingress_whitelist +} + +# +# Generate ansible inventory +# + +data "template_file" "inventory" { + template = file("${path.module}/templates/inventory.tpl") + + vars = { + connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d", + keys(module.kubernetes.master_ip_addresses), + values(module.kubernetes.master_ip_addresses).*.public_ip, + values(module.kubernetes.master_ip_addresses).*.private_ip, + range(1, length(module.kubernetes.master_ip_addresses) + 1))) + connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s", + keys(module.kubernetes.worker_ip_addresses), + values(module.kubernetes.worker_ip_addresses).*.public_ip, + values(module.kubernetes.worker_ip_addresses).*.private_ip)) + list_master = join("\n", keys(module.kubernetes.master_ip_addresses)) + list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses)) + network_id = module.kubernetes.network_id + } +} + +resource "null_resource" "inventories" { + provisioner "local-exec" { + command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}" + } + + triggers = { + template = data.template_file.inventory.rendered + } +} diff --git a/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf 
b/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf new file mode 100644 index 0000000..d7ec865 --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf @@ -0,0 +1,122 @@ +resource "hcloud_network" "kubernetes" { + name = "${var.prefix}-network" + ip_range = var.private_network_cidr +} + +resource "hcloud_network_subnet" "kubernetes" { + type = "cloud" + network_id = hcloud_network.kubernetes.id + network_zone = var.network_zone + ip_range = var.private_subnet_cidr +} + +resource "hcloud_server" "master" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "master" + } + + name = "${var.prefix}-${each.key}" + image = each.value.image + server_type = each.value.size + location = var.zone + + user_data = templatefile( + "${path.module}/templates/cloud-init.tmpl", + { + ssh_public_keys = var.ssh_public_keys + } + ) + + firewall_ids = [hcloud_firewall.master.id] +} + +resource "hcloud_server_network" "master" { + for_each = hcloud_server.master + + server_id = each.value.id + + subnet_id = hcloud_network_subnet.kubernetes.id +} + +resource "hcloud_server" "worker" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "worker" + } + + name = "${var.prefix}-${each.key}" + image = each.value.image + server_type = each.value.size + location = var.zone + + user_data = templatefile( + "${path.module}/templates/cloud-init.tmpl", + { + ssh_public_keys = var.ssh_public_keys + } + ) + + firewall_ids = [hcloud_firewall.worker.id] + +} + +resource "hcloud_server_network" "worker" { + for_each = hcloud_server.worker + + server_id = each.value.id + + subnet_id = hcloud_network_subnet.kubernetes.id +} + +resource "hcloud_firewall" "master" { + name = "${var.prefix}-master-firewall" + + rule { + direction = "in" + protocol = "tcp" + port = "22" + source_ips = var.ssh_whitelist + } + + rule { + direction = "in" + protocol = "tcp" + port = "6443" + source_ips = var.api_server_whitelist + } +} + +resource "hcloud_firewall" "worker" { + name = "${var.prefix}-worker-firewall" + + rule { + direction = "in" + protocol = "tcp" + port = "22" + source_ips = var.ssh_whitelist + } + + rule { + direction = "in" + protocol = "tcp" + port = "80" + source_ips = var.ingress_whitelist + } + + rule { + direction = "in" + protocol = "tcp" + port = "443" + source_ips = var.ingress_whitelist + } + + rule { + direction = "in" + protocol = "tcp" + port = "30000-32767" + source_ips = var.nodeport_whitelist + } +} diff --git a/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf b/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf new file mode 100644 index 0000000..c6bb276 --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf @@ -0,0 +1,27 @@ +output "master_ip_addresses" { + value = { + for key, instance in hcloud_server.master : + instance.name => { + "private_ip" = hcloud_server_network.master[key].ip + "public_ip" = hcloud_server.master[key].ipv4_address + } + } +} + +output "worker_ip_addresses" { + value = { + for key, instance in hcloud_server.worker : + instance.name => { + "private_ip" = hcloud_server_network.worker[key].ip + "public_ip" = hcloud_server.worker[key].ipv4_address + } + } +} + +output "cluster_private_network_cidr" { + value = var.private_subnet_cidr +} + +output "network_id" { + value = hcloud_network.kubernetes.id +} \ No newline at end of file diff --git 
a/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl b/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl new file mode 100644 index 0000000..c81aef5 --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl @@ -0,0 +1,17 @@ +#cloud-config + +users: + - default + - name: ubuntu + shell: /bin/bash + sudo: "ALL=(ALL) NOPASSWD:ALL" + ssh_authorized_keys: + %{ for ssh_public_key in ssh_public_keys ~} + - ${ssh_public_key} + %{ endfor ~} + +ssh_authorized_keys: +%{ for ssh_public_key in ssh_public_keys ~} + - ${ssh_public_key} +%{ endfor ~} + diff --git a/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf b/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf new file mode 100644 index 0000000..7486e08 --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf @@ -0,0 +1,44 @@ +variable "zone" { + type = string +} + +variable "prefix" {} + +variable "machines" { + type = map(object({ + node_type = string + size = string + image = string + })) +} + +variable "ssh_public_keys" { + type = list(string) +} + +variable "ssh_whitelist" { + type = list(string) +} + +variable "api_server_whitelist" { + type = list(string) +} + +variable "nodeport_whitelist" { + type = list(string) +} + +variable "ingress_whitelist" { + type = list(string) +} + +variable "private_network_cidr" { + default = "10.0.0.0/16" +} + +variable "private_subnet_cidr" { + default = "10.0.10.0/24" +} +variable "network_zone" { + default = "eu-central" +} diff --git a/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf b/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf new file mode 100644 index 0000000..2cea1c2 --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + hcloud = { + source = "hetznercloud/hcloud" + version = "1.31.1" + } + } + required_version = ">= 0.14" +} diff --git a/kubespray/contrib/terraform/hetzner/output.tf b/kubespray/contrib/terraform/hetzner/output.tf new file mode 100644 index 0000000..0336f72 --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/output.tf @@ -0,0 +1,7 @@ +output "master_ips" { + value = module.kubernetes.master_ip_addresses +} + +output "worker_ips" { + value = module.kubernetes.worker_ip_addresses +} diff --git a/kubespray/contrib/terraform/hetzner/templates/inventory.tpl b/kubespray/contrib/terraform/hetzner/templates/inventory.tpl new file mode 100644 index 0000000..56666e1 --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/templates/inventory.tpl @@ -0,0 +1,19 @@ +[all] +${connection_strings_master} +${connection_strings_worker} + +[kube_control_plane] +${list_master} + +[etcd] +${list_master} + +[kube_node] +${list_worker} + +[k8s_cluster:children] +kube-master +kube-node + +[k8s_cluster:vars] +network_id=${network_id} diff --git a/kubespray/contrib/terraform/hetzner/variables.tf b/kubespray/contrib/terraform/hetzner/variables.tf new file mode 100644 index 0000000..e83676a --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/variables.tf @@ -0,0 +1,50 @@ +variable "zone" { + description = "The zone where to run the cluster" +} +variable "network_zone" { + description = "The network zone where the cluster is running" + default = "eu-central" +} + +variable "prefix" { + description = "Prefix for resource names" + default = 
"default" +} + +variable "machines" { + description = "Cluster machines" + type = map(object({ + node_type = string + size = string + image = string + })) +} + +variable "ssh_public_keys" { + description = "Public SSH key which are injected into the VMs." + type = list(string) +} + +variable "ssh_whitelist" { + description = "List of IP ranges (CIDR) to whitelist for ssh" + type = list(string) +} + +variable "api_server_whitelist" { + description = "List of IP ranges (CIDR) to whitelist for kubernetes api server" + type = list(string) +} + +variable "nodeport_whitelist" { + description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports" + type = list(string) +} + +variable "ingress_whitelist" { + description = "List of IP ranges (CIDR) to whitelist for HTTP" + type = list(string) +} + +variable "inventory_file" { + description = "Where to store the generated inventory file" +} diff --git a/kubespray/contrib/terraform/hetzner/versions.tf b/kubespray/contrib/terraform/hetzner/versions.tf new file mode 100644 index 0000000..02e5b74 --- /dev/null +++ b/kubespray/contrib/terraform/hetzner/versions.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + hcloud = { + source = "hetznercloud/hcloud" + version = "1.31.1" + } + null = { + source = "hashicorp/null" + } + template = { + source = "hashicorp/template" + } + } + required_version = ">= 0.14" +} diff --git a/kubespray/contrib/terraform/metal/README.md b/kubespray/contrib/terraform/metal/README.md new file mode 100644 index 0000000..e49e6c9 --- /dev/null +++ b/kubespray/contrib/terraform/metal/README.md @@ -0,0 +1,243 @@ +# Kubernetes on Equinix Metal with Terraform + +Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on +[Equinix Metal](https://metal.equinix.com) ([formerly Packet](https://blog.equinix.com/blog/2020/10/06/equinix-metal-metal-and-more/)). + +## Status + +This will install a Kubernetes cluster on Equinix Metal. It should work in all locations and on most server types. + +## Approach + +The terraform configuration inspects variables found in +[variables.tf](variables.tf) to create resources in your Equinix Metal project. +There is a [python script](../terraform.py) that reads the generated`.tfstate` +file to generate a dynamic inventory that is consumed by [cluster.yml](../../..//cluster.yml) +to actually install Kubernetes with Kubespray. + +### Kubernetes Nodes + +You can create many different kubernetes topologies by setting the number of +different classes of hosts. + +- Master nodes with etcd: `number_of_k8s_masters` variable +- Master nodes without etcd: `number_of_k8s_masters_no_etcd` variable +- Standalone etcd hosts: `number_of_etcd` variable +- Kubernetes worker nodes: `number_of_k8s_nodes` variable + +Note that the Ansible script will report an invalid configuration if you wind up +with an *even number* of etcd instances since that is not a valid configuration. This +restriction includes standalone etcd nodes that are deployed in a cluster along with +master nodes with etcd replicas. As an example, if you have three master nodes with +etcd replicas and three standalone etcd nodes, the script will fail since there are +now six total etcd replicas. 
+ +## Requirements + +- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) +- [Install Ansible dependencies](/docs/ansible.md#installing-ansible) +- Account with Equinix Metal +- An SSH key pair + +## SSH Key Setup + +An SSH keypair is required so Ansible can access the newly provisioned nodes (Equinix Metal hosts). By default, the public SSH key defined in cluster.tfvars will be installed in authorized_key on the newly provisioned nodes (~/.ssh/id_rsa.pub). Terraform will upload this public key and then it will be distributed out to all the nodes. If you have already set this public key in Equinix Metal (i.e. via the portal), then set the public keyfile name in cluster.tfvars to blank to prevent the duplicate key from being uploaded which will cause an error. + +If you don't already have a keypair generated (~/.ssh/id_rsa and ~/.ssh/id_rsa.pub), then a new keypair can be generated with the command: + +```ShellSession +ssh-keygen -f ~/.ssh/id_rsa +``` + +## Terraform + +Terraform will be used to provision all of the Equinix Metal resources with base software as appropriate. + +### Configuration + +#### Inventory files + +Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state): + +```ShellSession +cp -LRp contrib/terraform/metal/sample-inventory inventory/$CLUSTER +cd inventory/$CLUSTER +ln -s ../../contrib/terraform/metal/hosts +``` + +This will be the base for subsequent Terraform commands. + +#### Equinix Metal API access + +Your Equinix Metal API key must be available in the `PACKET_AUTH_TOKEN` environment variable. +This key is typically stored outside of the code repo since it is considered secret. +If someone gets this key, they can startup/shutdown hosts in your project! + +For more information on how to generate an API key or find your project ID, please see +[Accounts Index](https://metal.equinix.com/developers/docs/accounts/). + +The Equinix Metal Project ID associated with the key will be set later in `cluster.tfvars`. + +For more information about the API, please see [Equinix Metal API](https://metal.equinix.com/developers/api/). + +Example: + +```ShellSession +export PACKET_AUTH_TOKEN="Example-API-Token" +``` + +Note that to deploy several clusters within the same project you need to use [terraform workspace](https://www.terraform.io/docs/state/workspaces.html#using-workspaces). + +#### Cluster variables + +The construction of the cluster is driven by values found in +[variables.tf](variables.tf). + +For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. + +The `cluster_name` is used to set a tag on each server deployed as part of this cluster. +This helps when identifying which hosts are associated with each cluster. + +While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values: + +- cluster_name = the name of the inventory directory created above as $CLUSTER +- metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above + +#### Enable localhost access + +Kubespray will pull down a Kubernetes configuration file to access this cluster by enabling the +`kubeconfig_localhost: true` in the Kubespray configuration. 
+ +Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml` and comment back in the following line and change from `false` to `true`: +`\# kubeconfig_localhost: false` +becomes: +`kubeconfig_localhost: true` + +Once the Kubespray playbooks are run, a Kubernetes configuration file will be written to the local host at `inventory/$CLUSTER/artifacts/admin.conf` + +#### Terraform state files + +In the cluster's inventory folder, the following files might be created (either by Terraform +or manually), to prevent you from pushing them accidentally they are in a +`.gitignore` file in the `terraform/metal` directory : + +- `.terraform` +- `.tfvars` +- `.tfstate` +- `.tfstate.backup` + +You can still add them manually if you want to. + +### Initialization + +Before Terraform can operate on your cluster you need to install the required +plugins. This is accomplished as follows: + +```ShellSession +cd inventory/$CLUSTER +terraform init ../../contrib/terraform/metal +``` + +This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules. + +### Provisioning cluster + +You can apply the Terraform configuration to your cluster with the following command +issued from your cluster's inventory directory (`inventory/$CLUSTER`): + +```ShellSession +terraform apply -var-file=cluster.tfvars ../../contrib/terraform/metal +export ANSIBLE_HOST_KEY_CHECKING=False +ansible-playbook -i hosts ../../cluster.yml +``` + +### Destroying cluster + +You can destroy your new cluster with the following command issued from the cluster's inventory directory: + +```ShellSession +terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/metal +``` + +If you've started the Ansible run, it may also be a good idea to do some manual cleanup: + +- Remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file +- Clean up any temporary cache files: `rm /tmp/$CLUSTER-*` + +### Debugging + +You can enable debugging output from Terraform by setting `TF_LOG` to `DEBUG` before running the Terraform command. + +## Ansible + +### Node access + +#### SSH + +Ensure your local ssh-agent is running and your ssh key has been added. This +step is required by the terraform provisioner: + +```ShellSession +eval $(ssh-agent -s) +ssh-add ~/.ssh/id_rsa +``` + +If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`). + +#### Test access + +Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`. + +```ShellSession +$ ansible -i inventory/$CLUSTER/hosts -m ping all +example-k8s_node-1 | SUCCESS => { + "changed": false, + "ping": "pong" +} +example-etcd-1 | SUCCESS => { + "changed": false, + "ping": "pong" +} +example-k8s-master-1 | SUCCESS => { + "changed": false, + "ping": "pong" +} +``` + +If it fails try to connect manually via SSH. It could be something as simple as a stale host key. + +### Deploy Kubernetes + +```ShellSession +ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml +``` + +This will take some time as there are many tasks to run. + +## Kubernetes + +### Set up kubectl + +- [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on the localhost. 
+- Verify that Kubectl runs correctly + +```ShellSession +kubectl version +``` + +- Verify that the Kubernetes configuration file has been copied over + +```ShellSession +cat inventory/alpha/$CLUSTER/admin.conf +``` + +- Verify that all the nodes are running correctly. + +```ShellSession +kubectl version +kubectl --kubeconfig=inventory/$CLUSTER/artifacts/admin.conf get nodes +``` + +## What's next + +Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/). diff --git a/kubespray/contrib/terraform/metal/hosts b/kubespray/contrib/terraform/metal/hosts new file mode 100755 index 0000000..6c89e1c --- /dev/null +++ b/kubespray/contrib/terraform/metal/hosts @@ -0,0 +1,458 @@ +#!/usr/bin/env python3 +# +# Copyright 2015 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# original: https://github.com/CiscoCloud/terraform.py + +"""\ +Dynamic inventory for Terraform - finds all `.tfstate` files below the working +directory and generates an inventory based on them. +""" +import argparse +from collections import defaultdict +import random +from functools import wraps +import json +import os +import re + +VERSION = '0.4.0pre' + + +def tfstates(root=None): + root = root or os.getcwd() + for dirpath, _, filenames in os.walk(root): + for name in filenames: + if os.path.splitext(name)[-1] == '.tfstate': + yield os.path.join(dirpath, name) + +def convert_to_v3_structure(attributes, prefix=''): + """ Convert the attributes from v4 to v3 + Receives a dict and return a dictionary """ + result = {} + if isinstance(attributes, str): + # In the case when we receive a string (e.g. values for security_groups) + return {'{}{}'.format(prefix, random.randint(1,10**10)): attributes} + for key, value in attributes.items(): + if isinstance(value, list): + if len(value): + result['{}{}.#'.format(prefix, key, hash)] = len(value) + for i, v in enumerate(value): + result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i))) + elif isinstance(value, dict): + result['{}{}.%'.format(prefix, key)] = len(value) + for k, v in value.items(): + result['{}{}.{}'.format(prefix, key, k)] = v + else: + result['{}{}'.format(prefix, key)] = value + return result + +def iterresources(filenames): + for filename in filenames: + with open(filename, 'r') as json_file: + state = json.load(json_file) + tf_version = state['version'] + if tf_version == 3: + for module in state['modules']: + name = module['path'][-1] + for key, resource in module['resources'].items(): + yield name, key, resource + elif tf_version == 4: + # In version 4 the structure changes so we need to iterate + # each instance inside the resource branch. 
+ for resource in state['resources']: + name = resource['provider'].split('.')[-1] + for instance in resource['instances']: + key = "{}.{}".format(resource['type'], resource['name']) + if 'index_key' in instance: + key = "{}.{}".format(key, instance['index_key']) + data = {} + data['type'] = resource['type'] + data['provider'] = resource['provider'] + data['depends_on'] = instance.get('depends_on', []) + data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])} + if 'id' in instance['attributes']: + data['primary']['id'] = instance['attributes']['id'] + data['primary']['meta'] = instance['attributes'].get('meta',{}) + yield name, key, data + else: + raise KeyError('tfstate version %d not supported' % tf_version) + + +## READ RESOURCES +PARSERS = {} + + +def _clean_dc(dcname): + # Consul DCs are strictly alphanumeric with underscores and hyphens - + # ensure that the consul_dc attribute meets these requirements. + return re.sub('[^\w_\-]', '-', dcname) + + +def iterhosts(resources): + '''yield host tuples of (name, attributes, groups)''' + for module_name, key, resource in resources: + resource_type, name = key.split('.', 1) + try: + parser = PARSERS[resource_type] + except KeyError: + continue + + yield parser(resource, module_name) + + +def iterips(resources): + '''yield ip tuples of (port_id, ip)''' + for module_name, key, resource in resources: + resource_type, name = key.split('.', 1) + if resource_type == 'openstack_networking_floatingip_associate_v2': + yield openstack_floating_ips(resource) + + +def parses(prefix): + def inner(func): + PARSERS[prefix] = func + return func + + return inner + + +def calculate_mantl_vars(func): + """calculate Mantl vars""" + + @wraps(func) + def inner(*args, **kwargs): + name, attrs, groups = func(*args, **kwargs) + + # attrs + if attrs.get('role', '') == 'control': + attrs['consul_is_server'] = True + else: + attrs['consul_is_server'] = False + + # groups + if attrs.get('publicly_routable', False): + groups.append('publicly_routable') + + return name, attrs, groups + + return inner + + +def _parse_prefix(source, prefix, sep='.'): + for compkey, value in list(source.items()): + try: + curprefix, rest = compkey.split(sep, 1) + except ValueError: + continue + + if curprefix != prefix or rest == '#': + continue + + yield rest, value + + +def parse_attr_list(source, prefix, sep='.'): + attrs = defaultdict(dict) + for compkey, value in _parse_prefix(source, prefix, sep): + idx, key = compkey.split(sep, 1) + attrs[idx][key] = value + + return list(attrs.values()) + + +def parse_dict(source, prefix, sep='.'): + return dict(_parse_prefix(source, prefix, sep)) + + +def parse_list(source, prefix, sep='.'): + return [value for _, value in _parse_prefix(source, prefix, sep)] + + +def parse_bool(string_form): + if type(string_form) is bool: + return string_form + + token = string_form.lower()[0] + + if token == 't': + return True + elif token == 'f': + return False + else: + raise ValueError('could not convert %r to a bool' % string_form) + + +@parses('metal_device') +def metal_device(resource, tfvars=None): + raw_attrs = resource['primary']['attributes'] + name = raw_attrs['hostname'] + groups = [] + + attrs = { + 'id': raw_attrs['id'], + 'facilities': parse_list(raw_attrs, 'facilities'), + 'hostname': raw_attrs['hostname'], + 'operating_system': raw_attrs['operating_system'], + 'locked': parse_bool(raw_attrs['locked']), + 'tags': parse_list(raw_attrs, 'tags'), + 'plan': raw_attrs['plan'], + 'project_id': raw_attrs['project_id'], + 'state': 
raw_attrs['state'], + # ansible + 'ansible_host': raw_attrs['network.0.address'], + 'ansible_ssh_user': 'root', # Use root by default in metal + # generic + 'ipv4_address': raw_attrs['network.0.address'], + 'public_ipv4': raw_attrs['network.0.address'], + 'ipv6_address': raw_attrs['network.1.address'], + 'public_ipv6': raw_attrs['network.1.address'], + 'private_ipv4': raw_attrs['network.2.address'], + 'provider': 'metal', + } + + if raw_attrs['operating_system'] == 'flatcar_stable': + # For Flatcar set the ssh_user to core + attrs.update({'ansible_ssh_user': 'core'}) + + # add groups based on attrs + groups.append('metal_operating_system=' + attrs['operating_system']) + groups.append('metal_locked=%s' % attrs['locked']) + groups.append('metal_state=' + attrs['state']) + groups.append('metal_plan=' + attrs['plan']) + + # groups specific to kubespray + groups = groups + attrs['tags'] + + return name, attrs, groups + + +def openstack_floating_ips(resource): + # return a (port_id, floating_ip) tuple so iterips() can build a port_id -> ip mapping + raw_attrs = resource['primary']['attributes'] + return raw_attrs['port_id'], raw_attrs['floating_ip'] + +@parses('openstack_compute_instance_v2') +@calculate_mantl_vars +def openstack_host(resource, module_name): + raw_attrs = resource['primary']['attributes'] + name = raw_attrs['name'] + groups = [] + + attrs = { + 'access_ip_v4': raw_attrs['access_ip_v4'], + 'access_ip_v6': raw_attrs['access_ip_v6'], + 'access_ip': raw_attrs['access_ip_v4'], + 'ip': raw_attrs['network.0.fixed_ip_v4'], + 'flavor': parse_dict(raw_attrs, 'flavor', + sep='_'), + 'id': raw_attrs['id'], + 'image': parse_dict(raw_attrs, 'image', + sep='_'), + 'key_pair': raw_attrs['key_pair'], + 'metadata': parse_dict(raw_attrs, 'metadata'), + 'network': parse_attr_list(raw_attrs, 'network'), + 'region': raw_attrs.get('region', ''), + 'security_groups': parse_list(raw_attrs, 'security_groups'), + # ansible + 'ansible_ssh_port': 22, + # workaround for an OpenStack bug where hosts have a different domain + # after they're restarted + 'host_domain': 'novalocal', + 'use_host_domain': True, + # generic + 'public_ipv4': raw_attrs['access_ip_v4'], + 'private_ipv4': raw_attrs['access_ip_v4'], + 'port_id': raw_attrs['network.0.port'], + 'provider': 'openstack', + } + + if 'floating_ip' in raw_attrs: + attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4'] + + try: + if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1": + attrs.update({ + 'ansible_host': re.sub(r"[\[\]]", "", raw_attrs['access_ip_v6']), + 'publicly_routable': True, + }) + else: + attrs.update({ + 'ansible_host': raw_attrs['access_ip_v4'], + 'publicly_routable': True, + }) + except (KeyError, ValueError): + attrs.update({'ansible_host': '', 'publicly_routable': False}) + + # Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017 + + # attrs specific to Ansible + if 'metadata.ssh_user' in raw_attrs: + attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user'] + + if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0: + device_index = 1 + for key, value in list(raw_attrs.items()): + match = re.search(r"^volume.*.device$", key) + if match: + attrs['disk_volume_device_'+str(device_index)] = value + device_index += 1 + + + # attrs specific to Mantl + attrs.update({ + 'role': 
attrs['metadata'].get('role', 'none') + }) + + # add groups based on attrs + groups.append('os_image=' + str(attrs['image']['id'])) + groups.append('os_flavor=' + str(attrs['flavor']['name'])) + groups.extend('os_metadata_%s=%s' % item + for item in list(attrs['metadata'].items())) + groups.append('os_region=' + str(attrs['region'])) + + # groups specific to kubespray + for group in attrs['metadata'].get('kubespray_groups', "").split(","): + groups.append(group) + + return name, attrs, groups + + +def iter_host_ips(hosts, ips): + '''Update hosts that have an entry in the floating IP list''' + for host in hosts: + port_id = host[1]['port_id'] + + if port_id in ips: + ip = ips[port_id] + + host[1].update({ + 'access_ip_v4': ip, + 'access_ip': ip, + 'public_ipv4': ip, + 'ansible_host': ip, + }) + + if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0": + host[1].pop('access_ip') + + yield host + + +## QUERY TYPES +def query_host(hosts, target): + for name, attrs, _ in hosts: + if name == target: + return attrs + + return {} + + +def query_list(hosts): + groups = defaultdict(dict) + meta = {} + + for name, attrs, hostgroups in hosts: + for group in set(hostgroups): + # Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf + # Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all" + if not group: group = "all" + + groups[group].setdefault('hosts', []) + groups[group]['hosts'].append(name) + + meta[name] = attrs + + groups['_meta'] = {'hostvars': meta} + return groups + + +def query_hostfile(hosts): + out = ['## begin hosts generated by terraform.py ##'] + out.extend( + '{}\t{}'.format(attrs['ansible_host'].ljust(16), name) + for name, attrs, _ in hosts + ) + + out.append('## end hosts generated by terraform.py ##') + return '\n'.join(out) + + +def main(): + parser = argparse.ArgumentParser( + __file__, __doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) + modes = parser.add_mutually_exclusive_group(required=True) + modes.add_argument('--list', + action='store_true', + help='list all variables') + modes.add_argument('--host', help='list variables for a single host') + modes.add_argument('--version', + action='store_true', + help='print version and exit') + modes.add_argument('--hostfile', + action='store_true', + help='print hosts as a /etc/hosts snippet') + parser.add_argument('--pretty', + action='store_true', + help='pretty-print output JSON') + parser.add_argument('--nometa', + action='store_true', + help='with --list, exclude hostvars') + default_root = os.environ.get('TERRAFORM_STATE_ROOT', + os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', '..', ))) + parser.add_argument('--root', + default=default_root, + help='custom root to search for `.tfstate`s in') + + args = parser.parse_args() + + if args.version: + print('%s %s' % (__file__, VERSION)) + parser.exit() + + hosts = iterhosts(iterresources(tfstates(args.root))) + + # Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts + ips = dict(iterips(iterresources(tfstates(args.root)))) + + if ips: + hosts = iter_host_ips(hosts, ips) + + if args.list: + output = query_list(hosts) + if args.nometa: + del output['_meta'] + print(json.dumps(output, indent=4 if args.pretty else None)) + elif args.host: + output = query_host(hosts, args.host) + print(json.dumps(output, indent=4 if args.pretty else 
None)) + elif args.hostfile: + output = query_hostfile(hosts) + print(output) + + parser.exit() + + +if __name__ == '__main__': + main() diff --git a/kubespray/contrib/terraform/metal/kubespray.tf b/kubespray/contrib/terraform/metal/kubespray.tf new file mode 100644 index 0000000..c8019e5 --- /dev/null +++ b/kubespray/contrib/terraform/metal/kubespray.tf @@ -0,0 +1,62 @@ +# Configure the Equinix Metal Provider +provider "metal" { +} + +resource "metal_ssh_key" "k8s" { + count = var.public_key_path != "" ? 1 : 0 + name = "kubernetes-${var.cluster_name}" + public_key = chomp(file(var.public_key_path)) +} + +resource "metal_device" "k8s_master" { + depends_on = [metal_ssh_key.k8s] + + count = var.number_of_k8s_masters + hostname = "${var.cluster_name}-k8s-master-${count.index + 1}" + plan = var.plan_k8s_masters + facilities = [var.facility] + operating_system = var.operating_system + billing_cycle = var.billing_cycle + project_id = var.metal_project_id + tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"] +} + +resource "metal_device" "k8s_master_no_etcd" { + depends_on = [metal_ssh_key.k8s] + + count = var.number_of_k8s_masters_no_etcd + hostname = "${var.cluster_name}-k8s-master-${count.index + 1}" + plan = var.plan_k8s_masters_no_etcd + facilities = [var.facility] + operating_system = var.operating_system + billing_cycle = var.billing_cycle + project_id = var.metal_project_id + tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"] +} + +resource "metal_device" "k8s_etcd" { + depends_on = [metal_ssh_key.k8s] + + count = var.number_of_etcd + hostname = "${var.cluster_name}-etcd-${count.index + 1}" + plan = var.plan_etcd + facilities = [var.facility] + operating_system = var.operating_system + billing_cycle = var.billing_cycle + project_id = var.metal_project_id + tags = ["cluster-${var.cluster_name}", "etcd"] +} + +resource "metal_device" "k8s_node" { + depends_on = [metal_ssh_key.k8s] + + count = var.number_of_k8s_nodes + hostname = "${var.cluster_name}-k8s-node-${count.index + 1}" + plan = var.plan_k8s_nodes + facilities = [var.facility] + operating_system = var.operating_system + billing_cycle = var.billing_cycle + project_id = var.metal_project_id + tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"] +} + diff --git a/kubespray/contrib/terraform/metal/output.tf b/kubespray/contrib/terraform/metal/output.tf new file mode 100644 index 0000000..262d91b --- /dev/null +++ b/kubespray/contrib/terraform/metal/output.tf @@ -0,0 +1,16 @@ +output "k8s_masters" { + value = metal_device.k8s_master.*.access_public_ipv4 +} + +output "k8s_masters_no_etc" { + value = metal_device.k8s_master_no_etcd.*.access_public_ipv4 +} + +output "k8s_etcds" { + value = metal_device.k8s_etcd.*.access_public_ipv4 +} + +output "k8s_nodes" { + value = metal_device.k8s_node.*.access_public_ipv4 +} + diff --git a/kubespray/contrib/terraform/metal/sample-inventory/cluster.tfvars b/kubespray/contrib/terraform/metal/sample-inventory/cluster.tfvars new file mode 100644 index 0000000..f167aeb --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/cluster.tfvars @@ -0,0 +1,32 @@ +# your Kubernetes cluster name here +cluster_name = "mycluster" + +# Your Equinix Metal project ID. 
See https://metal.equinix.com/developers/docs/accounts/ +metal_project_id = "Example-API-Token" + +# The public SSH key to be uploaded into authorized_keys on the provisioned bare metal Equinix Metal nodes +# leave this value blank if the public key is already set up in the Equinix Metal project +# Terraform will complain if the public key is already set up in Equinix Metal +public_key_path = "~/.ssh/id_rsa.pub" + +# cluster location +facility = "ewr1" + +# standalone etcds +number_of_etcd = 0 + +plan_etcd = "t1.small.x86" + +# masters +number_of_k8s_masters = 1 + +number_of_k8s_masters_no_etcd = 0 + +plan_k8s_masters = "t1.small.x86" + +plan_k8s_masters_no_etcd = "t1.small.x86" + +# nodes +number_of_k8s_nodes = 2 + +plan_k8s_nodes = "t1.small.x86" diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/all.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/all.yml new file mode 100644 index 0000000..b9639a8 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/all.yml @@ -0,0 +1,140 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful in AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## and it must be set to port 6443 +loadbalancer_apiserver_port: 6443 + +## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it uses the nameservers to make sure the cluster installs safely in the dns_early stage. +## Use this option with caution; you may need to define your dns servers. Otherwise, outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. 
+# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over an https proxy due to an ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of the get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need to exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set the proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will be installed from the source you specify +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Choose 'none' if you provide your own certificates. +## Options are "script" and "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container images +# download_container: true + +## Deploy container engine +# Set false if you want to deploy the container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. 
+ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/aws.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/azure.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/containerd.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# 
containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/coreos.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/cri-o.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/docker.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. 
+## example define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registry,example China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value. +## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/etcd.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/gcp.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/hcloud.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# 
controller_extra_args: {} diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/oci.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. +# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/offline.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ 
crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Flannel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repos must be available, for EL8, baseos and appstream +### 
By default we enable those repo automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/openstack.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. 
+# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/upcloud.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/vsphere.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ 
b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/etcd.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. 
+# This affects the ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/addons.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. See docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an external CSI that would also require setting this to true, but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# 
rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: 
"layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... 
+# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. 
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). +# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. 
+## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. +# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. 
+# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). 
+## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. +# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enable BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN is mutualy exclusive modes. 
+# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tuning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. +# If left blank, then the interface is chosen using the node's +# default route. +# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore.
+# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all connected clusters and +# in the range of 1 to 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This makes it possible to deploy cilium alongside another CNI to replace kube-proxy. +# cilium_deploy_additionally: false + +# Auto direct node routes can be used to advertise pod routes in your cluster +# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`). +# This works only if you have L2 connectivity between all your nodes. +# You will also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setup. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag.
+# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. 
+# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. 
+# cilium_disable_cnp_status_updates: true diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt needs a custom-compiled ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic to support the container network can be a nic name or a group of regexes separated by commas, e.g. 'enp6s0f0,eth.*'; if empty, the nic used by the default route will be used. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port.
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/contrib/terraform/metal/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. 
The feature works +# only with Weave IPAM (default). +# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/contrib/terraform/metal/variables.tf b/kubespray/contrib/terraform/metal/variables.tf new file mode 100644 index 0000000..f0c9b28 --- /dev/null +++ b/kubespray/contrib/terraform/metal/variables.tf @@ -0,0 +1,57 @@ +variable "cluster_name" { + default = "kubespray" +} + +variable "metal_project_id" { + description = "Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/" +} + +variable "operating_system" { + default = "ubuntu_20_04" +} + +variable "public_key_path" { + description = "The path of the ssh pub key" + default = "~/.ssh/id_rsa.pub" +} + +variable "billing_cycle" { + default = "hourly" +} + +variable "facility" { + default = "dfw2" +} + +variable "plan_k8s_masters" { + default = "c3.small.x86" +} + +variable "plan_k8s_masters_no_etcd" { + default = "c3.small.x86" +} + +variable "plan_etcd" { + default = "c3.small.x86" +} + +variable "plan_k8s_nodes" { + default = "c3.medium.x86" +} + +variable "number_of_k8s_masters" { + default = 1 +} + +variable "number_of_k8s_masters_no_etcd" { + default = 0 +} + +variable "number_of_etcd" { + default = 0 +} + +variable "number_of_k8s_nodes" { + default = 1 +} + diff --git a/kubespray/contrib/terraform/metal/versions.tf b/kubespray/contrib/terraform/metal/versions.tf new file mode 100644 index 0000000..637203f --- /dev/null +++ b/kubespray/contrib/terraform/metal/versions.tf @@ -0,0 +1,9 @@ + +terraform { + required_version = ">= 0.12" + required_providers { + metal = { + source = "equinix/metal" + } + } +} diff --git a/kubespray/contrib/terraform/openstack/.gitignore b/kubespray/contrib/terraform/openstack/.gitignore new file mode 100644 index 0000000..55d775b --- /dev/null +++ b/kubespray/contrib/terraform/openstack/.gitignore @@ -0,0 +1,5 @@ +.terraform +*.tfvars +!sample-inventory\/cluster.tfvars +*.tfstate +*.tfstate.backup diff --git a/kubespray/contrib/terraform/openstack/README.md b/kubespray/contrib/terraform/openstack/README.md new file mode 100644 index 0000000..1379e52 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/README.md @@ -0,0 +1,763 @@ +# Kubernetes on OpenStack with Terraform + +Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on +OpenStack. + +## Status + +This will install a Kubernetes cluster on an OpenStack Cloud. It should work on +most modern installs of OpenStack that support the basic services. 
+ +### Known compatible public clouds + +- [Auro](https://auro.io/) +- [Betacloud](https://www.betacloud.io/) +- [CityCloud](https://www.citycloud.com/) +- [DreamHost](https://www.dreamhost.com/cloud/computing/) +- [ELASTX](https://elastx.se/) +- [EnterCloudSuite](https://www.entercloudsuite.com/) +- [FugaCloud](https://fuga.cloud/) +- [Open Telekom Cloud](https://cloud.telekom.de/) +- [OVH](https://www.ovh.com/) +- [Rackspace](https://www.rackspace.com/) +- [Safespring](https://www.safespring.com) +- [Ultimum](https://ultimum.io/) +- [VexxHost](https://vexxhost.com/) +- [Zetta](https://www.zetta.io/) + +## Approach + +The terraform configuration inspects variables found in +[variables.tf](variables.tf) to create resources in your OpenStack cluster. +There is a [python script](../terraform.py) that reads the generated`.tfstate` +file to generate a dynamic inventory that is consumed by the main ansible script +to actually install kubernetes and stand up the cluster. + +### Networking + +The configuration includes creating a private subnet with a router to the +external net. It will allocate floating IPs from a pool and assign them to the +hosts where that makes sense. You have the option of creating bastion hosts +inside the private subnet to access the nodes there. Alternatively, a node with +a floating IP can be used as a jump host to nodes without. + +#### Using an existing router + +It is possible to use an existing router instead of creating one. To use an +existing router set the router\_id variable to the uuid of the router you wish +to use. + +For example: + +```ShellSession +router_id = "00c542e7-6f46-4535-ae95-984c7f0391a3" +``` + +### Kubernetes Nodes + +You can create many different kubernetes topologies by setting the number of +different classes of hosts. For each class there are options for allocating +floating IP addresses or not. + +- Master nodes with etcd +- Master nodes without etcd +- Standalone etcd hosts +- Kubernetes worker nodes + +Note that the Ansible script will report an invalid configuration if you wind up +with an even number of etcd instances since that is not a valid configuration. This +restriction includes standalone etcd nodes that are deployed in a cluster along with +master nodes with etcd replicas. As an example, if you have three master nodes with +etcd replicas and three standalone etcd nodes, the script will fail since there are +now six total etcd replicas. + +### GlusterFS shared file system + +The Terraform configuration supports provisioning of an optional GlusterFS +shared file system based on a separate set of VMs. To enable this, you need to +specify: + +- the number of Gluster hosts (minimum 2) +- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks +- Other properties related to provisioning the hosts + +Even if you are using Flatcar Container Linux by Kinvolk for your cluster, you will still +need the GlusterFS VMs to be based on either Debian or RedHat based images. +Flatcar Container Linux by Kinvolk cannot serve GlusterFS, but can connect to it through +binaries available on hyperkube v1.4.3_coreos.0 or higher. 
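+ +For example, a minimal `cluster.tfvars` fragment for two GlusterFS hosts with 50 GB bricks might look like the sketch below (the image name and ssh user are placeholders for whichever Debian or RedHat based image you have in Glance; the variables themselves are described in the cluster variables table further down): + +```ini +number_of_gfs_nodes_no_floating_ip = 2 +gfs_volume_size_in_gb = 50 +image_gfs = "Debian-11" +ssh_user_gfs = "debian" +```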
+ +## Requirements + +- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.12 or later +- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) +- you already have a suitable OS image in Glance +- you already have a floating IP pool created +- you have security groups enabled +- you have a pair of keys generated that can be used to secure the new hosts + +## Module Architecture + +The configuration is divided into three modules: + +- Network +- IPs +- Compute + +The main reason for splitting the configuration up in this way is to easily +accommodate situations where floating IPs are limited by a quota or if you have +any external references to the floating IP (e.g. DNS) that would otherwise have +to be updated. + +You can force your existing IPs by modifying the compute variables in +`kubespray.tf` as follows: + +```ini +k8s_master_fips = ["151.101.129.67"] +k8s_node_fips = ["151.101.129.68"] +``` + +## Terraform + +Terraform will be used to provision all of the OpenStack resources with base software as appropriate. + +### Configuration + +#### Inventory files + +Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state): + +```ShellSession +cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER +cd inventory/$CLUSTER +ln -s ../../contrib/terraform/openstack/hosts +ln -s ../../contrib +``` + +This will be the base for subsequent Terraform commands. + +#### OpenStack access and credentials + +No provider variables are hardcoded inside `variables.tf` because Terraform +supports various authentication methods for OpenStack: the older script and +environment method (using `openrc`) as well as a newer declarative method, and +different OpenStack environments may support Identity API version 2 or 3. + +These are examples and may vary depending on your OpenStack cloud provider, +for an exhaustive list on how to authenticate on OpenStack with Terraform +please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/). + +##### Declarative method (recommended) + +The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in: + +- the current directory +- `~/.config/openstack` +- `/etc/openstack` + +`clouds.yaml`: + +```yaml +clouds: + mycloud: + auth: + auth_url: https://openstack:5000/v3 + username: "username" + project_name: "projectname" + project_id: projectid + user_domain_name: "Default" + password: "password" + region_name: "RegionOne" + interface: "public" + identity_api_version: 3 +``` + +If you have multiple clouds defined in your `clouds.yaml` file you can choose +the one you want to use with the environment variable `OS_CLOUD`: + +```ShellSession +export OS_CLOUD=mycloud +``` + +##### Openrc method + +When using classic environment variables, Terraform uses default `OS_*` +environment variables. A script suitable for your environment may be available +from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*. 
+ +With identity v2: + +```ShellSession +source openrc + +env | grep OS + +OS_AUTH_URL=https://openstack:5000/v2.0 +OS_PROJECT_ID=projectid +OS_PROJECT_NAME=projectname +OS_USERNAME=username +OS_PASSWORD=password +OS_REGION_NAME=RegionOne +OS_INTERFACE=public +OS_IDENTITY_API_VERSION=2 +``` + +With identity v3: + +```ShellSession +source openrc + +env | grep OS + +OS_AUTH_URL=https://openstack:5000/v3 +OS_PROJECT_ID=projectid +OS_PROJECT_NAME=username +OS_PROJECT_DOMAIN_ID=default +OS_USERNAME=username +OS_PASSWORD=password +OS_REGION_NAME=RegionOne +OS_INTERFACE=public +OS_IDENTITY_API_VERSION=3 +OS_USER_DOMAIN_NAME=Default +``` + +Terraform does not support a mix of DomainName and DomainID, choose one or the other: + +- provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username + +```ShellSession +unset OS_USER_DOMAIN_NAME +export OS_USER_DOMAIN_ID=default +``` + +or + +```ShellSession +unset OS_PROJECT_DOMAIN_ID +set OS_PROJECT_DOMAIN_NAME=Default +``` + +#### Cluster variables + +The construction of the cluster is driven by values found in +[variables.tf](variables.tf). + +For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. + +|Variable | Description | +|---------|-------------| +|`cluster_name` | All OpenStack resources will use the Terraform variable`cluster_name` (default`example`) in their name to make it easier to track. For example the first compute resource will be named`example-kubernetes-1`. | +|`az_list` | List of Availability Zones available in your OpenStack cluster. | +|`network_name` | The name to be given to the internal network that will be generated | +|`use_existing_network`| Use an existing network with the name of `network_name`. `false` by default | +|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated | +|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. | +|`floatingip_pool` | Name of the pool from which floating IPs will be allocated | +|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to master nodes instead of creating new random floating IPs. | +|`bastion_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to bastion node instead of creating new random floating IPs. | +|`external_net` | UUID of the external network that will be routed to | +|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through `openstack flavor list` | +|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. | +|`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected | +|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs | +|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses| +|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just master with no etcd. These can be provisioned with or without floating IP addresses | +|`number_of_etcd` | Number of pure etcd nodes | +|`number_of_k8s_nodes`, `number_of_k8s_nodes_no_floating_ip` | Kubernetes worker nodes. 
These can be provisioned with or without floating ip addresses. | +|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one | +|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. | +| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks | +|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube_node` for tainting them as nodes, empty by default. | +|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. | +|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default | +|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default | +|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default | +|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default | +|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default | +|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default | +|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage | +|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage | +|`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default | +|`node_volume_type` | Volume type of the root volume for nodes, 'Default' by default | +|`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage | +|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage | +|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage | +|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) | +|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) | +|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) | +|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. | +|`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default | +|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default | +|`k8s_nodes` | Map containing worker node definition, see explanation below | +|`k8s_masters` | Map containing master node definition, see explanation for k8s_nodes and `sample-inventory/cluster.tfvars` | + +##### k8s_nodes + +Allows a custom definition of worker nodes giving the operator full control over individual node flavor and +availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and +`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration +using the `k8s_nodes` variable. 
The `az`, `flavor` and `floating_ip` parameters are mandatory. +The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes. + +For example: + +```ini +k8s_nodes = { + "1" = { + "az" = "sto1" + "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" + "floating_ip" = true + }, + "2" = { + "az" = "sto2" + "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" + "floating_ip" = true + }, + "3" = { + "az" = "sto3" + "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" + "floating_ip" = true + "extra_groups" = "calico_rr" + } +} +``` + +Would result in the same configuration as: + +```ini +number_of_k8s_nodes = 3 +flavor_k8s_node = "83d8b44a-26a0-4f02-a981-079446926445" +az_list = ["sto1", "sto2", "sto3"] +``` + +And: + +```ini +k8s_nodes = { + "ing-1" = { + "az" = "sto1" + "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" + "floating_ip" = true + }, + "ing-2" = { + "az" = "sto2" + "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" + "floating_ip" = true + }, + "ing-3" = { + "az" = "sto3" + "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" + "floating_ip" = true + }, + "big-1" = { + "az" = "sto1" + "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b" + "floating_ip" = false + }, + "big-2" = { + "az" = "sto2" + "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b" + "floating_ip" = false + }, + "big-3" = { + "az" = "sto3" + "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b" + "floating_ip" = false + }, + "small-1" = { + "az" = "sto1" + "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e" + "floating_ip" = false + }, + "small-2" = { + "az" = "sto2" + "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e" + "floating_ip" = false + }, + "small-3" = { + "az" = "sto3" + "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e" + "floating_ip" = false + } +} +``` + +Would result in three nodes in each availability zone each with their own separate naming, +flavor and floating ip configuration. + +The "schema": + +```ini +k8s_nodes = { + "key | node name suffix, must be unique" = { + "az" = string + "flavor" = string + "floating_ip" = bool + }, +} +``` + +All values are required. + +#### Terraform state files + +In the cluster's inventory folder, the following files might be created (either by Terraform +or manually), to prevent you from pushing them accidentally they are in a +`.gitignore` file in the `terraform/openstack` directory : + +- `.terraform` +- `.tfvars` +- `.tfstate` +- `.tfstate.backup` + +You can still add them manually if you want to. + +### Initialization + +Before Terraform can operate on your cluster you need to install the required +plugins. This is accomplished as follows: + +```ShellSession +cd inventory/$CLUSTER +terraform -chdir="../../contrib/terraform/openstack" init +``` + +This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules. + +### Customizing with cloud-init + +You can apply cloud-init based customization for the openstack instances before provisioning your cluster. +One common template is used for all instances. 
Adjust the file shown below: +`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml` +For example, to enable openstack novnc access and ansible_user=root SSH access: + +```ShellSession +#cloud-config +## in some cases novnc console access is required +## it requires ssh password to be set +ssh_pwauth: yes +chpasswd: + list: | + root:secret + expire: False + +## in some cases direct root ssh access via ssh key is required +disable_root: false +``` + +### Provisioning cluster + +You can apply the Terraform configuration to your cluster with the following command +issued from your cluster's inventory directory (`inventory/$CLUSTER`): + +```ShellSession +terraform -chdir="../../contrib/terraform/openstack" apply -var-file=cluster.tfvars +``` + +if you chose to create a bastion host, this script will create +`contrib/terraform/openstack/k8s_cluster.yml` with an ssh command for Ansible to +be able to access your machines tunneling through the bastion's IP address. If +you want to manually handle the ssh tunneling to these machines, please delete +or move that file. If you want to use this, just leave it there, as ansible will +pick it up automatically. + +### Destroying cluster + +You can destroy your new cluster with the following command issued from the cluster's inventory directory: + +```ShellSession +terraform -chdir="../../contrib/terraform/openstack" destroy -var-file=cluster.tfvars +``` + +If you've started the Ansible run, it may also be a good idea to do some manual cleanup: + +- remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file +- clean up any temporary cache files: `rm /tmp/$CLUSTER-*` + +### Debugging + +You can enable debugging output from Terraform by setting +`OS_DEBUG` to 1 and`TF_LOG` to`DEBUG` before running the Terraform command. + +### Terraform output + +Terraform can output values that are useful for configure Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment: + +- `private_subnet_id`: the subnet where your instances are running is used for `openstack_lbaas_subnet_id` +- `floating_network_id`: the network_id where the floating IP are provisioned is used for `openstack_lbaas_floating_network_id` + +## Ansible + +### Node access + +#### SSH + +Ensure your local ssh-agent is running and your ssh key has been added. This +step is required by the terraform provisioner: + +```ShellSession +eval $(ssh-agent -s) +ssh-add ~/.ssh/id_rsa +``` + +If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`). + +#### Metadata variables + +The [python script](../terraform.py) that reads the +generated`.tfstate` file to generate a dynamic inventory recognizes +some variables within a "metadata" block, defined in a "resource" +block (example): + +```ini +resource "openstack_compute_instance_v2" "example" { + ... + metadata { + ssh_user = "ubuntu" + prefer_ipv6 = true + python_bin = "/usr/bin/python3" + } + ... +} +``` + +As the example shows, these let you define the SSH username for +Ansible, a Python binary which is needed by Ansible if +`/usr/bin/python` doesn't exist, and whether the IPv6 address of the +instance should be preferred over IPv4. + +#### Bastion host + +Bastion access will be determined by: + +- Your choice on the amount of bastion hosts (set by `number_of_bastions` terraform variable). 
+## Ansible
+
+### Node access
+
+#### SSH
+
+Ensure your local ssh-agent is running and your SSH key has been added. This
+step is required by the terraform provisioner:
+
+```ShellSession
+eval $(ssh-agent -s)
+ssh-add ~/.ssh/id_rsa
+```
+
+If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).
+
+#### Metadata variables
+
+The [python script](../terraform.py) that reads the
+generated `.tfstate` file to generate a dynamic inventory recognizes
+some variables within a "metadata" block, defined in a "resource"
+block (example):
+
+```ini
+resource "openstack_compute_instance_v2" "example" {
+  ...
+  metadata {
+    ssh_user = "ubuntu"
+    prefer_ipv6 = true
+    python_bin = "/usr/bin/python3"
+  }
+  ...
+}
+```
+
+As the example shows, these let you define the SSH username for
+Ansible, a Python binary which is needed by Ansible if
+`/usr/bin/python` doesn't exist, and whether the IPv6 address of the
+instance should be preferred over IPv4.
+
+#### Bastion host
+
+Bastion access will be determined by:
+
+- The number of bastion hosts you choose to create (set by the `number_of_bastions` terraform variable).
+- The existence of nodes/masters with floating IPs (set by the `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables).
+
+If you have a bastion host, your SSH traffic will be routed through it, regardless of whether you have masters/nodes with a floating IP assigned.
+If you don't have a bastion host but at least one of your masters/nodes has a floating IP, then SSH traffic will be tunneled through one of those machines.
+
+So either a bastion host or at least one master/node with a floating IP is required.
+
+#### Test access
+
+Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
+
+```ShellSession
+$ ansible -i inventory/$CLUSTER/hosts -m ping all
+example-k8s_node-1 | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+example-etcd-1 | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+example-k8s-master-1 | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+```
+
+If it fails, try to connect manually via SSH; it could be something as simple as a stale host key.
+
+### Configure cluster variables
+
+Edit `inventory/$CLUSTER/group_vars/all/all.yml`:
+
+- **bin_dir**:
+
+```yml
+# Directory where the binaries will be installed
+# Default:
+# bin_dir: /usr/local/bin
+# For Flatcar Container Linux by Kinvolk:
+bin_dir: /opt/bin
+```
+
+- and **cloud_provider**:
+
+```yml
+cloud_provider: openstack
+```
+
+Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`:
+
+- Set variable **kube_network_plugin** to your desired networking plugin.
+  - **flannel** works out-of-the-box
+  - **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets
+
+```yml
+# Choose network plugin (calico, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: flannel
+```
+
+- Set variable **resolvconf_mode**:
+
+```yml
+# Can be docker_dns, host_resolvconf or none
+# Default:
+# resolvconf_mode: docker_dns
+# For Flatcar Container Linux by Kinvolk:
+resolvconf_mode: host_resolvconf
+```
+
+- Set the maximum number of attached Cinder volumes per host (default 256):
+
+```yml
+node_volume_attach_limit: 26
+```
+
+### Deploy Kubernetes
+
+```ShellSession
+ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
+```
+
+This will take some time, as there are many tasks to run.
+
+## Kubernetes
+
+### Set up kubectl
+
+1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation
+2. Add a route to the internal IP of a master node (if needed):
+
+```ShellSession
+sudo route add [master-internal-ip] gw [router-ip]
+```
+
+or
+
+```ShellSession
+sudo route add -net [internal-subnet]/24 gw [router-ip]
+```
+
+1. List Kubernetes certificates & keys:
+
+```ShellSession
+ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
+```
+
+1. Get `admin`'s certificates and keys:
+
+```ShellSession
+ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1-key.pem > admin-key.pem
+ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1.pem > admin.pem
+ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
+```
+
+1.
Configure kubectl: + +```ShellSession +$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \ + --certificate-authority=ca.pem + +$ kubectl config set-credentials default-admin \ + --certificate-authority=ca.pem \ + --client-key=admin-key.pem \ + --client-certificate=admin.pem + +$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin +$ kubectl config use-context default-system +``` + +1. Check it: + +```ShellSession +kubectl version +``` + +## GlusterFS + +GlusterFS is not deployed by the standard `cluster.yml` playbook, see the +[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md) +for instructions. + +Basically you will install Gluster as + +```ShellSession +ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml +``` + +## What's next + +Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/). + +## Appendix + +### Migration from `number_of_k8s_nodes*` to `k8s_nodes` + +If you currently have a cluster defined using the `number_of_k8s_nodes*` variables and wish +to migrate to the `k8s_nodes` style you can do it like so: + +```ShellSession +$ terraform state list +module.compute.data.openstack_images_image_v2.gfs_image +module.compute.data.openstack_images_image_v2.vm_image +module.compute.openstack_compute_floatingip_associate_v2.k8s_master[0] +module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0] +module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1] +module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2] +module.compute.openstack_compute_instance_v2.k8s_master[0] +module.compute.openstack_compute_instance_v2.k8s_node[0] +module.compute.openstack_compute_instance_v2.k8s_node[1] +module.compute.openstack_compute_instance_v2.k8s_node[2] +module.compute.openstack_compute_keypair_v2.k8s +module.compute.openstack_compute_servergroup_v2.k8s_etcd[0] +module.compute.openstack_compute_servergroup_v2.k8s_master[0] +module.compute.openstack_compute_servergroup_v2.k8s_node[0] +module.compute.openstack_networking_secgroup_rule_v2.bastion[0] +module.compute.openstack_networking_secgroup_rule_v2.egress[0] +module.compute.openstack_networking_secgroup_rule_v2.k8s +module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[0] +module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[1] +module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[2] +module.compute.openstack_networking_secgroup_rule_v2.k8s_master[0] +module.compute.openstack_networking_secgroup_rule_v2.worker[0] +module.compute.openstack_networking_secgroup_rule_v2.worker[1] +module.compute.openstack_networking_secgroup_rule_v2.worker[2] +module.compute.openstack_networking_secgroup_rule_v2.worker[3] +module.compute.openstack_networking_secgroup_rule_v2.worker[4] +module.compute.openstack_networking_secgroup_v2.bastion[0] +module.compute.openstack_networking_secgroup_v2.k8s +module.compute.openstack_networking_secgroup_v2.k8s_master +module.compute.openstack_networking_secgroup_v2.worker +module.ips.null_resource.dummy_dependency +module.ips.openstack_networking_floatingip_v2.k8s_master[0] +module.ips.openstack_networking_floatingip_v2.k8s_node[0] +module.ips.openstack_networking_floatingip_v2.k8s_node[1] +module.ips.openstack_networking_floatingip_v2.k8s_node[2] 
+module.network.openstack_networking_network_v2.k8s[0] +module.network.openstack_networking_router_interface_v2.k8s[0] +module.network.openstack_networking_router_v2.k8s[0] +module.network.openstack_networking_subnet_v2.k8s[0] +$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["1"]' +Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"1\"]" +Successfully moved 1 object(s). +$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["2"]' +Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"2\"]" +Successfully moved 1 object(s). +$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["3"]' +Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"3\"]" +Successfully moved 1 object(s). +$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[0]' 'module.compute.openstack_compute_instance_v2.k8s_node["1"]' +Move "module.compute.openstack_compute_instance_v2.k8s_node[0]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"1\"]" +Successfully moved 1 object(s). +$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[1]' 'module.compute.openstack_compute_instance_v2.k8s_node["2"]' +Move "module.compute.openstack_compute_instance_v2.k8s_node[1]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"2\"]" +Successfully moved 1 object(s). +$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[2]' 'module.compute.openstack_compute_instance_v2.k8s_node["3"]' +Move "module.compute.openstack_compute_instance_v2.k8s_node[2]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"3\"]" +Successfully moved 1 object(s). +$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[0]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["1"]' +Move "module.ips.openstack_networking_floatingip_v2.k8s_node[0]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"1\"]" +Successfully moved 1 object(s). +$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[1]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["2"]' +Move "module.ips.openstack_networking_floatingip_v2.k8s_node[1]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"2\"]" +Successfully moved 1 object(s). +$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[2]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["3"]' +Move "module.ips.openstack_networking_floatingip_v2.k8s_node[2]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"3\"]" +Successfully moved 1 object(s). +``` + +Of course for nodes without floating ips those steps can be omitted. diff --git a/kubespray/contrib/terraform/openstack/hosts b/kubespray/contrib/terraform/openstack/hosts new file mode 100755 index 0000000..6c89e1c --- /dev/null +++ b/kubespray/contrib/terraform/openstack/hosts @@ -0,0 +1,458 @@ +#!/usr/bin/env python3 +# +# Copyright 2015 Cisco Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# original: https://github.com/CiscoCloud/terraform.py + +"""\ +Dynamic inventory for Terraform - finds all `.tfstate` files below the working +directory and generates an inventory based on them. +""" +import argparse +from collections import defaultdict +import random +from functools import wraps +import json +import os +import re + +VERSION = '0.4.0pre' + + +def tfstates(root=None): + root = root or os.getcwd() + for dirpath, _, filenames in os.walk(root): + for name in filenames: + if os.path.splitext(name)[-1] == '.tfstate': + yield os.path.join(dirpath, name) + +def convert_to_v3_structure(attributes, prefix=''): + """ Convert the attributes from v4 to v3 + Receives a dict and return a dictionary """ + result = {} + if isinstance(attributes, str): + # In the case when we receive a string (e.g. values for security_groups) + return {'{}{}'.format(prefix, random.randint(1,10**10)): attributes} + for key, value in attributes.items(): + if isinstance(value, list): + if len(value): + result['{}{}.#'.format(prefix, key, hash)] = len(value) + for i, v in enumerate(value): + result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i))) + elif isinstance(value, dict): + result['{}{}.%'.format(prefix, key)] = len(value) + for k, v in value.items(): + result['{}{}.{}'.format(prefix, key, k)] = v + else: + result['{}{}'.format(prefix, key)] = value + return result + +def iterresources(filenames): + for filename in filenames: + with open(filename, 'r') as json_file: + state = json.load(json_file) + tf_version = state['version'] + if tf_version == 3: + for module in state['modules']: + name = module['path'][-1] + for key, resource in module['resources'].items(): + yield name, key, resource + elif tf_version == 4: + # In version 4 the structure changes so we need to iterate + # each instance inside the resource branch. + for resource in state['resources']: + name = resource['provider'].split('.')[-1] + for instance in resource['instances']: + key = "{}.{}".format(resource['type'], resource['name']) + if 'index_key' in instance: + key = "{}.{}".format(key, instance['index_key']) + data = {} + data['type'] = resource['type'] + data['provider'] = resource['provider'] + data['depends_on'] = instance.get('depends_on', []) + data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])} + if 'id' in instance['attributes']: + data['primary']['id'] = instance['attributes']['id'] + data['primary']['meta'] = instance['attributes'].get('meta',{}) + yield name, key, data + else: + raise KeyError('tfstate version %d not supported' % tf_version) + + +## READ RESOURCES +PARSERS = {} + + +def _clean_dc(dcname): + # Consul DCs are strictly alphanumeric with underscores and hyphens - + # ensure that the consul_dc attribute meets these requirements. 
+ return re.sub('[^\w_\-]', '-', dcname) + + +def iterhosts(resources): + '''yield host tuples of (name, attributes, groups)''' + for module_name, key, resource in resources: + resource_type, name = key.split('.', 1) + try: + parser = PARSERS[resource_type] + except KeyError: + continue + + yield parser(resource, module_name) + + +def iterips(resources): + '''yield ip tuples of (port_id, ip)''' + for module_name, key, resource in resources: + resource_type, name = key.split('.', 1) + if resource_type == 'openstack_networking_floatingip_associate_v2': + yield openstack_floating_ips(resource) + + +def parses(prefix): + def inner(func): + PARSERS[prefix] = func + return func + + return inner + + +def calculate_mantl_vars(func): + """calculate Mantl vars""" + + @wraps(func) + def inner(*args, **kwargs): + name, attrs, groups = func(*args, **kwargs) + + # attrs + if attrs.get('role', '') == 'control': + attrs['consul_is_server'] = True + else: + attrs['consul_is_server'] = False + + # groups + if attrs.get('publicly_routable', False): + groups.append('publicly_routable') + + return name, attrs, groups + + return inner + + +def _parse_prefix(source, prefix, sep='.'): + for compkey, value in list(source.items()): + try: + curprefix, rest = compkey.split(sep, 1) + except ValueError: + continue + + if curprefix != prefix or rest == '#': + continue + + yield rest, value + + +def parse_attr_list(source, prefix, sep='.'): + attrs = defaultdict(dict) + for compkey, value in _parse_prefix(source, prefix, sep): + idx, key = compkey.split(sep, 1) + attrs[idx][key] = value + + return list(attrs.values()) + + +def parse_dict(source, prefix, sep='.'): + return dict(_parse_prefix(source, prefix, sep)) + + +def parse_list(source, prefix, sep='.'): + return [value for _, value in _parse_prefix(source, prefix, sep)] + + +def parse_bool(string_form): + if type(string_form) is bool: + return string_form + + token = string_form.lower()[0] + + if token == 't': + return True + elif token == 'f': + return False + else: + raise ValueError('could not convert %r to a bool' % string_form) + + +@parses('metal_device') +def metal_device(resource, tfvars=None): + raw_attrs = resource['primary']['attributes'] + name = raw_attrs['hostname'] + groups = [] + + attrs = { + 'id': raw_attrs['id'], + 'facilities': parse_list(raw_attrs, 'facilities'), + 'hostname': raw_attrs['hostname'], + 'operating_system': raw_attrs['operating_system'], + 'locked': parse_bool(raw_attrs['locked']), + 'tags': parse_list(raw_attrs, 'tags'), + 'plan': raw_attrs['plan'], + 'project_id': raw_attrs['project_id'], + 'state': raw_attrs['state'], + # ansible + 'ansible_host': raw_attrs['network.0.address'], + 'ansible_ssh_user': 'root', # Use root by default in metal + # generic + 'ipv4_address': raw_attrs['network.0.address'], + 'public_ipv4': raw_attrs['network.0.address'], + 'ipv6_address': raw_attrs['network.1.address'], + 'public_ipv6': raw_attrs['network.1.address'], + 'private_ipv4': raw_attrs['network.2.address'], + 'provider': 'metal', + } + + if raw_attrs['operating_system'] == 'flatcar_stable': + # For Flatcar set the ssh_user to core + attrs.update({'ansible_ssh_user': 'core'}) + + # add groups based on attrs + groups.append('metal_operating_system=' + attrs['operating_system']) + groups.append('metal_locked=%s' % attrs['locked']) + groups.append('metal_state=' + attrs['state']) + groups.append('metal_plan=' + attrs['plan']) + + # groups specific to kubespray + groups = groups + attrs['tags'] + + return name, attrs, groups + + +def 
openstack_floating_ips(resource): + raw_attrs = resource['primary']['attributes'] + attrs = { + 'ip': raw_attrs['floating_ip'], + 'port_id': raw_attrs['port_id'], + } + return attrs + +def openstack_floating_ips(resource): + raw_attrs = resource['primary']['attributes'] + return raw_attrs['port_id'], raw_attrs['floating_ip'] + +@parses('openstack_compute_instance_v2') +@calculate_mantl_vars +def openstack_host(resource, module_name): + raw_attrs = resource['primary']['attributes'] + name = raw_attrs['name'] + groups = [] + + attrs = { + 'access_ip_v4': raw_attrs['access_ip_v4'], + 'access_ip_v6': raw_attrs['access_ip_v6'], + 'access_ip': raw_attrs['access_ip_v4'], + 'ip': raw_attrs['network.0.fixed_ip_v4'], + 'flavor': parse_dict(raw_attrs, 'flavor', + sep='_'), + 'id': raw_attrs['id'], + 'image': parse_dict(raw_attrs, 'image', + sep='_'), + 'key_pair': raw_attrs['key_pair'], + 'metadata': parse_dict(raw_attrs, 'metadata'), + 'network': parse_attr_list(raw_attrs, 'network'), + 'region': raw_attrs.get('region', ''), + 'security_groups': parse_list(raw_attrs, 'security_groups'), + # ansible + 'ansible_ssh_port': 22, + # workaround for an OpenStack bug where hosts have a different domain + # after they're restarted + 'host_domain': 'novalocal', + 'use_host_domain': True, + # generic + 'public_ipv4': raw_attrs['access_ip_v4'], + 'private_ipv4': raw_attrs['access_ip_v4'], + 'port_id' : raw_attrs['network.0.port'], + 'provider': 'openstack', + } + + if 'floating_ip' in raw_attrs: + attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4'] + + try: + if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1": + attrs.update({ + 'ansible_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']), + 'publicly_routable': True, + }) + else: + attrs.update({ + 'ansible_host': raw_attrs['access_ip_v4'], + 'publicly_routable': True, + }) + except (KeyError, ValueError): + attrs.update({'ansible_host': '', 'publicly_routable': False}) + + # Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017 + + # attrs specific to Ansible + if 'metadata.ssh_user' in raw_attrs: + attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user'] + + if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0: + device_index = 1 + for key, value in list(raw_attrs.items()): + match = re.search("^volume.*.device$", key) + if match: + attrs['disk_volume_device_'+str(device_index)] = value + device_index += 1 + + + # attrs specific to Mantl + attrs.update({ + 'role': attrs['metadata'].get('role', 'none') + }) + + # add groups based on attrs + groups.append('os_image=' + str(attrs['image']['id'])) + groups.append('os_flavor=' + str(attrs['flavor']['name'])) + groups.extend('os_metadata_%s=%s' % item + for item in list(attrs['metadata'].items())) + groups.append('os_region=' + str(attrs['region'])) + + # groups specific to kubespray + for group in attrs['metadata'].get('kubespray_groups', "").split(","): + groups.append(group) + + return name, attrs, groups + + +def iter_host_ips(hosts, ips): + '''Update hosts that have an entry in the floating IP list''' + for host in hosts: + port_id = host[1]['port_id'] + + if port_id in ips: + ip = ips[port_id] + + host[1].update({ + 'access_ip_v4': ip, + 'access_ip': ip, + 'public_ipv4': ip, + 'ansible_host': ip, + }) + + if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0": + host[1].pop('access_ip') + + yield host + + +## QUERY 
TYPES +def query_host(hosts, target): + for name, attrs, _ in hosts: + if name == target: + return attrs + + return {} + + +def query_list(hosts): + groups = defaultdict(dict) + meta = {} + + for name, attrs, hostgroups in hosts: + for group in set(hostgroups): + # Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf + # Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all" + if not group: group = "all" + + groups[group].setdefault('hosts', []) + groups[group]['hosts'].append(name) + + meta[name] = attrs + + groups['_meta'] = {'hostvars': meta} + return groups + + +def query_hostfile(hosts): + out = ['## begin hosts generated by terraform.py ##'] + out.extend( + '{}\t{}'.format(attrs['ansible_host'].ljust(16), name) + for name, attrs, _ in hosts + ) + + out.append('## end hosts generated by terraform.py ##') + return '\n'.join(out) + + +def main(): + parser = argparse.ArgumentParser( + __file__, __doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) + modes = parser.add_mutually_exclusive_group(required=True) + modes.add_argument('--list', + action='store_true', + help='list all variables') + modes.add_argument('--host', help='list variables for a single host') + modes.add_argument('--version', + action='store_true', + help='print version and exit') + modes.add_argument('--hostfile', + action='store_true', + help='print hosts as a /etc/hosts snippet') + parser.add_argument('--pretty', + action='store_true', + help='pretty-print output JSON') + parser.add_argument('--nometa', + action='store_true', + help='with --list, exclude hostvars') + default_root = os.environ.get('TERRAFORM_STATE_ROOT', + os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', '..', ))) + parser.add_argument('--root', + default=default_root, + help='custom root to search for `.tfstate`s in') + + args = parser.parse_args() + + if args.version: + print('%s %s' % (__file__, VERSION)) + parser.exit() + + hosts = iterhosts(iterresources(tfstates(args.root))) + + # Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts + ips = dict(iterips(iterresources(tfstates(args.root)))) + + if ips: + hosts = iter_host_ips(hosts, ips) + + if args.list: + output = query_list(hosts) + if args.nometa: + del output['_meta'] + print(json.dumps(output, indent=4 if args.pretty else None)) + elif args.host: + output = query_host(hosts, args.host) + print(json.dumps(output, indent=4 if args.pretty else None)) + elif args.hostfile: + output = query_hostfile(hosts) + print(output) + + parser.exit() + + +if __name__ == '__main__': + main() diff --git a/kubespray/contrib/terraform/openstack/kubespray.tf b/kubespray/contrib/terraform/openstack/kubespray.tf new file mode 100644 index 0000000..e4f302f --- /dev/null +++ b/kubespray/contrib/terraform/openstack/kubespray.tf @@ -0,0 +1,129 @@ +module "network" { + source = "./modules/network" + + external_net = var.external_net + network_name = var.network_name + subnet_cidr = var.subnet_cidr + cluster_name = var.cluster_name + dns_nameservers = var.dns_nameservers + network_dns_domain = var.network_dns_domain + use_neutron = var.use_neutron + port_security_enabled = var.port_security_enabled + router_id = var.router_id +} + +module "ips" { + source = "./modules/ips" + + number_of_k8s_masters = var.number_of_k8s_masters + number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd + 
number_of_k8s_nodes = var.number_of_k8s_nodes + floatingip_pool = var.floatingip_pool + number_of_bastions = var.number_of_bastions + external_net = var.external_net + network_name = var.network_name + router_id = module.network.router_id + k8s_nodes = var.k8s_nodes + k8s_masters = var.k8s_masters + k8s_master_fips = var.k8s_master_fips + bastion_fips = var.bastion_fips + router_internal_port_id = module.network.router_internal_port_id +} + +module "compute" { + source = "./modules/compute" + + cluster_name = var.cluster_name + az_list = var.az_list + az_list_node = var.az_list_node + number_of_k8s_masters = var.number_of_k8s_masters + number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd + number_of_etcd = var.number_of_etcd + number_of_k8s_masters_no_floating_ip = var.number_of_k8s_masters_no_floating_ip + number_of_k8s_masters_no_floating_ip_no_etcd = var.number_of_k8s_masters_no_floating_ip_no_etcd + number_of_k8s_nodes = var.number_of_k8s_nodes + number_of_bastions = var.number_of_bastions + number_of_k8s_nodes_no_floating_ip = var.number_of_k8s_nodes_no_floating_ip + number_of_gfs_nodes_no_floating_ip = var.number_of_gfs_nodes_no_floating_ip + k8s_masters = var.k8s_masters + k8s_nodes = var.k8s_nodes + bastion_root_volume_size_in_gb = var.bastion_root_volume_size_in_gb + etcd_root_volume_size_in_gb = var.etcd_root_volume_size_in_gb + master_root_volume_size_in_gb = var.master_root_volume_size_in_gb + node_root_volume_size_in_gb = var.node_root_volume_size_in_gb + gfs_root_volume_size_in_gb = var.gfs_root_volume_size_in_gb + gfs_volume_size_in_gb = var.gfs_volume_size_in_gb + master_volume_type = var.master_volume_type + node_volume_type = var.node_volume_type + public_key_path = var.public_key_path + image = var.image + image_uuid = var.image_uuid + image_gfs = var.image_gfs + image_master = var.image_master + image_master_uuid = var.image_master_uuid + image_gfs_uuid = var.image_gfs_uuid + ssh_user = var.ssh_user + ssh_user_gfs = var.ssh_user_gfs + flavor_k8s_master = var.flavor_k8s_master + flavor_k8s_node = var.flavor_k8s_node + flavor_etcd = var.flavor_etcd + flavor_gfs_node = var.flavor_gfs_node + network_name = var.network_name + flavor_bastion = var.flavor_bastion + k8s_master_fips = module.ips.k8s_master_fips + k8s_master_no_etcd_fips = module.ips.k8s_master_no_etcd_fips + k8s_masters_fips = module.ips.k8s_masters_fips + k8s_node_fips = module.ips.k8s_node_fips + k8s_nodes_fips = module.ips.k8s_nodes_fips + bastion_fips = module.ips.bastion_fips + bastion_allowed_remote_ips = var.bastion_allowed_remote_ips + master_allowed_remote_ips = var.master_allowed_remote_ips + k8s_allowed_remote_ips = var.k8s_allowed_remote_ips + k8s_allowed_egress_ips = var.k8s_allowed_egress_ips + supplementary_master_groups = var.supplementary_master_groups + supplementary_node_groups = var.supplementary_node_groups + master_allowed_ports = var.master_allowed_ports + worker_allowed_ports = var.worker_allowed_ports + bastion_allowed_ports = var.bastion_allowed_ports + use_access_ip = var.use_access_ip + master_server_group_policy = var.master_server_group_policy + node_server_group_policy = var.node_server_group_policy + etcd_server_group_policy = var.etcd_server_group_policy + extra_sec_groups = var.extra_sec_groups + extra_sec_groups_name = var.extra_sec_groups_name + group_vars_path = var.group_vars_path + port_security_enabled = var.port_security_enabled + force_null_port_security = var.force_null_port_security + network_router_id = module.network.router_id + network_id = 
module.network.network_id + use_existing_network = var.use_existing_network + private_subnet_id = module.network.subnet_id + + depends_on = [ + module.network.subnet_id + ] +} + +output "private_subnet_id" { + value = module.network.subnet_id +} + +output "floating_network_id" { + value = var.external_net +} + +output "router_id" { + value = module.network.router_id +} + +output "k8s_master_fips" { + value = var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd > 0 ? concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips) : [for key, value in module.ips.k8s_masters_fips : value.address] +} + +output "k8s_node_fips" { + value = var.number_of_k8s_nodes > 0 ? module.ips.k8s_node_fips : [for key, value in module.ips.k8s_nodes_fips : value.address] +} + +output "bastion_fips" { + value = module.ips.bastion_fips +} diff --git a/kubespray/contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt b/kubespray/contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt new file mode 100644 index 0000000..a304b2c --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt @@ -0,0 +1 @@ +ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'" diff --git a/kubespray/contrib/terraform/openstack/modules/compute/main.tf b/kubespray/contrib/terraform/openstack/modules/compute/main.tf new file mode 100644 index 0000000..7af82e1 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/compute/main.tf @@ -0,0 +1,903 @@ +data "openstack_images_image_v2" "vm_image" { + count = var.image_uuid == "" ? 1 : 0 + most_recent = true + name = var.image +} + +data "openstack_images_image_v2" "gfs_image" { + count = var.image_gfs_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0 + most_recent = true + name = var.image_gfs == "" ? var.image : var.image_gfs +} + +data "openstack_images_image_v2" "image_master" { + count = var.image_master_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0 + name = var.image_master == "" ? var.image : var.image_master +} + +data "cloudinit_config" "cloudinit" { + part { + content_type = "text/cloud-config" + content = file("${path.module}/templates/cloudinit.yaml") + } +} + +data "openstack_networking_network_v2" "k8s_network" { + count = var.use_existing_network ? 
1 : 0 + name = var.network_name +} + +resource "openstack_compute_keypair_v2" "k8s" { + name = "kubernetes-${var.cluster_name}" + public_key = chomp(file(var.public_key_path)) +} + +resource "openstack_networking_secgroup_v2" "k8s_master" { + name = "${var.cluster_name}-k8s-master" + description = "${var.cluster_name} - Kubernetes Master" + delete_default_rules = true +} + +resource "openstack_networking_secgroup_v2" "k8s_master_extra" { + count = "%{if var.extra_sec_groups}1%{else}0%{endif}" + name = "${var.cluster_name}-k8s-master-${var.extra_sec_groups_name}" + description = "${var.cluster_name} - Kubernetes Master nodes - rules not managed by terraform" + delete_default_rules = true +} + +resource "openstack_networking_secgroup_rule_v2" "k8s_master" { + count = length(var.master_allowed_remote_ips) + direction = "ingress" + ethertype = "IPv4" + protocol = "tcp" + port_range_min = "6443" + port_range_max = "6443" + remote_ip_prefix = var.master_allowed_remote_ips[count.index] + security_group_id = openstack_networking_secgroup_v2.k8s_master.id +} + +resource "openstack_networking_secgroup_rule_v2" "k8s_master_ports" { + count = length(var.master_allowed_ports) + direction = "ingress" + ethertype = "IPv4" + protocol = lookup(var.master_allowed_ports[count.index], "protocol", "tcp") + port_range_min = lookup(var.master_allowed_ports[count.index], "port_range_min") + port_range_max = lookup(var.master_allowed_ports[count.index], "port_range_max") + remote_ip_prefix = lookup(var.master_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0") + security_group_id = openstack_networking_secgroup_v2.k8s_master.id +} + +resource "openstack_networking_secgroup_v2" "bastion" { + name = "${var.cluster_name}-bastion" + count = var.number_of_bastions != "" ? 1 : 0 + description = "${var.cluster_name} - Bastion Server" + delete_default_rules = true +} + +resource "openstack_networking_secgroup_rule_v2" "bastion" { + count = var.number_of_bastions != "" ? 
length(var.bastion_allowed_remote_ips) : 0 + direction = "ingress" + ethertype = "IPv4" + protocol = "tcp" + port_range_min = "22" + port_range_max = "22" + remote_ip_prefix = var.bastion_allowed_remote_ips[count.index] + security_group_id = openstack_networking_secgroup_v2.bastion[0].id +} + +resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" { + count = length(var.bastion_allowed_ports) + direction = "ingress" + ethertype = "IPv4" + protocol = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp") + port_range_min = lookup(var.bastion_allowed_ports[count.index], "port_range_min") + port_range_max = lookup(var.bastion_allowed_ports[count.index], "port_range_max") + remote_ip_prefix = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0") + security_group_id = openstack_networking_secgroup_v2.bastion[0].id +} + +resource "openstack_networking_secgroup_v2" "k8s" { + name = "${var.cluster_name}-k8s" + description = "${var.cluster_name} - Kubernetes" + delete_default_rules = true +} + +resource "openstack_networking_secgroup_rule_v2" "k8s" { + direction = "ingress" + ethertype = "IPv4" + remote_group_id = openstack_networking_secgroup_v2.k8s.id + security_group_id = openstack_networking_secgroup_v2.k8s.id +} + +resource "openstack_networking_secgroup_rule_v2" "k8s_allowed_remote_ips" { + count = length(var.k8s_allowed_remote_ips) + direction = "ingress" + ethertype = "IPv4" + protocol = "tcp" + port_range_min = "22" + port_range_max = "22" + remote_ip_prefix = var.k8s_allowed_remote_ips[count.index] + security_group_id = openstack_networking_secgroup_v2.k8s.id +} + +resource "openstack_networking_secgroup_rule_v2" "egress" { + count = length(var.k8s_allowed_egress_ips) + direction = "egress" + ethertype = "IPv4" + remote_ip_prefix = var.k8s_allowed_egress_ips[count.index] + security_group_id = openstack_networking_secgroup_v2.k8s.id +} + +resource "openstack_networking_secgroup_v2" "worker" { + name = "${var.cluster_name}-k8s-worker" + description = "${var.cluster_name} - Kubernetes worker nodes" + delete_default_rules = true +} + +resource "openstack_networking_secgroup_v2" "worker_extra" { + count = "%{if var.extra_sec_groups}1%{else}0%{endif}" + name = "${var.cluster_name}-k8s-worker-${var.extra_sec_groups_name}" + description = "${var.cluster_name} - Kubernetes worker nodes - rules not managed by terraform" + delete_default_rules = true +} + +resource "openstack_networking_secgroup_rule_v2" "worker" { + count = length(var.worker_allowed_ports) + direction = "ingress" + ethertype = "IPv4" + protocol = lookup(var.worker_allowed_ports[count.index], "protocol", "tcp") + port_range_min = lookup(var.worker_allowed_ports[count.index], "port_range_min") + port_range_max = lookup(var.worker_allowed_ports[count.index], "port_range_max") + remote_ip_prefix = lookup(var.worker_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0") + security_group_id = openstack_networking_secgroup_v2.worker.id +} + +resource "openstack_compute_servergroup_v2" "k8s_master" { + count = var.master_server_group_policy != "" ? 1 : 0 + name = "k8s-master-srvgrp" + policies = [var.master_server_group_policy] +} + +resource "openstack_compute_servergroup_v2" "k8s_node" { + count = var.node_server_group_policy != "" ? 1 : 0 + name = "k8s-node-srvgrp" + policies = [var.node_server_group_policy] +} + +resource "openstack_compute_servergroup_v2" "k8s_etcd" { + count = var.etcd_server_group_policy != "" ? 
1 : 0 + name = "k8s-etcd-srvgrp" + policies = [var.etcd_server_group_policy] +} + +locals { +# master groups + master_sec_groups = compact([ + openstack_networking_secgroup_v2.k8s_master.id, + openstack_networking_secgroup_v2.k8s.id, + var.extra_sec_groups ?openstack_networking_secgroup_v2.k8s_master_extra[0].id : "", + ]) +# worker groups + worker_sec_groups = compact([ + openstack_networking_secgroup_v2.k8s.id, + openstack_networking_secgroup_v2.worker.id, + var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].id : "", + ]) +# bastion groups + bastion_sec_groups = compact(concat([ + openstack_networking_secgroup_v2.k8s.id, + openstack_networking_secgroup_v2.bastion[0].id, + ])) +# etcd groups + etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id]) +# glusterfs groups + gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id]) + +# Image uuid + image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id +# Image_gfs uuid + image_to_use_gfs = var.image_gfs_uuid != "" ? var.image_gfs_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.gfs_image[0].id +# image_master uuidimage_gfs_uuid + image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id +} + +resource "openstack_networking_port_v2" "bastion_port" { + count = var.number_of_bastions + name = "${var.cluster_name}-bastion-${count.index + 1}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "bastion" { + name = "${var.cluster_name}-bastion-${count.index + 1}" + count = var.number_of_bastions + image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null + flavor_id = var.flavor_bastion + key_pair = openstack_compute_keypair_v2.k8s.name + user_data = data.cloudinit_config.cloudinit.rendered + + dynamic "block_device" { + for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] + content { + uuid = local.image_to_use_node + source_type = "image" + volume_size = var.bastion_root_volume_size_in_gb + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = element(openstack_networking_port_v2.bastion_port.*.id, count.index) + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "bastion" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } + + provisioner "local-exec" { + command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${var.bastion_fips[0]}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" + } +} + +resource "openstack_networking_port_v2" "k8s_master_port" { + count = var.number_of_k8s_masters + name = "${var.cluster_name}-k8s-master-${count.index + 1}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? 
null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.master_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "k8s_master" { + name = "${var.cluster_name}-k8s-master-${count.index + 1}" + count = var.number_of_k8s_masters + availability_zone = element(var.az_list, count.index) + image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null + flavor_id = var.flavor_k8s_master + key_pair = openstack_compute_keypair_v2.k8s.name + user_data = data.cloudinit_config.cloudinit.rendered + + + dynamic "block_device" { + for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] + content { + uuid = local.image_to_use_master + source_type = "image" + volume_size = var.master_root_volume_size_in_gb + volume_type = var.master_volume_type + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index) + } + + dynamic "scheduler_hints" { + for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_master[0].id + } + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } + + provisioner "local-exec" { + command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" + } +} + +resource "openstack_networking_port_v2" "k8s_masters_port" { + for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {} + name = "${var.cluster_name}-k8s-${each.key}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.master_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "k8s_masters" { + for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {} + name = "${var.cluster_name}-k8s-${each.key}" + availability_zone = each.value.az + image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null + flavor_id = each.value.flavor + key_pair = openstack_compute_keypair_v2.k8s.name + + dynamic "block_device" { + for_each = var.master_root_volume_size_in_gb > 0 ? 
[local.image_to_use_master] : [] + content { + uuid = local.image_to_use_master + source_type = "image" + volume_size = var.master_root_volume_size_in_gb + volume_type = var.master_volume_type + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = openstack_networking_port_v2.k8s_masters_port[each.key].id + } + + dynamic "scheduler_hints" { + for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_master[0].id + } + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "%{if each.value.etcd == true}etcd,%{endif}kube_control_plane,${var.supplementary_master_groups},k8s_cluster%{if each.value.floating_ip == false},no_floating%{endif}" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } + + provisioner "local-exec" { + command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}" + } +} + +resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" { + count = var.number_of_k8s_masters_no_etcd + name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.master_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "k8s_master_no_etcd" { + name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}" + count = var.number_of_k8s_masters_no_etcd + availability_zone = element(var.az_list, count.index) + image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null + flavor_id = var.flavor_k8s_master + key_pair = openstack_compute_keypair_v2.k8s.name + user_data = data.cloudinit_config.cloudinit.rendered + + + dynamic "block_device" { + for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] + content { + uuid = local.image_to_use_master + source_type = "image" + volume_size = var.master_root_volume_size_in_gb + volume_type = var.master_volume_type + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index) + } + + dynamic "scheduler_hints" { + for_each = var.master_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_master[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_master[0].id + } + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } + + provisioner "local-exec" { + command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" + } +} + +resource "openstack_networking_port_v2" "etcd_port" { + count = var.number_of_etcd + name = "${var.cluster_name}-etcd-${count.index + 1}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "etcd" { + name = "${var.cluster_name}-etcd-${count.index + 1}" + count = var.number_of_etcd + availability_zone = element(var.az_list, count.index) + image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null + flavor_id = var.flavor_etcd + key_pair = openstack_compute_keypair_v2.k8s.name + user_data = data.cloudinit_config.cloudinit.rendered + + dynamic "block_device" { + for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] + content { + uuid = local.image_to_use_master + source_type = "image" + volume_size = var.etcd_root_volume_size_in_gb + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = element(openstack_networking_port_v2.etcd_port.*.id, count.index) + } + + dynamic "scheduler_hints" { + for_each = var.etcd_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_etcd[0].id + } + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "etcd,no_floating" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } +} + +resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" { + count = var.number_of_k8s_masters_no_floating_ip + name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.master_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" { + name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}" + count = var.number_of_k8s_masters_no_floating_ip + availability_zone = element(var.az_list, count.index) + image_id = var.master_root_volume_size_in_gb == 0 ? 
local.image_to_use_master : null + flavor_id = var.flavor_k8s_master + key_pair = openstack_compute_keypair_v2.k8s.name + + dynamic "block_device" { + for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] + content { + uuid = local.image_to_use_master + source_type = "image" + volume_size = var.master_root_volume_size_in_gb + volume_type = var.master_volume_type + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_port.*.id, count.index) + } + + dynamic "scheduler_hints" { + for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_master[0].id + } + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } +} + +resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port" { + count = var.number_of_k8s_masters_no_floating_ip_no_etcd + name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.master_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" { + name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}" + count = var.number_of_k8s_masters_no_floating_ip_no_etcd + availability_zone = element(var.az_list, count.index) + image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null + flavor_id = var.flavor_k8s_master + key_pair = openstack_compute_keypair_v2.k8s.name + user_data = data.cloudinit_config.cloudinit.rendered + + dynamic "block_device" { + for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] + content { + uuid = local.image_to_use_master + source_type = "image" + volume_size = var.master_root_volume_size_in_gb + volume_type = var.master_volume_type + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_no_etcd_port.*.id, count.index) + } + + dynamic "scheduler_hints" { + for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_master[0].id + } + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } +} + +resource "openstack_networking_port_v2" "k8s_node_port" { + count = var.number_of_k8s_nodes + name = "${var.cluster_name}-k8s-node-${count.index + 1}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? 
null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "k8s_node" { + name = "${var.cluster_name}-k8s-node-${count.index + 1}" + count = var.number_of_k8s_nodes + availability_zone = element(var.az_list_node, count.index) + image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null + flavor_id = var.flavor_k8s_node + key_pair = openstack_compute_keypair_v2.k8s.name + user_data = data.cloudinit_config.cloudinit.rendered + + dynamic "block_device" { + for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] + content { + uuid = local.image_to_use_node + source_type = "image" + volume_size = var.node_root_volume_size_in_gb + volume_type = var.node_volume_type + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index) + } + + + dynamic "scheduler_hints" { + for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_node[0].id + } + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } + + provisioner "local-exec" { + command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" + } +} + +resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" { + count = var.number_of_k8s_nodes_no_floating_ip + name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" { + name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}" + count = var.number_of_k8s_nodes_no_floating_ip + availability_zone = element(var.az_list_node, count.index) + image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null + flavor_id = var.flavor_k8s_node + key_pair = openstack_compute_keypair_v2.k8s.name + user_data = data.cloudinit_config.cloudinit.rendered + + dynamic "block_device" { + for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] + content { + uuid = local.image_to_use_node + source_type = "image" + volume_size = var.node_root_volume_size_in_gb + volume_type = var.node_volume_type + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = element(openstack_networking_port_v2.k8s_node_no_floating_ip_port.*.id, count.index) + } + + dynamic "scheduler_hints" { + for_each = var.node_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_node[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_node[0].id + } + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } +} + +resource "openstack_networking_port_v2" "k8s_nodes_port" { + for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {} + name = "${var.cluster_name}-k8s-node-${each.key}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "k8s_nodes" { + for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {} + name = "${var.cluster_name}-k8s-node-${each.key}" + availability_zone = each.value.az + image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null + flavor_id = each.value.flavor + key_pair = openstack_compute_keypair_v2.k8s.name + user_data = data.cloudinit_config.cloudinit.rendered + + dynamic "block_device" { + for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] + content { + uuid = local.image_to_use_node + source_type = "image" + volume_size = var.node_root_volume_size_in_gb + volume_type = var.node_volume_type + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = openstack_networking_port_v2.k8s_nodes_port[each.key].id + } + + dynamic "scheduler_hints" { + for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_node[0].id + } + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups},${try(each.value.extra_groups, "")}" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } + + provisioner "local-exec" { + command = "%{if each.value.floating_ip}sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}" + } +} + +resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" { + count = var.number_of_gfs_nodes_no_floating_ip + name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null + no_security_groups = var.port_security_enabled ? 
null : false + fixed_ip { + subnet_id = var.private_subnet_id + } + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" { + name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}" + count = var.number_of_gfs_nodes_no_floating_ip + availability_zone = element(var.az_list, count.index) + image_name = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null + flavor_id = var.flavor_gfs_node + key_pair = openstack_compute_keypair_v2.k8s.name + + dynamic "block_device" { + for_each = var.gfs_root_volume_size_in_gb > 0 ? [local.image_to_use_gfs] : [] + content { + uuid = local.image_to_use_gfs + source_type = "image" + volume_size = var.gfs_root_volume_size_in_gb + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = element(openstack_networking_port_v2.glusterfs_node_no_floating_ip_port.*.id, count.index) + } + + dynamic "scheduler_hints" { + for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_node[0].id + } + } + + metadata = { + ssh_user = var.ssh_user_gfs + kubespray_groups = "gfs-cluster,network-storage,no_floating" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } +} + +resource "openstack_networking_floatingip_associate_v2" "bastion" { + count = var.number_of_bastions + floating_ip = var.bastion_fips[count.index] + port_id = element(openstack_networking_port_v2.bastion_port.*.id, count.index) +} + + +resource "openstack_networking_floatingip_associate_v2" "k8s_master" { + count = var.number_of_k8s_masters + floating_ip = var.k8s_master_fips[count.index] + port_id = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index) +} + +resource "openstack_networking_floatingip_associate_v2" "k8s_masters" { + for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {} + floating_ip = var.k8s_masters_fips[each.key].address + port_id = openstack_networking_port_v2.k8s_masters_port[each.key].id +} + +resource "openstack_networking_floatingip_associate_v2" "k8s_master_no_etcd" { + count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0 + floating_ip = var.k8s_master_no_etcd_fips[count.index] + port_id = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index) +} + +resource "openstack_networking_floatingip_associate_v2" "k8s_node" { + count = var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0 + floating_ip = var.k8s_node_fips[count.index] + port_id = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index) +} + +resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" { + for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {} + floating_ip = var.k8s_nodes_fips[each.key].address + port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id +} + +resource "openstack_blockstorage_volume_v2" "glusterfs_volume" { + name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}" + count = var.gfs_root_volume_size_in_gb == 0 ? 
var.number_of_gfs_nodes_no_floating_ip : 0 + description = "Non-ephemeral volume for GlusterFS" + size = var.gfs_volume_size_in_gb +} + +resource "openstack_compute_volume_attach_v2" "glusterfs_volume" { + count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0 + instance_id = element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index) + volume_id = element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index) +} diff --git a/kubespray/contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml b/kubespray/contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml new file mode 100644 index 0000000..396acb9 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml @@ -0,0 +1,17 @@ +# yamllint disable rule:comments +#cloud-config +## in some cases novnc console access is required +## it requires ssh password to be set +#ssh_pwauth: yes +#chpasswd: +# list: | +# root:secret +# expire: False + +## in some cases direct root ssh access via ssh key is required +#disable_root: false + +## in some cases additional CA certs are required +#ca-certs: +# trusted: | +# -----BEGIN CERTIFICATE----- diff --git a/kubespray/contrib/terraform/openstack/modules/compute/variables.tf b/kubespray/contrib/terraform/openstack/modules/compute/variables.tf new file mode 100644 index 0000000..9259fd9 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/compute/variables.tf @@ -0,0 +1,195 @@ +variable "cluster_name" {} + +variable "az_list" { + type = list(string) +} + +variable "az_list_node" { + type = list(string) +} + +variable "number_of_k8s_masters" {} + +variable "number_of_k8s_masters_no_etcd" {} + +variable "number_of_etcd" {} + +variable "number_of_k8s_masters_no_floating_ip" {} + +variable "number_of_k8s_masters_no_floating_ip_no_etcd" {} + +variable "number_of_k8s_nodes" {} + +variable "number_of_k8s_nodes_no_floating_ip" {} + +variable "number_of_bastions" {} + +variable "number_of_gfs_nodes_no_floating_ip" {} + +variable "bastion_root_volume_size_in_gb" {} + +variable "etcd_root_volume_size_in_gb" {} + +variable "master_root_volume_size_in_gb" {} + +variable "node_root_volume_size_in_gb" {} + +variable "gfs_root_volume_size_in_gb" {} + +variable "gfs_volume_size_in_gb" {} + +variable "master_volume_type" {} + +variable "node_volume_type" {} + +variable "public_key_path" {} + +variable "image" {} + +variable "image_gfs" {} + +variable "ssh_user" {} + +variable "ssh_user_gfs" {} + +variable "flavor_k8s_master" {} + +variable "flavor_k8s_node" {} + +variable "flavor_etcd" {} + +variable "flavor_gfs_node" {} + +variable "network_name" {} + +variable "flavor_bastion" {} + +variable "network_id" { + default = "" +} + +variable "use_existing_network" { + type = bool +} + +variable "network_router_id" { + default = "" +} + +variable "k8s_master_fips" { + type = list +} + +variable "k8s_master_no_etcd_fips" { + type = list +} + +variable "k8s_node_fips" { + type = list +} + +variable "k8s_masters_fips" { + type = map +} + +variable "k8s_nodes_fips" { + type = map +} + +variable "bastion_fips" { + type = list +} + +variable "bastion_allowed_remote_ips" { + type = list +} + +variable "master_allowed_remote_ips" { + type = list +} + +variable "k8s_allowed_remote_ips" { + type = list +} + +variable "k8s_allowed_egress_ips" { + type = list +} + +variable "k8s_masters" {} + +variable "k8s_nodes" {} + +variable "supplementary_master_groups" { + default = "" +} + +variable 
"supplementary_node_groups" { + default = "" +} + +variable "master_allowed_ports" { + type = list +} + +variable "worker_allowed_ports" { + type = list +} + +variable "bastion_allowed_ports" { + type = list +} + +variable "use_access_ip" {} + +variable "master_server_group_policy" { + type = string +} + +variable "node_server_group_policy" { + type = string +} + +variable "etcd_server_group_policy" { + type = string +} + +variable "extra_sec_groups" { + type = bool +} + +variable "extra_sec_groups_name" { + type = string +} + +variable "image_uuid" { + type = string +} + +variable "image_gfs_uuid" { + type = string +} + +variable "image_master" { + type = string +} + +variable "image_master_uuid" { + type = string +} + +variable "group_vars_path" { + type = string +} + +variable "port_security_enabled" { + type = bool +} + +variable "force_null_port_security" { + type = bool +} + +variable "private_subnet_id" { + type = string +} diff --git a/kubespray/contrib/terraform/openstack/modules/compute/versions.tf b/kubespray/contrib/terraform/openstack/modules/compute/versions.tf new file mode 100644 index 0000000..6c94279 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/compute/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + openstack = { + source = "terraform-provider-openstack/openstack" + } + } + required_version = ">= 0.12.26" +} diff --git a/kubespray/contrib/terraform/openstack/modules/ips/main.tf b/kubespray/contrib/terraform/openstack/modules/ips/main.tf new file mode 100644 index 0000000..3f962fd --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/ips/main.tf @@ -0,0 +1,47 @@ +resource "null_resource" "dummy_dependency" { + triggers = { + dependency_id = var.router_id + } + depends_on = [ + var.router_internal_port_id + ] +} + +# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones. +resource "openstack_networking_floatingip_v2" "k8s_master" { + count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters + pool = var.floatingip_pool + depends_on = [null_resource.dummy_dependency] +} + +resource "openstack_networking_floatingip_v2" "k8s_masters" { + for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {} + pool = var.floatingip_pool + depends_on = [null_resource.dummy_dependency] +} + +# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones. +resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" { + count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters_no_etcd + pool = var.floatingip_pool + depends_on = [null_resource.dummy_dependency] +} + +resource "openstack_networking_floatingip_v2" "k8s_node" { + count = var.number_of_k8s_nodes + pool = var.floatingip_pool + depends_on = [null_resource.dummy_dependency] +} + +resource "openstack_networking_floatingip_v2" "bastion" { + count = length(var.bastion_fips) > 0 ? 0 : var.number_of_bastions + pool = var.floatingip_pool + depends_on = [null_resource.dummy_dependency] +} + +resource "openstack_networking_floatingip_v2" "k8s_nodes" { + for_each = var.number_of_k8s_nodes == 0 ? 
{ for key, value in var.k8s_nodes : key => value if value.floating_ip } : {} + pool = var.floatingip_pool + depends_on = [null_resource.dummy_dependency] +} + diff --git a/kubespray/contrib/terraform/openstack/modules/ips/outputs.tf b/kubespray/contrib/terraform/openstack/modules/ips/outputs.tf new file mode 100644 index 0000000..3ff4622 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/ips/outputs.tf @@ -0,0 +1,25 @@ +# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created. +output "k8s_master_fips" { + value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address +} + +output "k8s_masters_fips" { + value = openstack_networking_floatingip_v2.k8s_masters +} + +# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created. +output "k8s_master_no_etcd_fips" { + value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address +} + +output "k8s_node_fips" { + value = openstack_networking_floatingip_v2.k8s_node[*].address +} + +output "k8s_nodes_fips" { + value = openstack_networking_floatingip_v2.k8s_nodes +} + +output "bastion_fips" { + value = length(var.bastion_fips) > 0 ? var.bastion_fips : openstack_networking_floatingip_v2.bastion[*].address +} diff --git a/kubespray/contrib/terraform/openstack/modules/ips/variables.tf b/kubespray/contrib/terraform/openstack/modules/ips/variables.tf new file mode 100644 index 0000000..b52888b --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/ips/variables.tf @@ -0,0 +1,27 @@ +variable "number_of_k8s_masters" {} + +variable "number_of_k8s_masters_no_etcd" {} + +variable "number_of_k8s_nodes" {} + +variable "floatingip_pool" {} + +variable "number_of_bastions" {} + +variable "external_net" {} + +variable "network_name" {} + +variable "router_id" { + default = "" +} + +variable "k8s_masters" {} + +variable "k8s_nodes" {} + +variable "k8s_master_fips" {} + +variable "bastion_fips" {} + +variable "router_internal_port_id" {} diff --git a/kubespray/contrib/terraform/openstack/modules/ips/versions.tf b/kubespray/contrib/terraform/openstack/modules/ips/versions.tf new file mode 100644 index 0000000..b7bf5a9 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/ips/versions.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + null = { + source = "hashicorp/null" + } + openstack = { + source = "terraform-provider-openstack/openstack" + } + } + required_version = ">= 0.12.26" +} diff --git a/kubespray/contrib/terraform/openstack/modules/network/main.tf b/kubespray/contrib/terraform/openstack/modules/network/main.tf new file mode 100644 index 0000000..a6324d7 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/network/main.tf @@ -0,0 +1,34 @@ +resource "openstack_networking_router_v2" "k8s" { + name = "${var.cluster_name}-router" + count = var.use_neutron == 1 && var.router_id == null ? 1 : 0 + admin_state_up = "true" + external_network_id = var.external_net +} + +data "openstack_networking_router_v2" "k8s" { + router_id = var.router_id + count = var.use_neutron == 1 && var.router_id != null ? 1 : 0 +} + +resource "openstack_networking_network_v2" "k8s" { + name = var.network_name + count = var.use_neutron + dns_domain = var.network_dns_domain != null ? 
var.network_dns_domain : null + admin_state_up = "true" + port_security_enabled = var.port_security_enabled +} + +resource "openstack_networking_subnet_v2" "k8s" { + name = "${var.cluster_name}-internal-network" + count = var.use_neutron + network_id = openstack_networking_network_v2.k8s[count.index].id + cidr = var.subnet_cidr + ip_version = 4 + dns_nameservers = var.dns_nameservers +} + +resource "openstack_networking_router_interface_v2" "k8s" { + count = var.use_neutron + router_id = "%{if openstack_networking_router_v2.k8s != []}${openstack_networking_router_v2.k8s[count.index].id}%{else}${var.router_id}%{endif}" + subnet_id = openstack_networking_subnet_v2.k8s[count.index].id +} diff --git a/kubespray/contrib/terraform/openstack/modules/network/outputs.tf b/kubespray/contrib/terraform/openstack/modules/network/outputs.tf new file mode 100644 index 0000000..0e8a500 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/network/outputs.tf @@ -0,0 +1,15 @@ +output "router_id" { + value = "%{if var.use_neutron == 1} ${var.router_id == null ? element(concat(openstack_networking_router_v2.k8s.*.id, [""]), 0) : var.router_id} %{else} %{endif}" +} + +output "network_id" { + value = element(concat(openstack_networking_network_v2.k8s.*.id, [""]),0) +} + +output "router_internal_port_id" { + value = element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0) +} + +output "subnet_id" { + value = element(concat(openstack_networking_subnet_v2.k8s.*.id, [""]), 0) +} diff --git a/kubespray/contrib/terraform/openstack/modules/network/variables.tf b/kubespray/contrib/terraform/openstack/modules/network/variables.tf new file mode 100644 index 0000000..6cd7ff7 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/network/variables.tf @@ -0,0 +1,21 @@ +variable "external_net" {} + +variable "network_name" {} + +variable "network_dns_domain" {} + +variable "cluster_name" {} + +variable "dns_nameservers" { + type = list +} + +variable "port_security_enabled" { + type = bool +} + +variable "subnet_cidr" {} + +variable "use_neutron" {} + +variable "router_id" {} diff --git a/kubespray/contrib/terraform/openstack/modules/network/versions.tf b/kubespray/contrib/terraform/openstack/modules/network/versions.tf new file mode 100644 index 0000000..6c94279 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/modules/network/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + openstack = { + source = "terraform-provider-openstack/openstack" + } + } + required_version = ">= 0.12.26" +} diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/cluster.tfvars b/kubespray/contrib/terraform/openstack/sample-inventory/cluster.tfvars new file mode 100644 index 0000000..3c25767 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/cluster.tfvars @@ -0,0 +1,89 @@ +# your Kubernetes cluster name here +cluster_name = "i-didnt-read-the-docs" + +# list of availability zones available in your OpenStack cluster +#az_list = ["nova"] + +# SSH key to use for access to nodes +public_key_path = "~/.ssh/id_rsa.pub" + +# image to use for bastion, masters, standalone etcd instances, and nodes +image = "" + +# user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.) 
+ssh_user = "" + +# 0|1 bastion nodes +number_of_bastions = 0 + +#flavor_bastion = "" + +# standalone etcds +number_of_etcd = 0 + +# masters +number_of_k8s_masters = 1 + +number_of_k8s_masters_no_etcd = 0 + +number_of_k8s_masters_no_floating_ip = 0 + +number_of_k8s_masters_no_floating_ip_no_etcd = 0 + +flavor_k8s_master = "" + +k8s_masters = { + # "master-1" = { + # "az" = "nova" + # "flavor" = "" + # "floating_ip" = true + # "etcd" = true + # }, + # "master-2" = { + # "az" = "nova" + # "flavor" = "" + # "floating_ip" = false + # "etcd" = true + # }, + # "master-3" = { + # "az" = "nova" + # "flavor" = "" + # "floating_ip" = true + # "etcd" = true + # }, +} + + +# nodes +number_of_k8s_nodes = 2 + +number_of_k8s_nodes_no_floating_ip = 4 + +#flavor_k8s_node = "" + +# GlusterFS +# either 0 or more than one +#number_of_gfs_nodes_no_floating_ip = 0 +#gfs_volume_size_in_gb = 150 +# Container Linux does not support GlusterFS +#image_gfs = "" +# May be different from other nodes +#ssh_user_gfs = "ubuntu" +#flavor_gfs_node = "" + +# networking +network_name = "" + +# Use an existing network with the name of network_name. Set to false to create a network with the name of network_name. +# use_existing_network = true + +external_net = "" + +subnet_cidr = "" + +floatingip_pool = "" + +bastion_allowed_remote_ips = ["0.0.0.0/0"] + +# Force port security to be null. Some cloud providers do not allow setting port security. +# force_null_port_security = false \ No newline at end of file diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/all.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/all.yml new file mode 100644 index 0000000..b9639a8 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/all.yml @@ -0,0 +1,140 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful in AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If Cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## It must be set to port 6443 +loadbalancer_apiserver_port: 6443 + +## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it uses the nameservers to make sure the cluster installs safely in the dns_early stage.
+## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need to exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will be installed from the source you specify +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Choose 'none' if you provide your own certificates. +## Options are "script" and "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set to true to download and cache container images +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually.
+# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. +ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/aws.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/azure.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. 
+## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/containerd.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. 
+# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/coreos.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/cri-o.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/docker.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## A disk path must be defined in docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## For example, define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registries, for example a China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value.
+## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/etcd.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/gcp.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/hcloud.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/oci.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ 
b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. +# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/offline.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo 
}}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Flannel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repos must be available, for EL8, baseos and appstream +### By default we enable those repos automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{
yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/openstack.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. 
+# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/upcloud.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/vsphere.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ 
b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/etcd.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. 
+# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/addons.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. See docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an external CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id:
admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# 
protocol: "layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... 
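+# For illustration only (hypothetical endpoint), enabling webhook token authentication
+# could look like:
+#   kube_webhook_token_auth: true
+#   kube_webhook_token_auth_url: https://auth.example.internal/authenticate
+# With this set, the API server POSTs a TokenReview object to that URL for every
+# bearer token it needs to verify.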
+# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. 
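+# For illustration (hypothetical subnet): setting the legacy variable
+#   kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24
+# makes the template below render kube_proxy_nodeport_addresses as ["10.0.1.0/24"],
+# so NodePort services are only exposed on addresses in that range; leaving it unset
+# keeps the default empty list and exposes NodePorts on all local addresses.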
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). +# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. 
+## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. +# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. 
+# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). 
+## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. +# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enable BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN is mutualy exclusive modes. 
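+# For illustration, two mutually exclusive combinations (values mirror the commented
+# settings below): a VXLAN-only setup would use calico_network_backend: vxlan,
+# calico_ipip_mode: 'Never' and calico_vxlan_mode: 'Always', while an IP-in-IP setup
+# would use calico_network_backend: bird, calico_ipip_mode: 'Always' and
+# calico_vxlan_mode: 'Never'.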
+# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want use the default route interface when you use multiple interface with dynamique route (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tunning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. +# If left blank, then the interface is choosing using the node's +# default route. +# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. 
+# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all conneted clusters and +# in the range of 1 and 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +# cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). +# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. 
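+# For illustration (hypothetical value, reusing this inventory's pod CIDR): setting
+#   cilium_native_routing_cidr: "10.233.64.0/18"
+# tells Cilium not to SNAT traffic destined for that range and to rely on the
+# underlying network to route it.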
+# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. 
+# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. 
+# cilium_disable_cnp_status_updates: true diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic to support container network can be a nic name or a group of regex separated by comma e.g: 'enp6s0f0,eth.*', if empty will use the nic that the default route use. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bind to the physical port. 
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/contrib/terraform/openstack/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. 
The feature works +# only with Weave IPAM (default). +# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/contrib/terraform/openstack/variables.tf b/kubespray/contrib/terraform/openstack/variables.tf new file mode 100644 index 0000000..821e442 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/variables.tf @@ -0,0 +1,335 @@ +variable "cluster_name" { + default = "example" +} + +variable "az_list" { + description = "List of Availability Zones to use for masters in your OpenStack cluster" + type = list(string) + default = ["nova"] +} + +variable "az_list_node" { + description = "List of Availability Zones to use for nodes in your OpenStack cluster" + type = list(string) + default = ["nova"] +} + +variable "number_of_bastions" { + default = 1 +} + +variable "number_of_k8s_masters" { + default = 2 +} + +variable "number_of_k8s_masters_no_etcd" { + default = 2 +} + +variable "number_of_etcd" { + default = 2 +} + +variable "number_of_k8s_masters_no_floating_ip" { + default = 2 +} + +variable "number_of_k8s_masters_no_floating_ip_no_etcd" { + default = 2 +} + +variable "number_of_k8s_nodes" { + default = 1 +} + +variable "number_of_k8s_nodes_no_floating_ip" { + default = 1 +} + +variable "number_of_gfs_nodes_no_floating_ip" { + default = 0 +} + +variable "bastion_root_volume_size_in_gb" { + default = 0 +} + +variable "etcd_root_volume_size_in_gb" { + default = 0 +} + +variable "master_root_volume_size_in_gb" { + default = 0 +} + +variable "node_root_volume_size_in_gb" { + default = 0 +} + +variable "gfs_root_volume_size_in_gb" { + default = 0 +} + +variable "gfs_volume_size_in_gb" { + default = 75 +} + +variable "master_volume_type" { + default = "Default" +} + +variable "node_volume_type" { + default = "Default" +} + +variable "public_key_path" { + description = "The path of the ssh pub key" + default = "~/.ssh/id_rsa.pub" +} + +variable "image" { + description = "the image to use" + default = "" +} + +variable "image_gfs" { + description = "Glance image to use for GlusterFS" + default = "" +} + +variable "ssh_user" { + description = "used to fill out tags for ansible inventory" + default = "ubuntu" +} + +variable "ssh_user_gfs" { + description = "used to fill out tags for ansible inventory" + default = "ubuntu" +} + +variable "flavor_bastion" { + description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" + default = 3 +} + +variable "flavor_k8s_master" { + description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" + default = 3 +} + +variable "flavor_k8s_node" { + description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" + default = 3 +} + +variable "flavor_etcd" { + description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" + default = 3 +} + +variable "flavor_gfs_node" { + description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" + default = 3 +} + +variable "network_name" { + description = "name of the internal network to use" + default = "internal" +} + +variable 
"use_existing_network" { + description = "Use an existing network" + type = bool + default = "false" +} + +variable "network_dns_domain" { + description = "dns_domain for the internal network" + type = string + default = null +} + +variable "use_neutron" { + description = "Use neutron" + default = 1 +} + +variable "port_security_enabled" { + description = "Enable port security on the internal network" + type = bool + default = "true" +} + +variable "force_null_port_security" { + description = "Force port security to be null. Some providers does not allow setting port security" + type = bool + default = "false" +} + +variable "subnet_cidr" { + description = "Subnet CIDR block." + type = string + default = "10.0.0.0/24" +} + +variable "dns_nameservers" { + description = "An array of DNS name server names used by hosts in this subnet." + type = list(string) + default = [] +} + +variable "k8s_master_fips" { + description = "specific pre-existing floating IPs to use for master nodes" + type = list(string) + default = [] +} + +variable "bastion_fips" { + description = "specific pre-existing floating IPs to use for bastion node" + type = list(string) + default = [] +} + +variable "floatingip_pool" { + description = "name of the floating ip pool to use" + default = "external" +} + +variable "wait_for_floatingip" { + description = "Terraform will poll the instance until the floating IP has been associated." + default = "false" +} + +variable "external_net" { + description = "uuid of the external/public network" +} + +variable "supplementary_master_groups" { + description = "supplementary kubespray ansible groups for masters, such kube_node" + default = "" +} + +variable "supplementary_node_groups" { + description = "supplementary kubespray ansible groups for worker nodes, such as kube_ingress" + default = "" +} + +variable "bastion_allowed_remote_ips" { + description = "An array of CIDRs allowed to SSH to hosts" + type = list(string) + default = ["0.0.0.0/0"] +} + +variable "master_allowed_remote_ips" { + description = "An array of CIDRs allowed to access API of masters" + type = list(string) + default = ["0.0.0.0/0"] +} + +variable "k8s_allowed_remote_ips" { + description = "An array of CIDRs allowed to SSH to hosts" + type = list(string) + default = [] +} + +variable "k8s_allowed_egress_ips" { + description = "An array of CIDRs allowed for egress traffic" + type = list(string) + default = ["0.0.0.0/0"] +} + +variable "master_allowed_ports" { + type = list(any) + + default = [] +} + +variable "worker_allowed_ports" { + type = list(any) + + default = [ + { + "protocol" = "tcp" + "port_range_min" = 30000 + "port_range_max" = 32767 + "remote_ip_prefix" = "0.0.0.0/0" + }, + ] +} + +variable "bastion_allowed_ports" { + type = list(any) + + default = [] +} + +variable "use_access_ip" { + default = 1 +} + +variable "master_server_group_policy" { + description = "desired server group policy, e.g. anti-affinity" + default = "" +} + +variable "node_server_group_policy" { + description = "desired server group policy, e.g. anti-affinity" + default = "" +} + +variable "etcd_server_group_policy" { + description = "desired server group policy, e.g. 
anti-affinity" + default = "" +} + +variable "router_id" { + description = "uuid of an externally defined router to use" + default = null +} + +variable "router_internal_port_id" { + description = "uuid of the port connection our router to our network" + default = null +} + +variable "k8s_masters" { + default = {} +} + +variable "k8s_nodes" { + default = {} +} + +variable "extra_sec_groups" { + default = false +} + +variable "extra_sec_groups_name" { + default = "custom" +} + +variable "image_uuid" { + description = "uuid of image inside openstack to use" + default = "" +} + +variable "image_gfs_uuid" { + description = "uuid of image to be used on gluster fs nodes. If empty defaults to image_uuid" + default = "" +} + +variable "image_master" { + description = "uuid of image inside openstack to use" + default = "" +} + +variable "image_master_uuid" { + description = "uuid of image to be used on master nodes. If empty defaults to image_uuid" + default = "" +} + +variable "group_vars_path" { + description = "path to the inventory group vars directory" + type = string + default = "./group_vars" +} diff --git a/kubespray/contrib/terraform/openstack/versions.tf b/kubespray/contrib/terraform/openstack/versions.tf new file mode 100644 index 0000000..9541063 --- /dev/null +++ b/kubespray/contrib/terraform/openstack/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + openstack = { + source = "terraform-provider-openstack/openstack" + version = "~> 1.17" + } + } + required_version = ">= 0.12.26" +} diff --git a/kubespray/contrib/terraform/terraform.py b/kubespray/contrib/terraform/terraform.py new file mode 100755 index 0000000..6c89e1c --- /dev/null +++ b/kubespray/contrib/terraform/terraform.py @@ -0,0 +1,458 @@ +#!/usr/bin/env python3 +# +# Copyright 2015 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# original: https://github.com/CiscoCloud/terraform.py + +"""\ +Dynamic inventory for Terraform - finds all `.tfstate` files below the working +directory and generates an inventory based on them. +""" +import argparse +from collections import defaultdict +import random +from functools import wraps +import json +import os +import re + +VERSION = '0.4.0pre' + + +def tfstates(root=None): + root = root or os.getcwd() + for dirpath, _, filenames in os.walk(root): + for name in filenames: + if os.path.splitext(name)[-1] == '.tfstate': + yield os.path.join(dirpath, name) + +def convert_to_v3_structure(attributes, prefix=''): + """ Convert the attributes from v4 to v3 + Receives a dict and return a dictionary """ + result = {} + if isinstance(attributes, str): + # In the case when we receive a string (e.g. 
values for security_groups) + return {'{}{}'.format(prefix, random.randint(1,10**10)): attributes} + for key, value in attributes.items(): + if isinstance(value, list): + if len(value): + result['{}{}.#'.format(prefix, key, hash)] = len(value) + for i, v in enumerate(value): + result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i))) + elif isinstance(value, dict): + result['{}{}.%'.format(prefix, key)] = len(value) + for k, v in value.items(): + result['{}{}.{}'.format(prefix, key, k)] = v + else: + result['{}{}'.format(prefix, key)] = value + return result + +def iterresources(filenames): + for filename in filenames: + with open(filename, 'r') as json_file: + state = json.load(json_file) + tf_version = state['version'] + if tf_version == 3: + for module in state['modules']: + name = module['path'][-1] + for key, resource in module['resources'].items(): + yield name, key, resource + elif tf_version == 4: + # In version 4 the structure changes so we need to iterate + # each instance inside the resource branch. + for resource in state['resources']: + name = resource['provider'].split('.')[-1] + for instance in resource['instances']: + key = "{}.{}".format(resource['type'], resource['name']) + if 'index_key' in instance: + key = "{}.{}".format(key, instance['index_key']) + data = {} + data['type'] = resource['type'] + data['provider'] = resource['provider'] + data['depends_on'] = instance.get('depends_on', []) + data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])} + if 'id' in instance['attributes']: + data['primary']['id'] = instance['attributes']['id'] + data['primary']['meta'] = instance['attributes'].get('meta',{}) + yield name, key, data + else: + raise KeyError('tfstate version %d not supported' % tf_version) + + +## READ RESOURCES +PARSERS = {} + + +def _clean_dc(dcname): + # Consul DCs are strictly alphanumeric with underscores and hyphens - + # ensure that the consul_dc attribute meets these requirements. 
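+    # For illustration: a dcname of "my.region/prod" would become "my-region-prod".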
+ return re.sub('[^\w_\-]', '-', dcname) + + +def iterhosts(resources): + '''yield host tuples of (name, attributes, groups)''' + for module_name, key, resource in resources: + resource_type, name = key.split('.', 1) + try: + parser = PARSERS[resource_type] + except KeyError: + continue + + yield parser(resource, module_name) + + +def iterips(resources): + '''yield ip tuples of (port_id, ip)''' + for module_name, key, resource in resources: + resource_type, name = key.split('.', 1) + if resource_type == 'openstack_networking_floatingip_associate_v2': + yield openstack_floating_ips(resource) + + +def parses(prefix): + def inner(func): + PARSERS[prefix] = func + return func + + return inner + + +def calculate_mantl_vars(func): + """calculate Mantl vars""" + + @wraps(func) + def inner(*args, **kwargs): + name, attrs, groups = func(*args, **kwargs) + + # attrs + if attrs.get('role', '') == 'control': + attrs['consul_is_server'] = True + else: + attrs['consul_is_server'] = False + + # groups + if attrs.get('publicly_routable', False): + groups.append('publicly_routable') + + return name, attrs, groups + + return inner + + +def _parse_prefix(source, prefix, sep='.'): + for compkey, value in list(source.items()): + try: + curprefix, rest = compkey.split(sep, 1) + except ValueError: + continue + + if curprefix != prefix or rest == '#': + continue + + yield rest, value + + +def parse_attr_list(source, prefix, sep='.'): + attrs = defaultdict(dict) + for compkey, value in _parse_prefix(source, prefix, sep): + idx, key = compkey.split(sep, 1) + attrs[idx][key] = value + + return list(attrs.values()) + + +def parse_dict(source, prefix, sep='.'): + return dict(_parse_prefix(source, prefix, sep)) + + +def parse_list(source, prefix, sep='.'): + return [value for _, value in _parse_prefix(source, prefix, sep)] + + +def parse_bool(string_form): + if type(string_form) is bool: + return string_form + + token = string_form.lower()[0] + + if token == 't': + return True + elif token == 'f': + return False + else: + raise ValueError('could not convert %r to a bool' % string_form) + + +@parses('metal_device') +def metal_device(resource, tfvars=None): + raw_attrs = resource['primary']['attributes'] + name = raw_attrs['hostname'] + groups = [] + + attrs = { + 'id': raw_attrs['id'], + 'facilities': parse_list(raw_attrs, 'facilities'), + 'hostname': raw_attrs['hostname'], + 'operating_system': raw_attrs['operating_system'], + 'locked': parse_bool(raw_attrs['locked']), + 'tags': parse_list(raw_attrs, 'tags'), + 'plan': raw_attrs['plan'], + 'project_id': raw_attrs['project_id'], + 'state': raw_attrs['state'], + # ansible + 'ansible_host': raw_attrs['network.0.address'], + 'ansible_ssh_user': 'root', # Use root by default in metal + # generic + 'ipv4_address': raw_attrs['network.0.address'], + 'public_ipv4': raw_attrs['network.0.address'], + 'ipv6_address': raw_attrs['network.1.address'], + 'public_ipv6': raw_attrs['network.1.address'], + 'private_ipv4': raw_attrs['network.2.address'], + 'provider': 'metal', + } + + if raw_attrs['operating_system'] == 'flatcar_stable': + # For Flatcar set the ssh_user to core + attrs.update({'ansible_ssh_user': 'core'}) + + # add groups based on attrs + groups.append('metal_operating_system=' + attrs['operating_system']) + groups.append('metal_locked=%s' % attrs['locked']) + groups.append('metal_state=' + attrs['state']) + groups.append('metal_plan=' + attrs['plan']) + + # groups specific to kubespray + groups = groups + attrs['tags'] + + return name, attrs, groups + + +def 
openstack_floating_ips(resource): + raw_attrs = resource['primary']['attributes'] + attrs = { + 'ip': raw_attrs['floating_ip'], + 'port_id': raw_attrs['port_id'], + } + return attrs + +def openstack_floating_ips(resource): + raw_attrs = resource['primary']['attributes'] + return raw_attrs['port_id'], raw_attrs['floating_ip'] + +@parses('openstack_compute_instance_v2') +@calculate_mantl_vars +def openstack_host(resource, module_name): + raw_attrs = resource['primary']['attributes'] + name = raw_attrs['name'] + groups = [] + + attrs = { + 'access_ip_v4': raw_attrs['access_ip_v4'], + 'access_ip_v6': raw_attrs['access_ip_v6'], + 'access_ip': raw_attrs['access_ip_v4'], + 'ip': raw_attrs['network.0.fixed_ip_v4'], + 'flavor': parse_dict(raw_attrs, 'flavor', + sep='_'), + 'id': raw_attrs['id'], + 'image': parse_dict(raw_attrs, 'image', + sep='_'), + 'key_pair': raw_attrs['key_pair'], + 'metadata': parse_dict(raw_attrs, 'metadata'), + 'network': parse_attr_list(raw_attrs, 'network'), + 'region': raw_attrs.get('region', ''), + 'security_groups': parse_list(raw_attrs, 'security_groups'), + # ansible + 'ansible_ssh_port': 22, + # workaround for an OpenStack bug where hosts have a different domain + # after they're restarted + 'host_domain': 'novalocal', + 'use_host_domain': True, + # generic + 'public_ipv4': raw_attrs['access_ip_v4'], + 'private_ipv4': raw_attrs['access_ip_v4'], + 'port_id' : raw_attrs['network.0.port'], + 'provider': 'openstack', + } + + if 'floating_ip' in raw_attrs: + attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4'] + + try: + if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1": + attrs.update({ + 'ansible_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']), + 'publicly_routable': True, + }) + else: + attrs.update({ + 'ansible_host': raw_attrs['access_ip_v4'], + 'publicly_routable': True, + }) + except (KeyError, ValueError): + attrs.update({'ansible_host': '', 'publicly_routable': False}) + + # Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017 + + # attrs specific to Ansible + if 'metadata.ssh_user' in raw_attrs: + attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user'] + + if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0: + device_index = 1 + for key, value in list(raw_attrs.items()): + match = re.search("^volume.*.device$", key) + if match: + attrs['disk_volume_device_'+str(device_index)] = value + device_index += 1 + + + # attrs specific to Mantl + attrs.update({ + 'role': attrs['metadata'].get('role', 'none') + }) + + # add groups based on attrs + groups.append('os_image=' + str(attrs['image']['id'])) + groups.append('os_flavor=' + str(attrs['flavor']['name'])) + groups.extend('os_metadata_%s=%s' % item + for item in list(attrs['metadata'].items())) + groups.append('os_region=' + str(attrs['region'])) + + # groups specific to kubespray + for group in attrs['metadata'].get('kubespray_groups', "").split(","): + groups.append(group) + + return name, attrs, groups + + +def iter_host_ips(hosts, ips): + '''Update hosts that have an entry in the floating IP list''' + for host in hosts: + port_id = host[1]['port_id'] + + if port_id in ips: + ip = ips[port_id] + + host[1].update({ + 'access_ip_v4': ip, + 'access_ip': ip, + 'public_ipv4': ip, + 'ansible_host': ip, + }) + + if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0": + host[1].pop('access_ip') + + yield host + + +## QUERY 
TYPES +def query_host(hosts, target): + for name, attrs, _ in hosts: + if name == target: + return attrs + + return {} + + +def query_list(hosts): + groups = defaultdict(dict) + meta = {} + + for name, attrs, hostgroups in hosts: + for group in set(hostgroups): + # Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf + # Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all" + if not group: group = "all" + + groups[group].setdefault('hosts', []) + groups[group]['hosts'].append(name) + + meta[name] = attrs + + groups['_meta'] = {'hostvars': meta} + return groups + + +def query_hostfile(hosts): + out = ['## begin hosts generated by terraform.py ##'] + out.extend( + '{}\t{}'.format(attrs['ansible_host'].ljust(16), name) + for name, attrs, _ in hosts + ) + + out.append('## end hosts generated by terraform.py ##') + return '\n'.join(out) + + +def main(): + parser = argparse.ArgumentParser( + __file__, __doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) + modes = parser.add_mutually_exclusive_group(required=True) + modes.add_argument('--list', + action='store_true', + help='list all variables') + modes.add_argument('--host', help='list variables for a single host') + modes.add_argument('--version', + action='store_true', + help='print version and exit') + modes.add_argument('--hostfile', + action='store_true', + help='print hosts as a /etc/hosts snippet') + parser.add_argument('--pretty', + action='store_true', + help='pretty-print output JSON') + parser.add_argument('--nometa', + action='store_true', + help='with --list, exclude hostvars') + default_root = os.environ.get('TERRAFORM_STATE_ROOT', + os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', '..', ))) + parser.add_argument('--root', + default=default_root, + help='custom root to search for `.tfstate`s in') + + args = parser.parse_args() + + if args.version: + print('%s %s' % (__file__, VERSION)) + parser.exit() + + hosts = iterhosts(iterresources(tfstates(args.root))) + + # Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts + ips = dict(iterips(iterresources(tfstates(args.root)))) + + if ips: + hosts = iter_host_ips(hosts, ips) + + if args.list: + output = query_list(hosts) + if args.nometa: + del output['_meta'] + print(json.dumps(output, indent=4 if args.pretty else None)) + elif args.host: + output = query_host(hosts, args.host) + print(json.dumps(output, indent=4 if args.pretty else None)) + elif args.hostfile: + output = query_hostfile(hosts) + print(output) + + parser.exit() + + +if __name__ == '__main__': + main() diff --git a/kubespray/contrib/terraform/upcloud/README.md b/kubespray/contrib/terraform/upcloud/README.md new file mode 100644 index 0000000..5689831 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/README.md @@ -0,0 +1,139 @@ +# Kubernetes on UpCloud with Terraform + +Provision a Kubernetes cluster on [UpCloud](https://upcloud.com/) using Terraform and Kubespray + +## Overview + +The setup looks like following + +```text + Kubernetes cluster ++--------------------------+ +| +--------------+ | +| | +--------------+ | +| --> | | | | +| | | Master/etcd | | +| | | node(s) | | +| +-+ | | +| +--------------+ | +| ^ | +| | | +| v | +| +--------------+ | +| | +--------------+ | +| --> | | | | +| | | Worker | | +| | | node(s) | | +| +-+ | | +| +--------------+ | ++--------------------------+ +``` + +The 
nodes use a private network for node-to-node communication and a public interface for all external communication.
+
+## Requirements
+
+* Terraform 0.13.0 or newer
+
+## Quickstart
+
+NOTE: Assumes you are at the root of the kubespray repo.
+
+For authentication against UpCloud you can use the following environment variables.
+
+```bash
+export TF_VAR_UPCLOUD_USERNAME=username
+export TF_VAR_UPCLOUD_PASSWORD=password
+```
+
+To allow API access to your UpCloud account, enable API connections on the [Account page](https://hub.upcloud.com/account) in your UpCloud Hub.
+
+Copy the sample inventory and the cluster configuration file.
+
+```bash
+CLUSTER=my-upcloud-cluster
+cp -r inventory/sample inventory/$CLUSTER
+cp contrib/terraform/upcloud/cluster-settings.tfvars inventory/$CLUSTER/
+export ANSIBLE_CONFIG=ansible.cfg
+cd inventory/$CLUSTER
+```
+
+Edit `cluster-settings.tfvars` to match your requirements.
+
+Run Terraform to create the infrastructure.
+
+```bash
+terraform init ../../contrib/terraform/upcloud
+terraform apply --var-file cluster-settings.tfvars \
+    -state=tfstate-$CLUSTER.tfstate \
+    ../../contrib/terraform/upcloud/
+```
+
+You should now have an inventory file named `inventory.ini` that you can use with kubespray to set up a cluster.
+
+It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by running:
+
+```bash
+ansible -i inventory.ini -m ping all
+```
+
+You can set up Kubernetes with kubespray using the generated inventory:
+
+```bash
+ansible-playbook -i inventory.ini ../../cluster.yml -b -v
+```
+
+## Teardown
+
+You can tear down your infrastructure using the following Terraform command:
+
+```bash
+terraform destroy --var-file cluster-settings.tfvars \
+    -state=tfstate-$CLUSTER.tfstate \
+    ../../contrib/terraform/upcloud/
+```
+
+## Variables
+
+* `prefix`: Prefix to add to all resources; if set to "", no prefix is added
+* `template_name`: The name or UUID of a base image
+* `username`: A user to access the nodes, defaults to "ubuntu"
+* `private_network_cidr`: CIDR to use for the private network, defaults to "172.16.0.0/24"
+* `ssh_public_keys`: List of public SSH keys to install on all machines
+* `zone`: The zone in which to run the cluster
+* `machines`: Machines to provision. The key of this object will be used as the name of the machine
+  * `node_type`: The role of this node *(master|worker)*
+  * `plan`: Preconfigured cpu/mem plan to use (disables `cpu` and `mem` attributes below)
+  * `cpu`: Number of CPU cores
+  * `mem`: Memory size in MB
+  * `disk_size`: The size of the storage in GB
+  * `additional_disks`: Additional disks to attach to the node.
+    * `size`: The size of the additional disk in GB
+    * `tier`: The tier of disk to use (`maxiops` is currently the only option)
+* `firewall_enabled`: Enable firewall rules
+* `firewall_default_deny_in`: Set the firewall to deny inbound traffic by default. Automatically adds UpCloud DNS server and NTP port allowlisting.
+* `firewall_default_deny_out`: Set the firewall to deny outbound traffic by default.
+* `master_allowed_remote_ips`: List of IP ranges that should be allowed to access the API of the masters (see the sketch after this list)
+  * `start_address`: Start of address range to allow
+  * `end_address`: End of address range to allow
+* `k8s_allowed_remote_ips`: List of IP ranges that should be allowed SSH access to all nodes
+  * `start_address`: Start of address range to allow
+  * `end_address`: End of address range to allow
+* `master_allowed_ports`: List of port ranges that should be allowed to access the masters
+  * `protocol`: Protocol *(tcp|udp|icmp)*
+  * `port_range_min`: Start of port range to allow
+  * `port_range_max`: End of port range to allow
+  * `start_address`: Start of address range to allow
+  * `end_address`: End of address range to allow
+* `worker_allowed_ports`: List of port ranges that should be allowed to access the workers
+  * `protocol`: Protocol *(tcp|udp|icmp)*
+  * `port_range_min`: Start of port range to allow
+  * `port_range_max`: End of port range to allow
+  * `start_address`: Start of address range to allow
+  * `end_address`: End of address range to allow
+* `loadbalancer_enabled`: Enable managed load balancer
+* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)*
+* `loadbalancers`: Ports to load balance and which machines to forward to. The key of this object will be used as the name of the load balancer frontends/backends
+  * `port`: Port to load balance.
+  * `backend_servers`: List of servers that traffic to the port should be forwarded to.
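The firewall and load-balancer variables above map directly onto the object types declared in `modules/kubernetes-cluster/variables.tf` later in this change. As a minimal sketch of how such entries can look in `cluster-settings.tfvars`: the addresses and the port range below are purely illustrative assumptions (the shipped tfvars files leave `master_allowed_ports` and `worker_allowed_ports` empty), and the rules only take effect once `firewall_enabled` is set to `true`.

```hcl
# Illustrative values only; restrict these ranges to your own management networks.
master_allowed_remote_ips = [
  {
    "start_address" : "192.0.2.0"
    "end_address"   : "192.0.2.255"
  }
]

# Hypothetical example: allow the Kubernetes NodePort range on the masters.
master_allowed_ports = [
  {
    "protocol"       : "tcp"
    "port_range_min" : 30000
    "port_range_max" : 32767
    "start_address"  : "0.0.0.0"
    "end_address"    : "255.255.255.255"
  }
]
```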
diff --git a/kubespray/contrib/terraform/upcloud/cluster-settings.tfvars b/kubespray/contrib/terraform/upcloud/cluster-settings.tfvars
new file mode 100644
index 0000000..873150b
--- /dev/null
+++ b/kubespray/contrib/terraform/upcloud/cluster-settings.tfvars
@@ -0,0 +1,130 @@
+# See: https://developers.upcloud.com/1.3/5-zones/
+zone = "fi-hel1"
+username = "ubuntu"
+
+# Prefix to use for all resources to separate them from other resources
+prefix = "kubespray"
+
+inventory_file = "inventory.ini"
+
+# Set the operating system using UUID or exact name
+template_name = "Ubuntu Server 20.04 LTS (Focal Fossa)"
+
+ssh_public_keys = [
+  # Put your public SSH key here
+  "ssh-rsa public key 1",
+  "ssh-rsa public key 2",
+]
+
+# check the list of available plans: https://developers.upcloud.com/1.3/7-plans/
+machines = {
+  "master-0" : {
+    "node_type" : "master",
+    # plan to use instead of custom cpu/mem
+    "plan" : null,
+    #number of cpu cores
+    "cpu" : "2",
+    #memory size in MB
+    "mem" : "4096"
+    # The size of the storage in GB
+    "disk_size" : 250
+    "additional_disks" : {}
+  },
+  "worker-0" : {
+    "node_type" : "worker",
+    # plan to use instead of custom cpu/mem
+    "plan" : null,
+    #number of cpu cores
+    "cpu" : "2",
+    #memory size in MB
+    "mem" : "4096"
+    # The size of the storage in GB
+    "disk_size" : 250
+    "additional_disks" : {
+      # "some-disk-name-1": {
+      # "size": 100,
+      # "tier": "maxiops",
+      # },
+      # "some-disk-name-2": {
+      # "size": 100,
+      # "tier": "maxiops",
+      # }
+    }
+  },
+  "worker-1" : {
+    "node_type" : "worker",
+    # plan to use instead of custom cpu/mem
+    "plan" : null,
+    #number of cpu cores
+    "cpu" : "2",
+    #memory size in MB
+    "mem" : "4096"
+    # The size of the storage in GB
+    "disk_size" : 250
+    "additional_disks" : {
+      # "some-disk-name-1": {
+      # "size": 100,
+      # "tier": "maxiops",
+      # },
+      # "some-disk-name-2": {
+      # "size": 100,
+      # "tier": "maxiops",
+      # }
+    }
+  },
+  "worker-2" : {
+    "node_type" : "worker",
+    # plan to use instead of custom cpu/mem
+    "plan" : null,
+    #number of cpu cores
+    "cpu" : "2",
+    #memory size in MB
+ "mem" : "4096" + # The size of the storage in GB + "disk_size" : 250 + "additional_disks" : { + # "some-disk-name-1": { + # "size": 100, + # "tier": "maxiops", + # }, + # "some-disk-name-2": { + # "size": 100, + # "tier": "maxiops", + # } + } + } +} + +firewall_enabled = false +firewall_default_deny_in = false +firewall_default_deny_out = false + +master_allowed_remote_ips = [ + { + "start_address" : "0.0.0.0" + "end_address" : "255.255.255.255" + } +] + +k8s_allowed_remote_ips = [ + { + "start_address" : "0.0.0.0" + "end_address" : "255.255.255.255" + } +] + +master_allowed_ports = [] +worker_allowed_ports = [] + +loadbalancer_enabled = false +loadbalancer_plan = "development" +loadbalancers = { + # "http" : { + # "port" : 80, + # "backend_servers" : [ + # "worker-0", + # "worker-1", + # "worker-2" + # ] + # } +} diff --git a/kubespray/contrib/terraform/upcloud/main.tf b/kubespray/contrib/terraform/upcloud/main.tf new file mode 100644 index 0000000..9904ce2 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/main.tf @@ -0,0 +1,71 @@ + +terraform { + required_version = ">= 0.13.0" +} +provider "upcloud" { + # Your UpCloud credentials are read from environment variables: + username = var.UPCLOUD_USERNAME + password = var.UPCLOUD_PASSWORD +} + +module "kubernetes" { + source = "./modules/kubernetes-cluster" + + prefix = var.prefix + zone = var.zone + + template_name = var.template_name + username = var.username + + private_network_cidr = var.private_network_cidr + + machines = var.machines + + ssh_public_keys = var.ssh_public_keys + + firewall_enabled = var.firewall_enabled + firewall_default_deny_in = var.firewall_default_deny_in + firewall_default_deny_out = var.firewall_default_deny_out + master_allowed_remote_ips = var.master_allowed_remote_ips + k8s_allowed_remote_ips = var.k8s_allowed_remote_ips + master_allowed_ports = var.master_allowed_ports + worker_allowed_ports = var.worker_allowed_ports + + loadbalancer_enabled = var.loadbalancer_enabled + loadbalancer_plan = var.loadbalancer_plan + loadbalancers = var.loadbalancers +} + +# +# Generate ansible inventory +# + +data "template_file" "inventory" { + template = file("${path.module}/templates/inventory.tpl") + + vars = { + connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d", + keys(module.kubernetes.master_ip), + values(module.kubernetes.master_ip).*.public_ip, + values(module.kubernetes.master_ip).*.private_ip, + range(1, length(module.kubernetes.master_ip) + 1))) + connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s", + keys(module.kubernetes.worker_ip), + values(module.kubernetes.worker_ip).*.public_ip, + values(module.kubernetes.worker_ip).*.private_ip)) + list_master = join("\n", formatlist("%s", + keys(module.kubernetes.master_ip))) + list_worker = join("\n", formatlist("%s", + keys(module.kubernetes.worker_ip))) + } +} + +resource "null_resource" "inventories" { + provisioner "local-exec" { + command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}" + } + + triggers = { + template = data.template_file.inventory.rendered + } +} diff --git a/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf b/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf new file mode 100644 index 0000000..afa8019 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf @@ -0,0 +1,550 @@ +locals { + # Create a list of all disks to create + 
disks = flatten([ + for node_name, machine in var.machines : [ + for disk_name, disk in machine.additional_disks : { + disk = disk + disk_name = disk_name + node_name = node_name + } + ] + ]) + + lb_backend_servers = flatten([ + for lb_name, loadbalancer in var.loadbalancers : [ + for backend_server in loadbalancer.backend_servers : { + port = loadbalancer.port + lb_name = lb_name + server_name = backend_server + } + ] + ]) + + # If prefix is set, all resources will be prefixed with "${var.prefix}-" + # Else don't prefix with anything + resource-prefix = "%{ if var.prefix != ""}${var.prefix}-%{ endif }" +} + +resource "upcloud_network" "private" { + name = "${local.resource-prefix}k8s-network" + zone = var.zone + + ip_network { + address = var.private_network_cidr + dhcp = true + family = "IPv4" + } +} + +resource "upcloud_storage" "additional_disks" { + for_each = { + for disk in local.disks: "${disk.node_name}_${disk.disk_name}" => disk.disk + } + + size = each.value.size + tier = each.value.tier + title = "${local.resource-prefix}${each.key}" + zone = var.zone +} + +resource "upcloud_server" "master" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "master" + } + + hostname = "${local.resource-prefix}${each.key}" + plan = each.value.plan + cpu = each.value.plan == null ? each.value.cpu : null + mem = each.value.plan == null ? each.value.mem : null + zone = var.zone + + template { + storage = var.template_name + size = each.value.disk_size + } + + # Public network interface + network_interface { + type = "public" + } + + # Private network interface + network_interface { + type = "private" + network = upcloud_network.private.id + } + + # Ignore volumes created by csi-driver + lifecycle { + ignore_changes = [storage_devices] + } + + firewall = var.firewall_enabled + + dynamic "storage_devices" { + for_each = { + for disk_key_name, disk in upcloud_storage.additional_disks : + disk_key_name => disk + # Only add the disk if it matches the node name in the start of its name + if length(regexall("^${each.key}_.+", disk_key_name)) > 0 + } + + content { + storage = storage_devices.value.id + } + } + + # Include at least one public SSH key + login { + user = var.username + keys = var.ssh_public_keys + create_password = false + } +} + +resource "upcloud_server" "worker" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "worker" + } + + hostname = "${local.resource-prefix}${each.key}" + plan = each.value.plan + cpu = each.value.plan == null ? each.value.cpu : null + mem = each.value.plan == null ? 
each.value.mem : null + zone = var.zone + + template { + storage = var.template_name + size = each.value.disk_size + } + + # Public network interface + network_interface { + type = "public" + } + + # Private network interface + network_interface { + type = "private" + network = upcloud_network.private.id + } + + # Ignore volumes created by csi-driver + lifecycle { + ignore_changes = [storage_devices] + } + + firewall = var.firewall_enabled + + dynamic "storage_devices" { + for_each = { + for disk_key_name, disk in upcloud_storage.additional_disks : + disk_key_name => disk + # Only add the disk if it matches the node name in the start of its name + if length(regexall("^${each.key}_.+", disk_key_name)) > 0 + } + + content { + storage = storage_devices.value.id + } + } + + # Include at least one public SSH key + login { + user = var.username + keys = var.ssh_public_keys + create_password = false + } +} + +resource "upcloud_firewall_rules" "master" { + for_each = upcloud_server.master + server_id = each.value.id + + dynamic firewall_rule { + for_each = var.master_allowed_remote_ips + + content { + action = "accept" + comment = "Allow master API access from this network" + destination_port_end = "6443" + destination_port_start = "6443" + direction = "in" + family = "IPv4" + protocol = "tcp" + source_address_end = firewall_rule.value.end_address + source_address_start = firewall_rule.value.start_address + } + } + + dynamic firewall_rule { + for_each = length(var.master_allowed_remote_ips) > 0 ? [1] : [] + + content { + action = "drop" + comment = "Deny master API access from other networks" + destination_port_end = "6443" + destination_port_start = "6443" + direction = "in" + family = "IPv4" + protocol = "tcp" + source_address_end = "255.255.255.255" + source_address_start = "0.0.0.0" + } + } + + dynamic firewall_rule { + for_each = var.k8s_allowed_remote_ips + + content { + action = "accept" + comment = "Allow SSH from this network" + destination_port_end = "22" + destination_port_start = "22" + direction = "in" + family = "IPv4" + protocol = "tcp" + source_address_end = firewall_rule.value.end_address + source_address_start = firewall_rule.value.start_address + } + } + + dynamic firewall_rule { + for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : [] + + content { + action = "drop" + comment = "Deny SSH from other networks" + destination_port_end = "22" + destination_port_start = "22" + direction = "in" + family = "IPv4" + protocol = "tcp" + source_address_end = "255.255.255.255" + source_address_start = "0.0.0.0" + } + } + + dynamic firewall_rule { + for_each = var.master_allowed_ports + + content { + action = "accept" + comment = "Allow access on this port" + destination_port_end = firewall_rule.value.port_range_max + destination_port_start = firewall_rule.value.port_range_min + direction = "in" + family = "IPv4" + protocol = firewall_rule.value.protocol + source_address_end = firewall_rule.value.end_address + source_address_start = firewall_rule.value.start_address + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] + + content { + action = "accept" + comment = "UpCloud DNS" + source_port_end = "53" + source_port_start = "53" + direction = "in" + family = "IPv4" + protocol = firewall_rule.value + source_address_end = "94.237.40.9" + source_address_start = "94.237.40.9" + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? 
["tcp", "udp"] : [] + + content { + action = "accept" + comment = "UpCloud DNS" + source_port_end = "53" + source_port_start = "53" + direction = "in" + family = "IPv4" + protocol = firewall_rule.value + source_address_end = "94.237.127.9" + source_address_start = "94.237.127.9" + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] + + content { + action = "accept" + comment = "UpCloud DNS" + source_port_end = "53" + source_port_start = "53" + direction = "in" + family = "IPv6" + protocol = firewall_rule.value + source_address_end = "2a04:3540:53::1" + source_address_start = "2a04:3540:53::1" + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] + + content { + action = "accept" + comment = "UpCloud DNS" + source_port_end = "53" + source_port_start = "53" + direction = "in" + family = "IPv6" + protocol = firewall_rule.value + source_address_end = "2a04:3544:53::1" + source_address_start = "2a04:3544:53::1" + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? ["udp"] : [] + + content { + action = "accept" + comment = "NTP Port" + source_port_end = "123" + source_port_start = "123" + direction = "in" + family = "IPv4" + protocol = firewall_rule.value + source_address_end = "255.255.255.255" + source_address_start = "0.0.0.0" + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? ["udp"] : [] + + content { + action = "accept" + comment = "NTP Port" + source_port_end = "123" + source_port_start = "123" + direction = "in" + family = "IPv6" + protocol = firewall_rule.value + } + } + + firewall_rule { + action = var.firewall_default_deny_in ? "drop" : "accept" + direction = "in" + } + + firewall_rule { + action = var.firewall_default_deny_out ? "drop" : "accept" + direction = "out" + } +} + +resource "upcloud_firewall_rules" "k8s" { + for_each = upcloud_server.worker + server_id = each.value.id + + dynamic firewall_rule { + for_each = var.k8s_allowed_remote_ips + + content { + action = "accept" + comment = "Allow SSH from this network" + destination_port_end = "22" + destination_port_start = "22" + direction = "in" + family = "IPv4" + protocol = "tcp" + source_address_end = firewall_rule.value.end_address + source_address_start = firewall_rule.value.start_address + } + } + + dynamic firewall_rule { + for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : [] + + content { + action = "drop" + comment = "Deny SSH from other networks" + destination_port_end = "22" + destination_port_start = "22" + direction = "in" + family = "IPv4" + protocol = "tcp" + source_address_end = "255.255.255.255" + source_address_start = "0.0.0.0" + } + } + + dynamic firewall_rule { + for_each = var.worker_allowed_ports + + content { + action = "accept" + comment = "Allow access on this port" + destination_port_end = firewall_rule.value.port_range_max + destination_port_start = firewall_rule.value.port_range_min + direction = "in" + family = "IPv4" + protocol = firewall_rule.value.protocol + source_address_end = firewall_rule.value.end_address + source_address_start = firewall_rule.value.start_address + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? 
["tcp", "udp"] : [] + + content { + action = "accept" + comment = "UpCloud DNS" + source_port_end = "53" + source_port_start = "53" + direction = "in" + family = "IPv4" + protocol = firewall_rule.value + source_address_end = "94.237.40.9" + source_address_start = "94.237.40.9" + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] + + content { + action = "accept" + comment = "UpCloud DNS" + source_port_end = "53" + source_port_start = "53" + direction = "in" + family = "IPv4" + protocol = firewall_rule.value + source_address_end = "94.237.127.9" + source_address_start = "94.237.127.9" + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] + + content { + action = "accept" + comment = "UpCloud DNS" + source_port_end = "53" + source_port_start = "53" + direction = "in" + family = "IPv6" + protocol = firewall_rule.value + source_address_end = "2a04:3540:53::1" + source_address_start = "2a04:3540:53::1" + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] + + content { + action = "accept" + comment = "UpCloud DNS" + source_port_end = "53" + source_port_start = "53" + direction = "in" + family = "IPv6" + protocol = firewall_rule.value + source_address_end = "2a04:3544:53::1" + source_address_start = "2a04:3544:53::1" + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? ["udp"] : [] + + content { + action = "accept" + comment = "NTP Port" + source_port_end = "123" + source_port_start = "123" + direction = "in" + family = "IPv4" + protocol = firewall_rule.value + source_address_end = "255.255.255.255" + source_address_start = "0.0.0.0" + } + } + + dynamic firewall_rule { + for_each = var.firewall_default_deny_in ? ["udp"] : [] + + content { + action = "accept" + comment = "NTP Port" + source_port_end = "123" + source_port_start = "123" + direction = "in" + family = "IPv6" + protocol = firewall_rule.value + } + } + + firewall_rule { + action = var.firewall_default_deny_in ? "drop" : "accept" + direction = "in" + } + + firewall_rule { + action = var.firewall_default_deny_out ? "drop" : "accept" + direction = "out" + } +} + +resource "upcloud_loadbalancer" "lb" { + count = var.loadbalancer_enabled ? 1 : 0 + configured_status = "started" + name = "${local.resource-prefix}lb" + plan = var.loadbalancer_plan + zone = var.zone + network = upcloud_network.private.id +} + +resource "upcloud_loadbalancer_backend" "lb_backend" { + for_each = var.loadbalancer_enabled ? var.loadbalancers : {} + + loadbalancer = upcloud_loadbalancer.lb[0].id + name = "lb-backend-${each.key}" +} + +resource "upcloud_loadbalancer_frontend" "lb_frontend" { + for_each = var.loadbalancer_enabled ? 
var.loadbalancers : {} + + loadbalancer = upcloud_loadbalancer.lb[0].id + name = "lb-frontend-${each.key}" + mode = "tcp" + port = each.value.port + default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name +} + +resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" { + for_each = { + for be_server in local.lb_backend_servers: + "${be_server.server_name}-lb-backend-${be_server.lb_name}" => be_server + if var.loadbalancer_enabled + } + + backend = upcloud_loadbalancer_backend.lb_backend[each.value.lb_name].id + name = "${local.resource-prefix}${each.key}" + ip = merge(upcloud_server.master, upcloud_server.worker)[each.value.server_name].network_interface[1].ip_address + port = each.value.port + weight = 100 + max_sessions = var.loadbalancer_plan == "production-small" ? 50000 : 1000 + enabled = true +} diff --git a/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/output.tf b/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/output.tf new file mode 100644 index 0000000..c1f8c7c --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/output.tf @@ -0,0 +1,24 @@ + +output "master_ip" { + value = { + for instance in upcloud_server.master : + instance.hostname => { + "public_ip": instance.network_interface[0].ip_address + "private_ip": instance.network_interface[1].ip_address + } + } +} + +output "worker_ip" { + value = { + for instance in upcloud_server.worker : + instance.hostname => { + "public_ip": instance.network_interface[0].ip_address + "private_ip": instance.network_interface[1].ip_address + } + } +} + +output "loadbalancer_domain" { + value = var.loadbalancer_enabled ? upcloud_loadbalancer.lb[0].dns_name : null +} diff --git a/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf b/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf new file mode 100644 index 0000000..1b33444 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf @@ -0,0 +1,95 @@ +variable "prefix" { + type = string +} + +variable "zone" { + type = string +} + +variable "template_name" {} + +variable "username" {} + +variable "private_network_cidr" {} + +variable "machines" { + description = "Cluster machines" + type = map(object({ + node_type = string + plan = string + cpu = string + mem = string + disk_size = number + additional_disks = map(object({ + size = number + tier = string + })) + })) +} + +variable "ssh_public_keys" { + type = list(string) +} + +variable "firewall_enabled" { + type = bool +} + +variable "master_allowed_remote_ips" { + type = list(object({ + start_address = string + end_address = string + })) +} + +variable "k8s_allowed_remote_ips" { + type = list(object({ + start_address = string + end_address = string + })) +} + +variable "master_allowed_ports" { + type = list(object({ + protocol = string + port_range_min = number + port_range_max = number + start_address = string + end_address = string + })) +} + +variable "worker_allowed_ports" { + type = list(object({ + protocol = string + port_range_min = number + port_range_max = number + start_address = string + end_address = string + })) +} + +variable "firewall_default_deny_in" { + type = bool +} + +variable "firewall_default_deny_out" { + type = bool +} + +variable "loadbalancer_enabled" { + type = bool +} + +variable "loadbalancer_plan" { + type = string +} + +variable "loadbalancers" { + description = "Load balancers" + + type = map(object({ + port = number + 
backend_servers = list(string) + })) +} diff --git a/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf b/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf new file mode 100644 index 0000000..447ba84 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf @@ -0,0 +1,10 @@ + +terraform { + required_providers { + upcloud = { + source = "UpCloudLtd/upcloud" + version = "~>2.5.0" + } + } + required_version = ">= 0.13" +} diff --git a/kubespray/contrib/terraform/upcloud/output.tf b/kubespray/contrib/terraform/upcloud/output.tf new file mode 100644 index 0000000..006e3b1 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/output.tf @@ -0,0 +1,12 @@ + +output "master_ip" { + value = module.kubernetes.master_ip +} + +output "worker_ip" { + value = module.kubernetes.worker_ip +} + +output "loadbalancer_domain" { + value = module.kubernetes.loadbalancer_domain +} diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/cluster.tfvars b/kubespray/contrib/terraform/upcloud/sample-inventory/cluster.tfvars new file mode 100644 index 0000000..b98a853 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/cluster.tfvars @@ -0,0 +1,131 @@ +# See: https://developers.upcloud.com/1.3/5-zones/ +zone = "fi-hel1" +username = "ubuntu" + +# Prefix to use for all resources to separate them from other resources +prefix = "kubespray" + +inventory_file = "inventory.ini" + +# Set the operating system using UUID or exact name +template_name = "Ubuntu Server 20.04 LTS (Focal Fossa)" + +ssh_public_keys = [ + # Put your public SSH key here + "ssh-rsa I-did-not-read-the-docs", + "ssh-rsa I-did-not-read-the-docs 2", +] + +# check list of available plan https://developers.upcloud.com/1.3/7-plans/ +machines = { + "master-0" : { + "node_type" : "master", + # plan to use instead of custom cpu/mem + "plan" : null, + #number of cpu cores + "cpu" : "2", + #memory size in MB + "mem" : "4096" + # The size of the storage in GB + "disk_size" : 250 + "additional_disks": {} + }, + "worker-0" : { + "node_type" : "worker", + # plan to use instead of custom cpu/mem + "plan" : null, + #number of cpu cores + "cpu" : "2", + #memory size in MB + "mem" : "4096" + # The size of the storage in GB + "disk_size" : 250 + "additional_disks": { + # "some-disk-name-1": { + # "size": 100, + # "tier": "maxiops", + # }, + # "some-disk-name-2": { + # "size": 100, + # "tier": "maxiops", + # } + } + }, + "worker-1" : { + "node_type" : "worker", + # plan to use instead of custom cpu/mem + "plan" : null, + #number of cpu cores + "cpu" : "2", + #memory size in MB + "mem" : "4096" + # The size of the storage in GB + "disk_size" : 250 + "additional_disks": { + # "some-disk-name-1": { + # "size": 100, + # "tier": "maxiops", + # }, + # "some-disk-name-2": { + # "size": 100, + # "tier": "maxiops", + # } + } + }, + "worker-2" : { + "node_type" : "worker", + # plan to use instead of custom cpu/mem + "plan" : null, + #number of cpu cores + "cpu" : "2", + #memory size in MB + "mem" : "4096" + # The size of the storage in GB + "disk_size" : 250 + "additional_disks": { + # "some-disk-name-1": { + # "size": 100, + # "tier": "maxiops", + # }, + # "some-disk-name-2": { + # "size": 100, + # "tier": "maxiops", + # } + } + } +} + +firewall_enabled = false +firewall_default_deny_in = false +firewall_default_deny_out = false + + +master_allowed_remote_ips = [ + { + "start_address" : "0.0.0.0" + "end_address" : "255.255.255.255" + } +] + +k8s_allowed_remote_ips = [ 
+ { + "start_address" : "0.0.0.0" + "end_address" : "255.255.255.255" + } +] + +master_allowed_ports = [] +worker_allowed_ports = [] + +loadbalancer_enabled = false +loadbalancer_plan = "development" +loadbalancers = { + # "http" : { + # "port" : 80, + # "backend_servers" : [ + # "worker-0", + # "worker-1", + # "worker-2" + # ] + # } +} diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/all.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/all.yml new file mode 100644 index 0000000..b9639a8 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/all.yml @@ -0,0 +1,140 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If the cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## And must be set port 6443 +loadbalancer_apiserver_port: 6443 + +## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. 
+# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. 
+ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/aws.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/azure.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/containerd.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# 
containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/coreos.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/cri-o.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/docker.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. 
+# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registry,example China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value. +## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/etcd.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. 
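The comment above concerns the container runtime used on the etcd nodes; the related `etcd_deployment_type` value just below controls whether etcd runs as a host service or inside containers. A small sketch showing the two settings side by side, mirroring the values already present in this file rather than changing them (assumption: containerd as the runtime, etcd kept on the host):

```yaml
# Pin the runtime for the etcd nodes explicitly instead of inheriting the default:
# container_manager: containerd
# Keep etcd itself running directly on the hosts (matches the value set below):
# etcd_deployment_type: host
```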
+# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/gcp.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/hcloud.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/oci.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. 
+# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/offline.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo 
}}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Falnnel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repo must be available, for EL8, baseos and appstream +### By default we enable those repo automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" 
+# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/openstack.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git 
a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/upcloud.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/vsphere.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/etcd.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. 
+# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. +# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/addons.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage 
snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. 
+# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: "layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. 
+kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. 
both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). 
+# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. 
+# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful, for example, to set up a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example to use GPU acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvidia_gpu_nodes, leave empty or comment out if you don't want to install drivers. +## Labels and taints won't be set on nodes if they are not in the array. +# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites.
+# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). +## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. 
+# - If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enables BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN are mutually exclusive modes. +# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use a non-default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want to use the default route interface when you have multiple interfaces with dynamic routes (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tuning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication.
+# If left blank, then the interface is chosen using the node's +# default route. +# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. +# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all connected clusters and +# in the range of 1 to 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This makes it possible to deploy cilium alongside another CNI to replace kube-proxy.
+# cilium_deploy_additionally: false + +# Auto direct node routes can be used to advertise pod routes in your cluster +# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`). +# This works only if you have L2 connectivity between all your nodes. +# You will also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setup. +# cilium_auto_direct_node_routes: false + +# Allows you to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +# cilium_native_routing_cidr: "" + +# Allows you to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fall back on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
+# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. +# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. 
+# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. +# cilium_disable_cnp_status_updates: true diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt needs a custom-compiled OVS kernel module +kube_ovn_tunnel_type: geneve + +## The NIC to support the container network can be a NIC name or a group of regexes separated by commas, e.g. 'enp6s0f0,eth.*'; if empty, the NIC used by the default route will be used. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port.
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. 
The feature works +# only with Weave IPAM (default). +# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/contrib/terraform/upcloud/templates/inventory.tpl b/kubespray/contrib/terraform/upcloud/templates/inventory.tpl new file mode 100644 index 0000000..28ff28a --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/templates/inventory.tpl @@ -0,0 +1,17 @@ + +[all] +${connection_strings_master} +${connection_strings_worker} + +[kube_control_plane] +${list_master} + +[etcd] +${list_master} + +[kube_node] +${list_worker} + +[k8s_cluster:children] +kube_control_plane +kube_node diff --git a/kubespray/contrib/terraform/upcloud/variables.tf b/kubespray/contrib/terraform/upcloud/variables.tf new file mode 100644 index 0000000..5a36048 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/variables.tf @@ -0,0 +1,132 @@ +variable "prefix" { + type = string + default = "kubespray" + + description = "Prefix that is used to distinguish these resources from others" +} + +variable "zone" { + description = "The zone where to run the cluster" +} + +variable "template_name" { + description = "Block describing the preconfigured operating system" +} + +variable "username" { + description = "The username to use for the nodes" + default = "ubuntu" +} + +variable "private_network_cidr" { + description = "CIDR to use for the private network" + default = "172.16.0.0/24" +} + +variable "machines" { + description = "Cluster machines" + + type = map(object({ + node_type = string + plan = string + cpu = string + mem = string + disk_size = number + additional_disks = map(object({ + size = number + tier = string + })) + })) +} + +variable "ssh_public_keys" { + description = "List of public SSH keys which are injected into the VMs." 
+ type = list(string) +} + +variable "inventory_file" { + description = "Where to store the generated inventory file" +} + +variable "UPCLOUD_USERNAME" { + description = "UpCloud username with API access" +} + +variable "UPCLOUD_PASSWORD" { + description = "Password for UpCloud API user" +} + +variable "firewall_enabled" { + description = "Enable firewall rules" + default = false +} + +variable "master_allowed_remote_ips" { + description = "List of IP start/end addresses allowed to access API of masters" + type = list(object({ + start_address = string + end_address = string + })) + default = [] +} + +variable "k8s_allowed_remote_ips" { + description = "List of IP start/end addresses allowed to SSH to hosts" + type = list(object({ + start_address = string + end_address = string + })) + default = [] +} + +variable "master_allowed_ports" { + description = "List of ports to allow on masters" + type = list(object({ + protocol = string + port_range_min = number + port_range_max = number + start_address = string + end_address = string + })) +} + +variable "worker_allowed_ports" { + description = "List of ports to allow on workers" + type = list(object({ + protocol = string + port_range_min = number + port_range_max = number + start_address = string + end_address = string + })) +} + +variable "firewall_default_deny_in" { + description = "Add firewall policies that deny all inbound traffic by default" + default = false +} + +variable "firewall_default_deny_out" { + description = "Add firewall policies that deny all outbound traffic by default" + default = false +} + +variable "loadbalancer_enabled" { + description = "Enable load balancer" + default = false +} + +variable "loadbalancer_plan" { + description = "Load balancer plan (development/production-small)" + default = "development" +} + +variable "loadbalancers" { + description = "Load balancers" + + type = map(object({ + port = number + backend_servers = list(string) + })) + default = {} +} diff --git a/kubespray/contrib/terraform/upcloud/versions.tf b/kubespray/contrib/terraform/upcloud/versions.tf new file mode 100644 index 0000000..48e6820 --- /dev/null +++ b/kubespray/contrib/terraform/upcloud/versions.tf @@ -0,0 +1,10 @@ + +terraform { + required_providers { + upcloud = { + source = "UpCloudLtd/upcloud" + version = "~>2.5.0" + } + } + required_version = ">= 0.13" +} diff --git a/kubespray/contrib/terraform/vsphere/README.md b/kubespray/contrib/terraform/vsphere/README.md new file mode 100644 index 0000000..7aa50d8 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/README.md @@ -0,0 +1,128 @@ +# Kubernetes on vSphere with Terraform + +Provision a Kubernetes cluster on [vSphere](https://www.vmware.com/products/vsphere.html) using Terraform and Kubespray. + +## Overview + +The setup looks like following. + +```text + Kubernetes cluster ++-----------------------+ +| +--------------+ | +| | +--------------+ | +| | | | | +| | | Master/etcd | | +| | | node(s) | | +| +-+ | | +| +--------------+ | +| ^ | +| | | +| v | +| +--------------+ | +| | +--------------+ | +| | | | | +| | | Worker | | +| | | node(s) | | +| +-+ | | +| +--------------+ | ++-----------------------+ +``` + +## Warning + +This setup assumes that the DHCP is disabled in the vSphere cluster and IP addresses have to be provided in the configuration file. 
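+
+Concretely, this means every entry in the `machines` map must carry its own static address. A minimal sketch is shown below (the address is a placeholder; the complete sample lives in `default.tfvars` further down):
+
+```hcl
+machines = {
+  "master-0" : {
+    "node_type" : "master",
+    "ip"        : "192.168.0.10", # static address, since DHCP is not used
+    "netmask"   : "24"
+  }
+}
+```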
+
+## Requirements
+
+* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
+
+## Quickstart
+
+NOTE: *Assumes you are at the root of the kubespray repo*
+
+Copy the sample inventory for your cluster and copy the default terraform variables.
+
+```bash
+CLUSTER=my-vsphere-cluster
+cp -r inventory/sample inventory/$CLUSTER
+cp contrib/terraform/vsphere/default.tfvars inventory/$CLUSTER/
+cd inventory/$CLUSTER
+```
+
+Edit `default.tfvars` to match your setup. You MUST set values specific to your network and vSphere cluster.
+
+```bash
+# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
+$EDITOR default.tfvars
+```
+
+For authentication to your vSphere cluster you can use environment variables.
+
+```bash
+export TF_VAR_vsphere_user=username
+export TF_VAR_vsphere_password=password
+```
+
+Run Terraform to create the infrastructure.
+
+```bash
+terraform init ../../contrib/terraform/vsphere
+terraform apply \
+  -var-file default.tfvars \
+  -state=tfstate-$CLUSTER.tfstate \
+  ../../contrib/terraform/vsphere
+```
+
+You should now have an inventory file named `inventory.ini` that you can use with kubespray.
+You can now copy your inventory file and use it with kubespray to set up a cluster.
+You can type `terraform output` to find out the IP addresses of the nodes.
+
+It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:
+
+```bash
+ansible -i inventory.ini -m ping all
+```
+
+Example of using this with the default sample inventory:
+
+```bash
+ansible-playbook -i inventory.ini ../../cluster.yml -b -v
+```
+
+## Variables
+
+### Required
+
+* `machines`: Machines to provision. The key of this object will be used as the name of the machine
+  * `node_type`: The role of this node *(master|worker)*
+  * `ip`: The IP address of the machine
+  * `netmask`: The netmask to use (to be used on the right-hand side in CIDR notation, e.g., `24`)
+* `network`: The name of the network to attach the machines to
+* `gateway`: The IP address of the network gateway
+* `vsphere_datacenter`: The identifier of the vSphere data center
+* `vsphere_compute_cluster`: The identifier of the vSphere compute cluster
+* `vsphere_datastore`: The identifier of the vSphere data store
+* `vsphere_server`: The vCenter server name or address for vSphere API operations.
+* `ssh_public_keys`: List of public SSH keys to install on all machines
+* `template_name`: The name of a base image (the OVF template must be defined in vSphere beforehand)
+
+### Optional
+
+* `folder`: Name of the folder to put all machines in (default: `""`)
+* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project (default: `"k8s"`)
+* `inventory_file`: Name of the generated inventory file for Kubespray to use in the Ansible step (default: `inventory.ini`)
+* `dns_primary`: The IP address of the primary DNS server (default: `8.8.4.4`)
+* `dns_secondary`: The IP address of the secondary DNS server (default: `8.8.8.8`)
+* `firmware`: Firmware to use (default: `bios`)
+* `hardware_version`: The version of the hardware (default: `15`)
+* `master_cores`: The number of CPU cores for the master nodes (default: 4)
+* `master_memory`: The amount of RAM for the master nodes in MB (default: 4096)
+* `master_disk_size`: The amount of disk space for the master nodes in GB (default: 20)
+* `worker_cores`: The number of CPU cores for the worker nodes (default: 16)
+* `worker_memory`: The amount of RAM for the worker nodes in MB (default: 8192)
+* `worker_disk_size`: The amount of disk space for the worker nodes in GB (default: 100)
+* `vapp`: Boolean to set the template type to vapp (default: false)
+* `interface_name`: Name of the interface to configure (default: ens192)
+
+An example variables file can be found in `default.tfvars` diff --git a/kubespray/contrib/terraform/vsphere/default.tfvars b/kubespray/contrib/terraform/vsphere/default.tfvars new file mode 100644 index 0000000..fa16936 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/default.tfvars @@ -0,0 +1,38 @@
+prefix = "k8s"
+
+inventory_file = "inventory.ini"
+
+network = "VM Network"
+
+machines = {
+  "master-0" : {
+    "node_type" : "master",
+    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.10
+    "netmask" : "24"
+  },
+  "worker-0" : {
+    "node_type" : "worker",
+    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.20
+    "netmask" : "24"
+  },
+  "worker-1" : {
+    "node_type" : "worker",
+    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.21
+    "netmask" : "24"
+  }
+}
+
+gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.1
+
+ssh_public_keys = [
+  # Put your public SSH key here
+  "ssh-rsa I-did-not-read-the-docs",
+  "ssh-rsa I-did-not-read-the-docs 2",
+]
+
+vsphere_datacenter = "i-did-not-read-the-docs"
+vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster
+vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000
+vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com
+
+template_name = "i-did-not-read-the-docs" # e.g.
ubuntu-bionic-18.04-cloudimg diff --git a/kubespray/contrib/terraform/vsphere/main.tf b/kubespray/contrib/terraform/vsphere/main.tf new file mode 100644 index 0000000..fb2d8c8 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/main.tf @@ -0,0 +1,100 @@ +provider "vsphere" { + # Username and password set through env vars VSPHERE_USER and VSPHERE_PASSWORD + user = var.vsphere_user + password = var.vsphere_password + + vsphere_server = var.vsphere_server + + # If you have a self-signed cert + allow_unverified_ssl = true +} + +data "vsphere_datacenter" "dc" { + name = var.vsphere_datacenter +} + +data "vsphere_datastore" "datastore" { + name = var.vsphere_datastore + datacenter_id = data.vsphere_datacenter.dc.id +} + +data "vsphere_network" "network" { + name = var.network + datacenter_id = data.vsphere_datacenter.dc.id +} + +data "vsphere_virtual_machine" "template" { + name = var.template_name + datacenter_id = data.vsphere_datacenter.dc.id +} + +data "vsphere_compute_cluster" "compute_cluster" { + name = var.vsphere_compute_cluster + datacenter_id = data.vsphere_datacenter.dc.id +} + +resource "vsphere_resource_pool" "pool" { + name = "${var.prefix}-cluster-pool" + parent_resource_pool_id = data.vsphere_compute_cluster.compute_cluster.resource_pool_id +} + +module "kubernetes" { + source = "./modules/kubernetes-cluster" + + prefix = var.prefix + + machines = var.machines + + ## Master ## + master_cores = var.master_cores + master_memory = var.master_memory + master_disk_size = var.master_disk_size + + ## Worker ## + worker_cores = var.worker_cores + worker_memory = var.worker_memory + worker_disk_size = var.worker_disk_size + + ## Global ## + + gateway = var.gateway + dns_primary = var.dns_primary + dns_secondary = var.dns_secondary + + pool_id = vsphere_resource_pool.pool.id + datastore_id = data.vsphere_datastore.datastore.id + + folder = var.folder + guest_id = data.vsphere_virtual_machine.template.guest_id + scsi_type = data.vsphere_virtual_machine.template.scsi_type + network_id = data.vsphere_network.network.id + adapter_type = data.vsphere_virtual_machine.template.network_interface_types[0] + interface_name = var.interface_name + firmware = var.firmware + hardware_version = var.hardware_version + disk_thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned + + template_id = data.vsphere_virtual_machine.template.id + vapp = var.vapp + + ssh_public_keys = var.ssh_public_keys +} + +# +# Generate ansible inventory +# + +resource "local_file" "inventory" { + content = templatefile("${path.module}/templates/inventory.tpl", { + connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s etcd_member_name=etcd%d", + keys(module.kubernetes.master_ip), + values(module.kubernetes.master_ip), + range(1, length(module.kubernetes.master_ip) + 1))), + connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s", + keys(module.kubernetes.worker_ip), + values(module.kubernetes.worker_ip))), + list_master = join("\n", formatlist("%s", keys(module.kubernetes.master_ip))), + list_worker = join("\n", formatlist("%s", keys(module.kubernetes.worker_ip))) + }) + filename = var.inventory_file +} diff --git a/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf new file mode 100644 index 0000000..a44c2cf --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf @@ -0,0 +1,149 @@ +resource 
"vsphere_virtual_machine" "worker" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "worker" + } + + name = "${var.prefix}-${each.key}" + + resource_pool_id = var.pool_id + datastore_id = var.datastore_id + + num_cpus = var.worker_cores + memory = var.worker_memory + memory_reservation = var.worker_memory + guest_id = var.guest_id + enable_disk_uuid = "true" # needed for CSI provider + scsi_type = var.scsi_type + folder = var.folder + firmware = var.firmware + hardware_version = var.hardware_version + + wait_for_guest_net_routable = false + wait_for_guest_net_timeout = 0 + + network_interface { + network_id = var.network_id + adapter_type = var.adapter_type + } + + disk { + label = "disk0" + size = var.worker_disk_size + thin_provisioned = var.disk_thin_provisioned + } + + lifecycle { + ignore_changes = [disk] + } + + clone { + template_uuid = var.template_id + } + + cdrom { + client_device = true + } + + dynamic "vapp" { + for_each = var.vapp ? [1] : [] + + content { + properties = { + "user-data" = base64encode(templatefile("${path.module}/templates/vapp-cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) + } + } + } + + extra_config = { + "isolation.tools.copy.disable" = "FALSE" + "isolation.tools.paste.disable" = "FALSE" + "isolation.tools.setGUIOptions.enable" = "TRUE" + "guestinfo.userdata" = base64encode(templatefile("${path.module}/templates/cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) + "guestinfo.userdata.encoding" = "base64" + "guestinfo.metadata" = base64encode(templatefile("${path.module}/templates/metadata.tpl", { hostname = "${var.prefix}-${each.key}", + interface_name = var.interface_name + ip = each.value.ip, + netmask = each.value.netmask, + gw = var.gateway, + dns = var.dns_primary, + ssh_public_keys = var.ssh_public_keys })) + "guestinfo.metadata.encoding" = "base64" + } +} + +resource "vsphere_virtual_machine" "master" { + for_each = { + for name, machine in var.machines : + name => machine + if machine.node_type == "master" + } + + name = "${var.prefix}-${each.key}" + + resource_pool_id = var.pool_id + datastore_id = var.datastore_id + + num_cpus = var.master_cores + memory = var.master_memory + memory_reservation = var.master_memory + guest_id = var.guest_id + enable_disk_uuid = "true" # needed for CSI provider + scsi_type = var.scsi_type + folder = var.folder + firmware = var.firmware + hardware_version = var.hardware_version + + wait_for_guest_net_routable = false + wait_for_guest_net_timeout = 0 + + network_interface { + network_id = var.network_id + adapter_type = var.adapter_type + } + + disk { + label = "disk0" + size = var.master_disk_size + thin_provisioned = var.disk_thin_provisioned + } + + lifecycle { + ignore_changes = [disk] + } + + clone { + template_uuid = var.template_id + } + + cdrom { + client_device = true + } + + dynamic "vapp" { + for_each = var.vapp ? 
[1] : [] + + content { + properties = { + "user-data" = base64encode(templatefile("${path.module}/templates/vapp-cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) + } + } + } + + extra_config = { + "isolation.tools.copy.disable" = "FALSE" + "isolation.tools.paste.disable" = "FALSE" + "isolation.tools.setGUIOptions.enable" = "TRUE" + "guestinfo.userdata" = base64encode(templatefile("${path.module}/templates/cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) + "guestinfo.userdata.encoding" = "base64" + "guestinfo.metadata" = base64encode(templatefile("${path.module}/templates/metadata.tpl", { hostname = "${var.prefix}-${each.key}", + interface_name = var.interface_name + ip = each.value.ip, + netmask = each.value.netmask, + gw = var.gateway, + dns = var.dns_primary, + ssh_public_keys = var.ssh_public_keys })) + "guestinfo.metadata.encoding" = "base64" + } +} diff --git a/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf new file mode 100644 index 0000000..93752ab --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf @@ -0,0 +1,15 @@ +output "master_ip" { + value = { + for name, machine in var.machines : + "${var.prefix}-${name}" => machine.ip + if machine.node_type == "master" + } +} + +output "worker_ip" { + value = { + for name, machine in var.machines : + "${var.prefix}-${name}" => machine.ip + if machine.node_type == "worker" + } +} diff --git a/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl new file mode 100644 index 0000000..5f809af --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl @@ -0,0 +1,6 @@ +#cloud-config + +ssh_authorized_keys: +%{ for ssh_public_key in ssh_public_keys ~} + - ${ssh_public_key} +%{ endfor ~} diff --git a/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl new file mode 100644 index 0000000..1553f08 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl @@ -0,0 +1,14 @@ +instance-id: ${hostname} +local-hostname: ${hostname} +network: + version: 2 + ethernets: + ${interface_name}: + match: + name: ${interface_name} + dhcp4: false + addresses: + - ${ip}/${netmask} + gateway4: ${gw} + nameservers: + addresses: [${dns}] diff --git a/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/vapp-cloud-init.tpl b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/vapp-cloud-init.tpl new file mode 100644 index 0000000..07d0778 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/vapp-cloud-init.tpl @@ -0,0 +1,24 @@ +#cloud-config + +ssh_authorized_keys: +%{ for ssh_public_key in ssh_public_keys ~} + - ${ssh_public_key} +%{ endfor ~} + +write_files: + - path: /etc/netplan/10-user-network.yaml + content: |. 
+ network: + version: 2 + ethernets: + ${interface_name}: + dhcp4: false #true to use dhcp + addresses: + - ${ip}/${netmask} + gateway4: ${gw} # Set gw here + nameservers: + addresses: + - ${dns} # Set DNS ip address here + +runcmd: + - netplan apply diff --git a/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf new file mode 100644 index 0000000..cb99142 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf @@ -0,0 +1,43 @@ +## Global ## +variable "prefix" {} + +variable "machines" { + description = "Cluster machines" + type = map(object({ + node_type = string + ip = string + netmask = string + })) +} + +variable "gateway" {} +variable "dns_primary" {} +variable "dns_secondary" {} +variable "pool_id" {} +variable "datastore_id" {} +variable "guest_id" {} +variable "scsi_type" {} +variable "network_id" {} +variable "interface_name" {} +variable "adapter_type" {} +variable "disk_thin_provisioned" {} +variable "template_id" {} +variable "vapp" { + type = bool +} +variable "firmware" {} +variable "folder" {} +variable "ssh_public_keys" { + type = list(string) +} +variable "hardware_version" {} + +## Master ## +variable "master_cores" {} +variable "master_memory" {} +variable "master_disk_size" {} + +## Worker ## +variable "worker_cores" {} +variable "worker_memory" {} +variable "worker_disk_size" {} diff --git a/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf new file mode 100644 index 0000000..8c622fd --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + vsphere = { + source = "hashicorp/vsphere" + version = ">= 1.24.3" + } + } + required_version = ">= 0.13" +} diff --git a/kubespray/contrib/terraform/vsphere/output.tf b/kubespray/contrib/terraform/vsphere/output.tf new file mode 100644 index 0000000..a4338d9 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/output.tf @@ -0,0 +1,31 @@ +output "master_ip_addresses" { + value = module.kubernetes.master_ip +} + +output "worker_ip_addresses" { + value = module.kubernetes.worker_ip +} + +output "vsphere_datacenter" { + value = var.vsphere_datacenter +} + +output "vsphere_server" { + value = var.vsphere_server +} + +output "vsphere_datastore" { + value = var.vsphere_datastore +} + +output "vsphere_network" { + value = var.network +} + +output "vsphere_folder" { + value = var.folder +} + +output "vsphere_pool" { + value = "${terraform.workspace}-cluster-pool" +} diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/cluster.tfvars b/kubespray/contrib/terraform/vsphere/sample-inventory/cluster.tfvars new file mode 100644 index 0000000..dfa0a3d --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/cluster.tfvars @@ -0,0 +1,33 @@ +prefix = "default" + +inventory_file = "inventory.ini" + +machines = { + "master-0" : { + "node_type" : "master", + "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24 + }, + "worker-0" : { + "node_type" : "worker", + "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24 + }, + "worker-1" : { + "node_type" : "worker", + "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24 + } +} + +gateway = "i-did-not-read-the-docs" # e.g. 
192.168.0.2 + +ssh_public_keys = [ + # Put your public SSH key here + "ssh-rsa I-did-not-read-the-docs", + "ssh-rsa I-did-not-read-the-docs 2", +] + +vsphere_datacenter = "i-did-not-read-the-docs" +vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster +vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000 +vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com + +template_name = "i-did-not-read-the-docs" # e.g. ubuntu-bionic-18.04-cloudimg diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/all.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/all.yml new file mode 100644 index 0000000..b9639a8 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/all.yml @@ -0,0 +1,140 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If the cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## And must be set port 6443 +loadbalancer_apiserver_port: 6443 + +## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. 
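+## As an illustrative sketch (not part of the upstream sample): since this inventory
+## targets vSphere, one common choice is the external cloud provider, e.g.
+## cloud_provider: external
+## external_cloud_provider: vsphere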
+# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. 
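+# For example (illustrative), to have Kubespray start the time service and manage its
+# configuration with the servers listed below, flip both of these to true:
+# ntp_enabled: true
+# ntp_manage_config: true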
+ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/aws.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/azure.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/containerd.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# 
containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/coreos.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/cri-o.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/docker.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. 
+# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registry,example China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value. +## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/etcd.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. 
+# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/gcp.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/hcloud.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/oci.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. 
+# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/offline.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo 
}}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Falnnel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repo must be available, for EL8, baseos and appstream +### By default we enable those repo automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" 
+# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/openstack.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git 
a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/upcloud.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/vsphere.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/etcd.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. 
+# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. +# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/addons.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage 
snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. 
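Returning to the ingress section above, here is a minimal sketch of enabling the NGINX ingress controller with a TCP service mapping, assembled from the commented defaults; the backend service reference is the same placeholder used in the template.

```yaml
# group_vars/k8s_cluster/addons.yml -- ingress-nginx sketch based on the commented defaults above
ingress_nginx_enabled: true
ingress_nginx_host_network: true
ingress_nginx_namespace: "ingress-nginx"
ingress_nginx_insecure_port: 80
ingress_nginx_secure_port: 443
ingress_nginx_configmap:
  map-hash-bucket-size: "128"
  ssl-protocols: "TLSv1.2 TLSv1.3"
ingress_nginx_configmap_tcp_services:
  9000: "default/example-go:8080"   # placeholder backend, as in the template above
ingress_nginx_class: nginx
```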
+# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: "layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. 
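As an aside before the cluster-wide settings that follow: the MetalLB block a little further up only takes effect once an address pool is supplied. Below is a minimal layer2 sketch with a placeholder IP range; note that the k8s-cluster.yml defaults later in this change state that kube_proxy_strict_arp must be set to true for MetalLB in ARP/layer2 mode.

```yaml
# group_vars/k8s_cluster/addons.yml -- MetalLB layer2 sketch; the address range is a placeholder
metallb_enabled: true
metallb_speaker_enabled: "{{ metallb_enabled }}"
metallb_protocol: "layer2"
metallb_ip_range:
  - "10.5.0.50-10.5.0.99"
metallb_pool_name: "loadbalanced"
metallb_auto_assign: true

# group_vars/k8s_cluster/k8s-cluster.yml -- required so kube-ipvs0 answers ARP for MetalLB
kube_proxy_strict_arp: true
```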
+kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. 
both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). 
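Stepping back to the dual-stack options shown a little earlier: enabling IPv4/IPv6 dual stack mostly amounts to flipping the toggle, since the IPv6 service and pod ranges above already carry working defaults. A sketch, repeating those defaults for convenience:

```yaml
# group_vars/k8s_cluster/k8s-cluster.yml -- dual-stack sketch reusing the ranges defined above
enable_dual_stack_networks: true
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
kube_network_node_prefix_ipv6: 120
```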
+# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. 
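As a brief aside on the nodelocaldns options above: forwarding a private zone to an internal resolver follows the commented nodelocaldns_external_zones structure. The zone name and nameserver address below are placeholders, not values from this inventory.

```yaml
# group_vars/k8s_cluster/k8s-cluster.yml -- nodelocaldns external zone sketch (placeholder values)
nodelocaldns_external_zones:
  - zones:
      - corp.example.com
    nameservers:
      - 10.0.0.53
    cache: 5
```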
+# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. +# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. 
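Before the TLS cipher list that follows, here is what reserving resources for OS system daemons could look like if the commented defaults above are taken as-is; treat it as a sketch rather than recommended sizing for any particular node.

```yaml
# group_vars/k8s_cluster/k8s-cluster.yml -- node allocatable / system reservation sketch
kubelet_enforce_node_allocatable: pods
system_reserved: true
system_memory_reserved: 512Mi
system_cpu_reserved: 500m
system_ephemeral_storage_reserved: 2Gi
# reservations applied to control-plane hosts
system_master_memory_reserved: 256Mi
system_master_cpu_reserved: 250m
```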
+# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). +## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. 
+# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enable BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN is mutualy exclusive modes. +# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want use the default route interface when you use multiple interface with dynamique route (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tunning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. 
+# If left blank, then the interface is choosing using the node's +# default route. +# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. +# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all conneted clusters and +# in the range of 1 and 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. 
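Pulling the Cilium options above together, a sketch of what switching the cluster to Cilium with kube-proxy replacement might look like. kube_network_plugin lives in k8s-cluster.yml; the remaining values come from the commented defaults in this file, and choosing "strict" here is only for illustration.

```yaml
# group_vars/k8s_cluster/k8s-cluster.yml
kube_network_plugin: cilium

# group_vars/k8s_cluster/k8s-net-cilium.yml -- sketch only
cilium_version: "v1.12.1"
cilium_tunnel_mode: vxlan
cilium_kube_proxy_replacement: strict
cilium_enable_prometheus: true
```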
+# cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). +# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. 
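A short sketch of the transparent-encryption options just described, choosing WireGuard; nothing beyond the toggles shown above is required.

```yaml
# group_vars/k8s_cluster/k8s-net-cilium.yml -- WireGuard encryption sketch
cilium_encryption_enabled: true
cilium_encryption_type: "wireguard"
# only relevant on kernels without native WireGuard support
cilium_wireguard_userspace_fallback: false
```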
+# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. +# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. 
+# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. +# cilium_disable_cnp_status_updates: true diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic to support container network can be a nic name or a group of regex separated by comma e.g: 'enp6s0f0,eth.*', if empty will use the nic that the default route use. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bind to the physical port. 
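As an aside on the flannel defaults shown just above: pinning the backend explicitly is a matter of uncommenting the relevant variables. A sketch using the documented defaults:

```yaml
# group_vars/k8s_cluster/k8s-net-flannel.yml -- explicit flannel backend sketch
flannel_backend_type: "vxlan"
flannel_vxlan_vni: 1
flannel_vxlan_port: 8472
flannel_vxlan_direct_routing: false
```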
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
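Returning to the kube-router options above, a sketch of peering the cluster with an external BGP router. The ASNs and router address are placeholders (192.0.2.0/24 is documentation space), and the exact value formats should be checked against the kube-router user guide linked above.

```yaml
# group_vars/k8s_cluster/k8s-net-kube-router.yml -- BGP peering sketch, placeholder values
kube_router_advertise_cluster_ip: true
kube_router_cluster_asn: "64512"
kube_router_peer_router_asns: "64513"
kube_router_peer_router_ips: "192.0.2.1"
kube_router_peer_router_ports: "179"
```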
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/sample-inventory/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. 
The feature works +# only with Weave IPAM (default). +# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/contrib/terraform/vsphere/templates/inventory.tpl b/kubespray/contrib/terraform/vsphere/templates/inventory.tpl new file mode 100644 index 0000000..28ff28a --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/templates/inventory.tpl @@ -0,0 +1,17 @@ + +[all] +${connection_strings_master} +${connection_strings_worker} + +[kube_control_plane] +${list_master} + +[etcd] +${list_master} + +[kube_node] +${list_worker} + +[k8s_cluster:children] +kube_control_plane +kube_node diff --git a/kubespray/contrib/terraform/vsphere/variables.tf b/kubespray/contrib/terraform/vsphere/variables.tf new file mode 100644 index 0000000..b7c18cd --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/variables.tf @@ -0,0 +1,99 @@ +## Global ## + +# Required variables + +variable "machines" { + description = "Cluster machines" + type = map(object({ + node_type = string + ip = string + netmask = string + })) +} + +variable "network" {} + +variable "gateway" {} + +variable "vsphere_datacenter" {} + +variable "vsphere_compute_cluster" {} + +variable "vsphere_datastore" {} + +variable "vsphere_user" {} + +variable "vsphere_password" {} + +variable "vsphere_server" {} + +variable "ssh_public_keys" { + description = "List of public SSH keys which are injected into the VMs." 
+ type = list(string) +} + +variable "template_name" {} + +# Optional variables (ones where reasonable defaults exist) +variable "vapp" { + default = false +} + +variable "interface_name" { + default = "ens192" +} + +variable "folder" { + default = "" +} + +variable "prefix" { + default = "k8s" +} + +variable "inventory_file" { + default = "inventory.ini" +} + +variable "dns_primary" { + default = "8.8.4.4" +} + +variable "dns_secondary" { + default = "8.8.8.8" +} + +variable "firmware" { + default = "bios" +} + +variable "hardware_version" { + default = "15" +} + +## Master ## + +variable "master_cores" { + default = 4 +} + +variable "master_memory" { + default = 4096 +} + +variable "master_disk_size" { + default = "20" +} + +## Worker ## + +variable "worker_cores" { + default = 16 +} + +variable "worker_memory" { + default = 8192 +} +variable "worker_disk_size" { + default = "100" +} diff --git a/kubespray/contrib/terraform/vsphere/versions.tf b/kubespray/contrib/terraform/vsphere/versions.tf new file mode 100644 index 0000000..2a2a7c6 --- /dev/null +++ b/kubespray/contrib/terraform/vsphere/versions.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + vsphere = { + source = "hashicorp/vsphere" + version = ">= 1.24.3" + } + null = { + source = "hashicorp/null" + } + template = { + source = "hashicorp/template" + } + } + required_version = ">= 0.13" +} diff --git a/kubespray/docs/_sidebar.md b/kubespray/docs/_sidebar.md new file mode 100644 index 0000000..6942bcf --- /dev/null +++ b/kubespray/docs/_sidebar.md @@ -0,0 +1,66 @@ +* [Readme](/) +* [Comparisons](/docs/comparisons.md) +* [Getting started](/docs/getting-started.md) +* [Ansible](docs/ansible.md) +* [Variables](/docs/vars.md) +* Operations + * [Integration](docs/integration.md) + * [Upgrades](/docs/upgrades.md) + * [HA Mode](docs/ha-mode.md) + * [Adding/replacing a node](docs/nodes.md) + * [Large deployments](docs/large-deployments.md) + * [Air-Gap Installation](docs/offline-environment.md) +* CNI + * [Calico](docs/calico.md) + * [Flannel](docs/flannel.md) + * [Kube Router](docs/kube-router.md) + * [Kube OVN](docs/kube-ovn.md) + * [Weave](docs/weave.md) + * [Multus](docs/multus.md) +* Ingress + * [kube-vip](docs/kube-vip.md) + * [ALB Ingress](docs/ingress_controller/alb_ingress_controller.md) + * [MetalLB](docs/metallb.md) + * [Nginx Ingress](docs/ingress_controller/ingress_nginx.md) +* [Cloud providers](docs/cloud.md) + * [AWS](docs/aws.md) + * [Azure](docs/azure.md) + * [OpenStack](/docs/openstack.md) + * [Equinix Metal](/docs/equinix-metal.md) + * [vSphere](/docs/vsphere.md) +* [Operating Systems](docs/bootstrap-os.md) + * [Debian](docs/debian.md) + * [Flatcar Container Linux](docs/flatcar.md) + * [Fedora CoreOS](docs/fcos.md) + * [OpenSUSE](docs/opensuse.md) + * [RedHat Enterprise Linux](docs/rhel.md) + * [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md) + * [Kylin Linux Advanced Server V10](docs/kylinlinux.md) + * [Amazon Linux 2](docs/amazonlinux.md) + * [UOS Linux](docs/uoslinux.md) + * [openEuler notes](docs/openeuler.md)) +* CRI + * [Containerd](docs/containerd.md) + * [Docker](docs/docker.md) + * [CRI-O](docs/cri-o.md) + * [Kata Containers](docs/kata-containers.md) + * [gVisor](docs/gvisor.md) +* Advanced + * [Proxy](/docs/proxy.md) + * [Downloads](docs/downloads.md) + * [Netcheck](docs/netcheck.md) + * [Cert Manager](docs/cert_manager.md) + * [DNS Stack](docs/dns-stack.md) + * [Kubernetes reliability](docs/kubernetes-reliability.md) + * [Local Registry](docs/kubernetes-apps/registry.md) + * 
[NTP](docs/ntp.md) +* External Storage Provisioners + * [RBD Provisioner](docs/kubernetes-apps/rbd_provisioner.md) + * [CEPHFS Provisioner](docs/kubernetes-apps/cephfs_provisioner.md) + * [Local Volume Provisioner](docs/kubernetes-apps/local_volume_provisioner.md) +* Developers + * [Test cases](docs/test_cases.md) + * [Vagrant](docs/vagrant.md) + * [CI Matrix](docs/ci.md) + * [CI Setup](docs/ci-setup.md) +* [Roadmap](docs/roadmap.md) diff --git a/kubespray/docs/amazonlinux.md b/kubespray/docs/amazonlinux.md new file mode 100644 index 0000000..a411e9c --- /dev/null +++ b/kubespray/docs/amazonlinux.md @@ -0,0 +1,15 @@ +# Amazon Linux 2 + +Amazon Linux is supported with docker,containerd and cri-o runtimes. + +**Note:** that Amazon Linux is not currently covered in kubespray CI and +support for it is currently considered experimental. + +Amazon Linux 2, while derrived from the Redhat OS family, does not keep in +sync with RHEL upstream like CentOS/AlmaLinux/Oracle Linux. In order to use +Amazon Linux as the ansible host for your kubespray deployments you need to +manually install `python3` and deploy ansible and kubespray dependencies in +a python virtual environment or use the official kubespray containers. + +There are no special considerations for using Amazon Linux as the target OS +for Kubespray deployments. diff --git a/kubespray/docs/ansible.md b/kubespray/docs/ansible.md new file mode 100644 index 0000000..980b136 --- /dev/null +++ b/kubespray/docs/ansible.md @@ -0,0 +1,303 @@ +# Ansible + +## Installing Ansible + +Kubespray supports multiple ansible versions and ships different `requirements.txt` files for them. +Depending on your available python version you may be limited in choosing which ansible version to use. + +It is recommended to deploy the ansible version used by kubespray into a python virtual environment. + +```ShellSession +VENVDIR=kubespray-venv +KUBESPRAYDIR=kubespray +ANSIBLE_VERSION=2.12 +virtualenv --python=$(which python3) $VENVDIR +source $VENVDIR/bin/activate +cd $KUBESPRAYDIR +pip install -U -r requirements-$ANSIBLE_VERSION.txt +test -f requirements-$ANSIBLE_VERSION.yml && \ + ansible-galaxy role install -r requirements-$ANSIBLE_VERSION.yml && \ + ansible-galaxy collection -r requirements-$ANSIBLE_VERSION.yml +``` + +### Ansible Python Compatibility + +Based on the table below and the available python version for your ansible host you should choose the appropriate ansible version to use with kubespray. + +| Ansible Version | Python Version | +| --------------- | -------------- | +| 2.11 | 2.7,3.5-3.9 | +| 2.12 | 3.8-3.10 | + +## Inventory + +The inventory is composed of 3 groups: + +* **kube_node** : list of kubernetes nodes where the pods will run. +* **kube_control_plane** : list of servers where kubernetes control plane components (apiserver, scheduler, controller) will run. +* **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose. + +Note: do not modify the children of _k8s_cluster_, like putting +the _etcd_ group into the _k8s_cluster_, unless you are certain +to do that and you have it fully contained in the latter: + +```ShellSession +etcd ⊂ k8s_cluster => kube_node ∩ etcd = etcd +``` + +When _kube_node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads. +If you want it a standalone, make sure those groups do not intersect. 
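For instance, a dedicated (standalone) etcd tier simply means the etcd hosts never appear under kube_node. A minimal sketch of that layout in Ansible's YAML inventory format, with hypothetical host names (the document's own inventory examples use INI format):

```yaml
# hypothetical YAML-format inventory: etcd hosts are not members of kube_node
all:
  children:
    kube_control_plane:
      hosts:
        node1:
    etcd:
      hosts:
        node1:
        node2:
        node3:
    kube_node:
      hosts:
        node4:
        node5:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
```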
+If you want the server to act both as control-plane and node, the server must be defined +on both groups _kube_control_plane_ and _kube_node_. If you want a standalone and +unschedulable control plane, the server must be defined only in the _kube_control_plane_ and +not _kube_node_. + +There are also two special groups: + +* **calico_rr** : explained for [advanced Calico networking cases](/docs/calico.md) +* **bastion** : configure a bastion host if your nodes are not directly reachable + +Below is a complete inventory example: + +```ini +## Configure 'ip' variable to bind kubernetes services on a +## different ip than the default iface +node1 ansible_host=95.54.0.12 ip=10.3.0.1 +node2 ansible_host=95.54.0.13 ip=10.3.0.2 +node3 ansible_host=95.54.0.14 ip=10.3.0.3 +node4 ansible_host=95.54.0.15 ip=10.3.0.4 +node5 ansible_host=95.54.0.16 ip=10.3.0.5 +node6 ansible_host=95.54.0.17 ip=10.3.0.6 + +[kube_control_plane] +node1 +node2 + +[etcd] +node1 +node2 +node3 + +[kube_node] +node2 +node3 +node4 +node5 +node6 + +[k8s_cluster:children] +kube_node +kube_control_plane +``` + +## Group vars and overriding variables precedence + +The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``. +Optional variables are located in the `inventory/sample/group_vars/all.yml`. +Mandatory variables that are common for at least one role (or a node group) can be found in the +`inventory/sample/group_vars/k8s_cluster.yml`. +There are also role vars for docker, kubernetes preinstall and control plane roles. +According to the [ansible docs](https://docs.ansible.com/ansible/latest/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable), +those cannot be overridden from the group vars. In order to override, one should use +the `-e` runtime flags (most simple way) or other layers described in the docs. + +Kubespray uses only a few layers to override things (or expect them to +be overridden for roles): + +Layer | Comment +------|-------- +**role defaults** | provides best UX to override things for Kubespray deployments +inventory vars | Unused +**inventory group_vars** | Expects users to use ``all.yml``,``k8s_cluster.yml`` etc. to override things +inventory host_vars | Unused +playbook group_vars | Unused +playbook host_vars | Unused +**host facts** | Kubespray overrides for internal roles' logic, like state flags +play vars | Unused +play vars_prompt | Unused +play vars_files | Unused +registered vars | Unused +set_facts | Kubespray overrides those, for some places +**role and include vars** | Provides bad UX to override things! 
Use extra vars to enforce +block vars (only for tasks in block) | Kubespray overrides for internal roles' logic +task vars (only for the task) | Unused for roles, but only for helper scripts +**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml`` + +## Ansible tags + +The following tags are defined in playbooks: + +| Tag name | Used for +|--------------------------------|--------- +| annotate | Create kube-router annotation +| apps | K8s apps definitions +| asserts | Check tasks for download role +| aws-ebs-csi-driver | Configuring csi driver: aws-ebs +| azure-csi-driver | Configuring csi driver: azure +| bastion | Setup ssh config for bastion +| bootstrap-os | Anything related to host OS configuration +| calico | Network plugin Calico +| calico_rr | Configuring Calico route reflector +| canal | Network plugin Canal +| cephfs-provisioner | Configuring CephFS +| cert-manager | Configuring certificate manager for K8s +| cilium | Network plugin Cilium +| cinder-csi-driver | Configuring csi driver: cinder +| client | Kubernetes clients role +| cloud-provider | Cloud-provider related tasks +| cluster-roles | Configuring cluster wide application (psp ...) +| cni | CNI plugins for Network Plugins +| containerd | Configuring containerd engine runtime for hosts +| container_engine_accelerator | Enable nvidia accelerator for runtimes +| container-engine | Configuring container engines +| container-runtimes | Configuring container runtimes +| coredns | Configuring coredns deployment +| crio | Configuring crio container engine for hosts +| crun | Configuring crun runtime +| csi-driver | Configuring csi driver +| dashboard | Installing and configuring the Kubernetes Dashboard +| dns | Remove dns entries when resetting +| docker | Configuring docker engine runtime for hosts +| download | Fetching container images to a delegate host +| etcd | Configuring etcd cluster +| etcd-secrets | Configuring etcd certs/keys +| etchosts | Configuring /etc/hosts entries for hosts +| external-cloud-controller | Configure cloud controllers +| external-openstack | Cloud controller : openstack +| external-provisioner | Configure external provisioners +| external-vsphere | Cloud controller : vsphere +| facts | Gathering facts and misc check results +| files | Remove files when resetting +| flannel | Network plugin flannel +| gce | Cloud-provider GCP +| gcp-pd-csi-driver | Configuring csi driver: gcp-pd +| gvisor | Configuring gvisor runtime +| helm | Installing and configuring Helm +| ingress-controller | Configure ingress controllers +| ingress_alb | AWS ALB Ingress Controller +| init | Windows kubernetes init nodes +| iptables | Flush and clear iptable when resetting +| k8s-pre-upgrade | Upgrading K8s cluster +| k8s-secrets | Configuring K8s certs/keys +| k8s-gen-tokens | Configuring K8s tokens +| kata-containers | Configuring kata-containers runtime +| krew | Install and manage krew +| kubeadm | Roles linked to kubeadm tasks +| kube-apiserver | Configuring static pod kube-apiserver +| kube-controller-manager | Configuring static pod kube-controller-manager +| kube-vip | Installing and configuring kube-vip +| kubectl | Installing kubectl and bash completion +| kubelet | Configuring kubelet service +| kube-ovn | Network plugin kube-ovn +| kube-router | Network plugin kube-router +| kube-proxy | Configuring static pod kube-proxy +| localhost | Special steps for the localhost (ansible runner) +| local-path-provisioner | Configure External provisioner: local-path +| local-volume-provisioner | 
Configure External provisioner: local-volume +| macvlan | Network plugin macvlan +| master | Configuring K8s master node role +| metallb | Installing and configuring metallb +| metrics_server | Configuring metrics_server +| netchecker | Installing netchecker K8s app +| network | Configuring networking plugins for K8s +| mounts | Umount kubelet dirs when reseting +| multus | Network plugin multus +| nginx | Configuring LB for kube-apiserver instances +| node | Configuring K8s minion (compute) node role +| nodelocaldns | Configuring nodelocaldns daemonset +| node-label | Tasks linked to labeling of nodes +| node-webhook | Tasks linked to webhook (grating access to resources) +| nvidia_gpu | Enable nvidia accelerator for runtimes +| oci | Cloud provider: oci +| persistent_volumes | Configure csi volumes +| persistent_volumes_aws_ebs_csi | Configuring csi driver: aws-ebs +| persistent_volumes_cinder_csi | Configuring csi driver: cinder +| persistent_volumes_gcp_pd_csi | Configuring csi driver: gcp-pd +| persistent_volumes_openstack | Configuring csi driver: openstack +| policy-controller | Configuring Calico policy controller +| post-remove | Tasks running post-remove operation +| post-upgrade | Tasks running post-upgrade operation +| pre-remove | Tasks running pre-remove operation +| pre-upgrade | Tasks running pre-upgrade operation +| preinstall | Preliminary configuration steps +| registry | Configuring local docker registry +| reset | Tasks running doing the node reset +| resolvconf | Configuring /etc/resolv.conf for hosts/apps +| rbd-provisioner | Configure External provisioner: rdb +| services | Remove services (etcd, kubelet etc...) when resetting +| snapshot | Enabling csi snapshot +| snapshot-controller | Configuring csi snapshot controller +| upgrade | Upgrading, f.e. container images/binaries +| upload | Distributing images/binaries across hosts +| vsphere-csi-driver | Configuring csi driver: vsphere +| weave | Network plugin Weave +| win_nodes | Running windows specific tasks +| youki | Configuring youki runtime + +Note: Use the ``bash scripts/gen_tags.sh`` command to generate a list of all +tags found in the codebase. New tags will be listed with the empty "Used for" +field. + +## Example commands + +Example command to filter and apply only DNS configuration tasks and skip +everything else related to host OS configuration and downloading images of containers: + +```ShellSession +ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,facts --skip-tags=download,bootstrap-os +``` + +And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files: + +```ShellSession +ansible-playbook -i inventory/sample/hosts.ini -e dns_mode='none' cluster.yml --tags resolvconf +``` + +And this prepares all container images locally (at the ansible runner node) without installing +or upgrading related stuff or trying to upload container to K8s cluster nodes: + +```ShellSession +ansible-playbook -i inventory/sample/hosts.ini cluster.yml \ + -e download_run_once=true -e download_localhost=true \ + --tags download --skip-tags upload,upgrade +``` + +Note: use `--tags` and `--skip-tags` wise and only if you're 100% sure what you're doing. + +## Bastion host + +If you prefer to not make your nodes publicly accessible (nodes with private IPs only), +you can use a so called *bastion* host to connect to your nodes. To specify and use a bastion, +simply add a line to your inventory, where you have to replace x.x.x.x with the public IP of the +bastion host. 
+
+```ShellSession
+[bastion]
+bastion ansible_host=x.x.x.x
+```
+
+For more information about Ansible and bastion hosts, read
+[Running Ansible Through an SSH Bastion Host](https://blog.scottlowe.org/2015/12/24/running-ansible-through-ssh-bastion-host/)
+
+## Mitogen
+
+Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for usage and reasons for deprecation.
+
+## Beyond ansible 2.9
+
+The Ansible project has decided, in order to ease its maintenance burden, to split into
+two projects, which are now joined under the Ansible umbrella.
+
+Ansible-base (2.10.x branch) will contain just the ansible language implementation, while
+ansible modules that were previously bundled into a single repository will be part of the
+ansible 3.x package. Please see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/)
+that explains in detail the need and the evolution plan.
+
+**Note:** this change means that ansible virtual envs cannot be upgraded with `pip install -U`.
+You first need to uninstall your old ansible (pre 2.10) version and install the new one.
+
+```ShellSession
+pip uninstall ansible ansible-base ansible-core
+cd kubespray/
+pip install -U .
+```
diff --git a/kubespray/docs/arch.md b/kubespray/docs/arch.md
new file mode 100644
index 0000000..4deae7a
--- /dev/null
+++ b/kubespray/docs/arch.md
@@ -0,0 +1,17 @@
+# Architecture compatibility
+
+The following table shows the impact of the CPU architecture on compatible features:
+
+- amd64: Cluster using only x86/amd64 CPUs
+- arm64: Cluster using only arm64 CPUs
+- amd64 + arm64: Cluster with a mix of x86/amd64 and arm64 CPUs
+
+| kube_network_plugin | amd64 | arm64 | amd64 + arm64 |
+| ------------------- | ----- | ----- | ------------- |
+| Calico              | Y     | Y     | Y             |
+| Weave               | Y     | Y     | Y             |
+| Flannel             | Y     | N     | N             |
+| Canal               | Y     | N     | N             |
+| Cilium              | Y     | Y     | N             |
+| Contiv              | Y     | N     | N             |
+| kube-router         | Y     | N     | N             |
diff --git a/kubespray/docs/aws-ebs-csi.md b/kubespray/docs/aws-ebs-csi.md
new file mode 100644
index 0000000..1957277
--- /dev/null
+++ b/kubespray/docs/aws-ebs-csi.md
@@ -0,0 +1,87 @@
+# AWS EBS CSI Driver
+
+The AWS EBS CSI driver allows you to provision EBS volumes for pods running on EC2 instances. The old in-tree AWS cloud provider is deprecated and will be removed in future versions of Kubernetes, so transitioning to the CSI driver is advised.
+
+To enable the AWS EBS CSI driver, uncomment the `aws_ebs_csi_enabled` option in `group_vars/all/aws.yml` and set it to `true`.
+
+To set the number of replicas for the AWS CSI controller, you can change the `aws_ebs_csi_controller_replicas` option in `group_vars/all/aws.yml`.
+
+Make sure to add an IAM role to the EC2 instances hosting Kubernetes that allows them to perform the actions necessary to request and attach volumes: [AWS CSI Policy](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/example-iam-policy.json)
+
+If you want to deploy the AWS EBS storage class used with the CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`.
+
+You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over AWS EC2 with EBS CSI Driver enabled.
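+Putting those options together, a minimal sketch of the group_vars changes described above (values are illustrative):
+
+```yml
+# group_vars/all/aws.yml
+aws_ebs_csi_enabled: true
+aws_ebs_csi_controller_replicas: 1
+
+# group_vars/k8s_cluster/k8s_cluster.yml
+persistent_volumes_enabled: true
+```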
+ +## Usage example + +To check if AWS EBS CSI Driver is deployed properly, check that the ebs-csi pods are running: + +```ShellSession +$ kubectl -n kube-system get pods | grep ebs +ebs-csi-controller-85d86bccc5-8gtq5 4/4 Running 4 40s +ebs-csi-node-n4b99 3/3 Running 3 40s +``` + +Check the associated storage class (if you enabled persistent_volumes): + +```ShellSession +$ kubectl get storageclass +NAME PROVISIONER AGE +ebs-sc ebs.csi.aws.com 45s +``` + +You can run a PVC and an example Pod using this file `ebs-pod.yml`: + +```yml +-- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ebs-claim +spec: + accessModes: + - ReadWriteOnce + storageClassName: ebs-sc + resources: + requests: + storage: 1Gi +--- +apiVersion: v1 +kind: Pod +metadata: + name: app +spec: + containers: + - name: app + image: centos + command: ["/bin/sh"] + args: ["-c", "while true; do echo $(date -u) >> /data/out.txt; sleep 5; done"] + volumeMounts: + - name: persistent-storage + mountPath: /data + volumes: + - name: persistent-storage + persistentVolumeClaim: + claimName: ebs-claim +``` + +Apply this conf to your cluster: ```kubectl apply -f ebs-pod.yml``` + +You should see the PVC provisioned and bound: + +```ShellSession +$ kubectl get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +ebs-claim Bound pvc-0034cb9e-1ddd-4b3f-bb9e-0b5edbf5194c 1Gi RWO ebs-sc 50s +``` + +And the volume mounted to the example Pod (wait until the Pod is Running): + +```ShellSession +$ kubectl exec -it app -- df -h | grep data +/dev/nvme1n1 1014M 34M 981M 4% /data +``` + +## More info + +For further information about the AWS EBS CSI Driver, you can refer to this page: [AWS EBS Driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/). diff --git a/kubespray/docs/aws.md b/kubespray/docs/aws.md new file mode 100644 index 0000000..b45508c --- /dev/null +++ b/kubespray/docs/aws.md @@ -0,0 +1,81 @@ +# AWS + +To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. Refer to the [Kubespray Configuration](#kubespray-configuration) for customizing the provider. + +Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role. + +You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets, route tables and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`. + +Make sure your VPC has both DNS Hostnames support and Private DNS enabled. + +The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`. 
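+For example, a hypothetical inventory entry matching the AWS internal hostname might look like this (hostname, IP and user are placeholders):
+
+```ini
+ip-111-222-333-444.us-west-2.compute.internal ansible_ssh_host=111.222.333.444 ansible_ssh_user=ubuntu
+```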
+ +You can now create your cluster! + +## Dynamic Inventory + +There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes some certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome. + +This will produce an inventory that is passed into Ansible that looks like the following: + +```json +{ + "_meta": { + "hostvars": { + "ip-172-31-3-xxx.us-east-2.compute.internal": { + "ansible_ssh_host": "172.31.3.xxx" + }, + "ip-172-31-8-xxx.us-east-2.compute.internal": { + "ansible_ssh_host": "172.31.8.xxx" + } + } + }, + "etcd": [ + "ip-172-31-3-xxx.us-east-2.compute.internal" + ], + "k8s_cluster": { + "children": [ + "kube_control_plane", + "kube_node" + ] + }, + "kube_control_plane": [ + "ip-172-31-3-xxx.us-east-2.compute.internal" + ], + "kube_node": [ + "ip-172-31-8-xxx.us-east-2.compute.internal" + ] +} +``` + +Guide: + +- Create instances in AWS as needed. +- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube_node`. You can also share roles like `kube_control_plane, etcd` +- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory. +- Set the following AWS credentials and info as environment variables in your terminal: + +```ShellSession +export AWS_ACCESS_KEY_ID="xxxxx" +export AWS_SECRET_ACCESS_KEY="yyyyy" +export REGION="us-east-2" +``` + +- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml` + +## Kubespray configuration + +Declare the cloud config variables for the `aws` provider as follows. Setting these variables are optional and depend on your use case. + +Variable|Type|Comment +---|---|--- +aws_zone|string|Force set the AWS zone. Recommended to leave blank. +aws_vpc|string|The AWS VPC flag enables the possibility to run the master components on a different aws account, on a different cloud provider or on-premise. If the flag is set also the KubernetesClusterTag must be provided +aws_subnet_id|string|SubnetID enables using a specific subnet to use for ELB's +aws_route_table_id|string|RouteTableID enables using a specific RouteTable +aws_role_arn|string|RoleARN is the IAM role to assume when interaction with AWS APIs +aws_kubernetes_cluster_tag|string|KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources +aws_kubernetes_cluster_id|string|KubernetesClusterID is the cluster id we'll use to identify our cluster resources +aws_disable_security_group_ingress|bool|The aws provider creates an inbound rule per load balancer on the node security group. However, this can run into the AWS security group rule limit of 50 if many LoadBalancers are created. This flag disables the automatic ingress creation. It requires that the user has setup a rule that allows inbound traffic on kubelet ports from the local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000. 
+aws_elb_security_group|string|Only in Kubelet version >= 1.7 : AWS has a hard limit of 500 security groups. For large clusters creating a security group for each ELB can cause the max number of security groups to be reached. If this is set instead of creating a new Security group for each ELB this security group will be used instead. +aws_disable_strict_zone_check|bool|During the instantiation of an new AWS cloud provider, the detected region is validated against a known set of regions. In a non-standard, AWS like environment (e.g. Eucalyptus), this check may be undesirable. Setting this to true will disable the check and provide a warning that the check was skipped. Please note that this is an experimental feature and work-in-progress for the moment. diff --git a/kubespray/docs/azure-csi.md b/kubespray/docs/azure-csi.md new file mode 100644 index 0000000..1cc3a68 --- /dev/null +++ b/kubespray/docs/azure-csi.md @@ -0,0 +1,128 @@ +# Azure Disk CSI Driver + +The Azure Disk CSI driver allows you to provision volumes for pods with a Kubernetes deployment over Azure Cloud. The CSI driver replaces to volume provisioning done by the in-tree azure cloud provider which is deprecated. + +This documentation is an updated version of the in-tree Azure cloud provider documentation (azure.md). + +To deploy Azure Disk CSI driver, uncomment the `azure_csi_enabled` option in `group_vars/all/azure.yml` and set it to `true`. + +## Azure Disk CSI Storage Class + +If you want to deploy the Azure Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`. + +## Parameters + +Before creating the instances you must first set the `azure_csi_` variables in the `group_vars/all.yml` file. + +All of the values can be retrieved using the azure cli tool which can be downloaded here: + +After installation you have to run `az login` to get access to your account. + +### azure\_csi\_tenant\_id + azure\_csi\_subscription\_id + +Run `az account show` to retrieve your subscription id and tenant id: +`azure_csi_tenant_id` -> tenantId field +`azure_csi_subscription_id` -> id field + +### azure\_csi\_location + +The region your instances are located in, it can be something like `francecentral` or `norwayeast`. A full list of region names can be retrieved via `az account list-locations` + +### azure\_csi\_resource\_group + +The name of the resource group your instances are in, a list of your resource groups can be retrieved via `az group list` + +Or you can do `az vm list | grep resourceGroup` and get the resource group corresponding to the VMs of your cluster. + +The resource group name is not case sensitive. + +### azure\_csi\_vnet\_name + +The name of the virtual network your instances are in, can be retrieved via `az network vnet list` + +### azure\_csi\_vnet\_resource\_group + +The name of the resource group your vnet is in, can be retrieved via `az network vnet list | grep resourceGroup` and get the resource group corresponding to the vnet of your cluster. 
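+For example, one way to list each vnet together with its resource group in a compact table (output columns are illustrative):
+
+```ShellSession
+az network vnet list --query "[].{name:name, resourceGroup:resourceGroup}" -o table
+```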
+ +### azure\_csi\_subnet\_name + +The name of the subnet your instances are in, can be retrieved via `az network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME` + +### azure\_csi\_security\_group\_name + +The name of the network security group your instances are in, can be retrieved via `az network nsg list` + +### azure\_csi\_aad\_client\_id + azure\_csi\_aad\_client\_secret + +These will have to be generated first: + +- Create an Azure AD Application with: + + ```ShellSession + az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET + ``` + +Display name, identifier-uri, homepage and the password can be chosen + +Note the AppId in the output. + +- Create Service principal for the application with: + + ```ShellSession + az ad sp create --id AppId + ``` + +This is the AppId from the last command + +- Create the role assignment with: + + ```ShellSession + az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID + ``` + +azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret. + +### azure\_csi\_use\_instance\_metadata + +Use instance metadata service where possible. Boolean value. + +## Test the Azure Disk CSI driver + +To test the dynamic provisioning using Azure CSI driver, make sure to have the storage class deployed (through persistent volumes), and apply the following manifest: + +```yml +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-azuredisk +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: disk.csi.azure.com +--- +kind: Pod +apiVersion: v1 +metadata: + name: nginx-azuredisk +spec: + nodeSelector: + kubernetes.io/os: linux + containers: + - image: nginx + name: nginx-azuredisk + command: + - "/bin/sh" + - "-c" + - while true; do echo $(date) >> /mnt/azuredisk/outfile; sleep 1; done + volumeMounts: + - name: azuredisk + mountPath: "/mnt/azuredisk" + volumes: + - name: azuredisk + persistentVolumeClaim: + claimName: pvc-azuredisk +``` diff --git a/kubespray/docs/azure.md b/kubespray/docs/azure.md new file mode 100644 index 0000000..a58ca45 --- /dev/null +++ b/kubespray/docs/azure.md @@ -0,0 +1,123 @@ +# Azure + +To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all/all.yml` and set it to `'azure'`. + +All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in. + +Not all features are supported yet though, for a list of the current status have a look [here](https://github.com/Azure/AKS) + +## Parameters + +Before creating the instances you must first set the `azure_` variables in the `group_vars/all/all.yml` file. + +All of the values can be retrieved using the Azure CLI tool which can be downloaded here: +After installation you have to run `az login` to get access to your account. + +### azure_cloud + +Azure Stack has different API endpoints, depending on the Azure Stack deployment. These need to be provided to the Azure SDK. +Possible values are: `AzureChinaCloud`, `AzureGermanCloud`, `AzurePublicCloud` and `AzureUSGovernmentCloud`. 
+The full list of existing settings for the AzureChinaCloud, AzureGermanCloud, AzurePublicCloud and AzureUSGovernmentCloud +is available in the source code [here](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/cloud-provider-config.md) + +### azure\_tenant\_id + azure\_subscription\_id + +run `az account show` to retrieve your subscription id and tenant id: +`azure_tenant_id` -> Tenant ID field +`azure_subscription_id` -> ID field + +### azure\_location + +The region your instances are located, can be something like `westeurope` or `westcentralus`. A full list of region names can be retrieved via `az account list-locations` + +### azure\_resource\_group + +The name of the resource group your instances are in, can be retrieved via `az group list` + +### azure\_vmtype + +The type of the vm. Supported values are `standard` or `vmss`. If vm is type of `Virtual Machines` then value is `standard`. If vm is part of `Virtual Machine Scale Sets` then value is `vmss` + +### azure\_vnet\_name + +The name of the virtual network your instances are in, can be retrieved via `az network vnet list` + +### azure\_vnet\_resource\_group + +The name of the resource group that contains the vnet. + +### azure\_subnet\_name + +The name of the subnet your instances are in, can be retrieved via `az network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME` + +### azure\_security\_group\_name + +The name of the network security group your instances are in, can be retrieved via `az network nsg list` + +### azure\_security\_group\_resource\_group + +The name of the resource group that contains the network security group. Defaults to `azure_vnet_resource_group` + +### azure\_route\_table\_name + +The name of the route table used with your instances. + +### azure\_route\_table\_resource\_group + +The name of the resource group that contains the route table. Defaults to `azure_vnet_resource_group` + +### azure\_aad\_client\_id + azure\_aad\_client\_secret + +These will have to be generated first: + +- Create an Azure AD Application with: + + ```ShellSession + az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET + ``` + +display name, identifier-uri, homepage and the password can be chosen +Note the AppId in the output. + +- Create Service principal for the application with: + + ```ShellSession + az ad sp create --id AppId + ``` + +This is the AppId from the last command + +- Create the role assignment with: + + ```ShellSession + az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID + ``` + +azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret. + +### azure\_loadbalancer\_sku + +Sku of Load Balancer and Public IP. Candidate values are: basic and standard. + +### azure\_exclude\_master\_from\_standard\_lb + +azure\_exclude\_master\_from\_standard\_lb excludes master nodes from `standard` load balancer. + +### azure\_disable\_outbound\_snat + +azure\_disable\_outbound\_snat disables the outbound SNAT for public load balancer rules. It should only be set when azure\_exclude\_master\_from\_standard\_lb is `standard`. + +### azure\_primary\_availability\_set\_name + +(Optional) The name of the availability set that should be used as the load balancer backend .If this is set, the Azure +cloudprovider will only add nodes from that availability set to the load balancer backend pool. 
If this is not set, and +multiple agent pools (availability sets) are used, then the cloudprovider will try to add all nodes to a single backend +pool which is forbidden. In other words, if you use multiple agent pools (availability sets), you MUST set this field. + +### azure\_use\_instance\_metadata + +Use instance metadata service where possible + +## Provisioning Azure with Resource Group Templates + +You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md) diff --git a/kubespray/docs/bootstrap-os.md b/kubespray/docs/bootstrap-os.md new file mode 100644 index 0000000..c2a75c0 --- /dev/null +++ b/kubespray/docs/bootstrap-os.md @@ -0,0 +1,61 @@ +# bootstrap-os + +Bootstrap an Ansible host to be able to run Ansible modules. + +This role will: + +* configure the package manager (if applicable) to be able to fetch packages +* install Python +* install the necessary packages to use Ansible's package manager modules +* set the hostname of the host to `{{ inventory_hostname }}` when requested + +## Requirements + +A host running an operating system that is supported by Kubespray. +See [Supported Linux Distributions](https://github.com/kubernetes-sigs/kubespray#supported-linux-distributions) for a current list. + +SSH access to the host. + +## Role Variables + +Variables are listed with their default values, if applicable. + +### General variables + +* `http_proxy`/`https_proxy` + The role will configure the package manager (if applicable) to download packages via a proxy. + +* `override_system_hostname: true` + The role will set the hostname of the machine to the name it has according to Ansible's inventory (the variable `{{ inventory_hostname }}`). + +### Per distribution variables + +#### Flatcar Container Linux + +* `coreos_locksmithd_disable: false` + Whether `locksmithd` (responsible for rolling restarts) should be disabled or be left alone. + +#### CentOS/RHEL/AlmaLinux/Rocky Linux + +* `centos_fastestmirror_enabled: false` + Whether the [fastestmirror](https://wiki.centos.org/PackageManagement/Yum/FastestMirror) yum plugin should be enabled. + +## Dependencies + +The `kubespray-defaults` role is expected to be run before this role. + +## Example Playbook + +Remember to disable fact gathering since Python might not be present on hosts. + +```yaml +- hosts: all + gather_facts: false # not all hosts might be able to run modules yet + roles: + - kubespray-defaults + - bootstrap-os +``` + +## License + +Apache 2.0 diff --git a/kubespray/docs/calico.md b/kubespray/docs/calico.md new file mode 100644 index 0000000..ad1115b --- /dev/null +++ b/kubespray/docs/calico.md @@ -0,0 +1,429 @@ +# Calico + +Check if the calico-node container is running + +```ShellSession +docker ps | grep calico +``` + +The **calicoctl.sh** is wrap script with configured access credentials for command calicoctl allows to check the status of the network workloads. 
+ +* Check the status of Calico nodes + +```ShellSession +calicoctl.sh node status +``` + +* Show the configured network subnet for containers + +```ShellSession +calicoctl.sh get ippool -o wide +``` + +* Show the workloads (ip addresses of containers and their location) + +```ShellSession +calicoctl.sh get workloadEndpoint -o wide +``` + +and + +```ShellSession +calicoctl.sh get hostEndpoint -o wide +``` + +## Configuration + +### Optional : Define datastore type + +The default datastore, Kubernetes API datastore is recommended for on-premises deployments, and supports only Kubernetes workloads; etcd is the best datastore for hybrid deployments. + +Allowed values are `kdd` (default) and `etcd`. + +Note: using kdd and more than 50 nodes, consider using the `typha` daemon to provide scaling. + +To re-define you need to edit the inventory and add a group variable `calico_datastore` + +```yml +calico_datastore: kdd +``` + +### Optional : Define network backend + +In some cases you may want to define Calico network backend. Allowed values are `bird`, `vxlan` or `none`. `vxlan` is the default value. + +To re-define you need to edit the inventory and add a group variable `calico_network_backend` + +```yml +calico_network_backend: none +``` + +### Optional : Define the default pool CIDRs + +By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool, and `kube_pods_subnet_ipv6` for IPv6. +In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet` and `kube_pods_subnet_ipv6` ), it starts with the default IP Pools of which IP range CIDRs can by defined in group_vars (k8s_cluster/k8s-net-calico.yml): + +```ShellSession +calico_pool_cidr: 10.233.64.0/20 +calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +``` + +### Optional : BGP Peering with border routers + +In some cases you may want to route the pods subnet and so NAT is not needed on the nodes. +For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located. +The following variables need to be set as follow: + +```yml +peer_with_router: true # enable the peering with the datacenter's border router (default value: false). +nat_outgoing: false # (optional) NAT outgoing (default value: true). +``` + +And you'll need to edit the inventory and add a hostvar `local_as` by node. + +```ShellSession +node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx +``` + +### Optional : Defining BGP peers + +Peers can be defined using the `peers` variable (see docs/calico_peer_example examples). +In order to define global peers, the `peers` variable can be defined in group_vars with the "scope" attribute of each global peer set to "global". +In order to define peers on a per node basis, the `peers` variable must be defined in hostvars. +NB: Ansible's `hash_behaviour` is by default set to "replace", thus defining both global and per node peers would end up with having only per node peers. If having both global and per node peers defined was meant to happen, global peers would have to be defined in hostvars for each host (as well as per node peers) + +Since calico 3.4, Calico supports advertising Kubernetes service cluster IPs over BGP, just as it advertises pod IPs. 
+This can be enabled by setting the following variable as follow in group_vars (k8s_cluster/k8s-net-calico.yml) + +```yml +calico_advertise_cluster_ips: true +``` + +Since calico 3.10, Calico supports advertising Kubernetes service ExternalIPs over BGP in addition to cluster IPs advertising. +This can be enabled by setting the following variable in group_vars (k8s_cluster/k8s-net-calico.yml) + +```yml +calico_advertise_service_external_ips: +- x.x.x.x/24 +- y.y.y.y/32 +``` + +### Optional : Define global AS number + +Optional parameter `global_as_num` defines Calico global AS number (`/calico/bgp/v1/global/as_num` etcd key). +It defaults to "64512". + +### Optional : BGP Peering with route reflectors + +At large scale you may want to disable full node-to-node mesh in order to +optimize your BGP topology and improve `calico-node` containers' start times. + +To do so you can deploy BGP route reflectors and peer `calico-node` with them as +recommended here: + +* +* + +You need to edit your inventory and add: + +* `calico_rr` group with nodes in it. `calico_rr` can be combined with + `kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child + group of `k8s_cluster` group. +* `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/)) + +Here's an example of Kubespray inventory with standalone route reflectors: + +```ini +[all] +rr0 ansible_ssh_host=10.210.1.10 ip=10.210.1.10 +rr1 ansible_ssh_host=10.210.1.11 ip=10.210.1.11 +node2 ansible_ssh_host=10.210.1.12 ip=10.210.1.12 +node3 ansible_ssh_host=10.210.1.13 ip=10.210.1.13 +node4 ansible_ssh_host=10.210.1.14 ip=10.210.1.14 +node5 ansible_ssh_host=10.210.1.15 ip=10.210.1.15 + +[kube_control_plane] +node2 +node3 + +[etcd] +node2 +node3 +node4 + +[kube_node] +node2 +node3 +node4 +node5 + +[k8s_cluster:children] +kube_node +kube_control_plane +calico_rr + +[calico_rr] +rr0 +rr1 + +[rack0] +rr0 +rr1 +node2 +node3 +node4 +node5 + +[rack0:vars] +cluster_id="1.0.0.1" +calico_rr_id=rr1 +calico_group_id=rr1 +``` + +The inventory above will deploy the following topology assuming that calico's +`global_as_num` is set to `65400`: + +![Image](figures/kubespray-calico-rr.png?raw=true) + +### Optional : Define default endpoint to host action + +By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see and Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped. + +To re-define default action please set the following variable in your inventory: + +```yml +calico_endpoint_to_host_action: "ACCEPT" +``` + +### Optional : Define address on which Felix will respond to health requests + +Since Calico 3.2.0, HealthCheck default behavior changed from listening on all interfaces to just listening on localhost. + +To re-define health host please set the following variable in your inventory: + +```yml +calico_healthhost: "0.0.0.0" +``` + +### Optional : Configure VXLAN hardware Offload + +Because of the Issue [projectcalico/calico#4727](https://github.com/projectcalico/calico/issues/4727), The VXLAN Offload is disable by default. 
It can be configured like this:
+
+```yml
+calico_feature_detect_override: "ChecksumOffloadBroken=true" # The VXLAN offload will be enabled when the kernel version is > 5.7 (it may cause problems with buggy NIC drivers)
+```
+
+### Optional : Configure Calico Node probe timeouts
+
+Under certain conditions a deployer may need to tune the Calico liveness and readiness probe timeout settings. These can be configured like this:
+
+```yml
+calico_node_livenessprobe_timeout: 10
+calico_node_readinessprobe_timeout: 10
+```
+
+## Config encapsulation for cross server traffic
+
+Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is the more mature implementation and enabled by default, please check your environment if you need *IP in IP* encapsulation.
+
+*IP in IP* and *VXLAN* are mutually exclusive modes.
+
+Kubespray defaults have changed after version 2.18 from auto-enabling `ipip` mode to auto-enabling `vxlan`. This was done to facilitate wider deployment scenarios including those where vxlan acceleration is provided by the underlying network devices.
+
+If you are running your cluster with the default calico settings and are upgrading to a release post 2.18.x (i.e. 2.19 and later or `master` branch) then you have two options:
+
+* perform a manual migration to vxlan before upgrading kubespray (see migrating from IP in IP to VXLAN below)
+* pin the pre-2.19 settings in your ansible inventory (see IP in IP mode settings below)
+
+### IP in IP mode
+
+To configure IP in IP mode you need to use the bird network backend.
+
+```yml
+calico_ipip_mode: 'Always'  # Possible values are `Always`, `CrossSubnet`, `Never`
+calico_vxlan_mode: 'Never'
+calico_network_backend: 'bird'
+```
+
+### BGP mode
+
+To enable BGP no-encapsulation mode:
+
+```yml
+calico_ipip_mode: 'Never'
+calico_vxlan_mode: 'Never'
+calico_network_backend: 'bird'
+```
+
+### Migrating from IP in IP to VXLAN
+
+If you would like to migrate from the old default of IP in IP with the `bird` network backend to the new VXLAN based encapsulation, you need to perform this change before running an upgrade of your cluster; the `cluster.yml` and `upgrade-cluster.yml` playbooks will refuse to continue if they detect incompatible settings.
+
+Execute the following steps on one of the control plane nodes, and ensure the cluster is healthy before proceeding.
+
+```shell
+calicoctl.sh patch felixconfig default -p '{"spec":{"vxlanEnabled":true}}'
+calicoctl.sh patch ippool default-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}'
+```
+
+**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool created by kubespray.
+
+Wait for the `vxlan.calico` interfaces to be created on all cluster nodes and traffic to be routed through them, then you can disable `ipip`.
+
+```shell
+calicoctl.sh patch felixconfig default -p '{"spec":{"ipipEnabled":false}}'
+```
+
+## Configuring interface MTU
+
+This is an advanced topic and should usually not be modified unless you know exactly what you are doing. Calico is smart enough to deal with the defaults and calculate the proper MTU. If you do need to set up a custom MTU you can change `calico_veth_mtu` as follows:
+
+* If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440)
+* If VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e.
1500-50=1450) +* If using IPIP, subtract 20 from your network MTU (i.e. 1500-20=1480) +* if not using any encapsulation, set to your network MTU (i.e. 1500 or 9000) + +```yaml +calico_veth_mtu: 1440 +``` + +## Cloud providers configuration + +Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``calico_ipip_mode: Always`` if the cloud provider was defined. + +### Optional : Ignore kernel's RPF check setting + +By default the felix agent(calico-node) will abort if the Kernel RPF setting is not 'strict'. If you want Calico to ignore the Kernel setting: + +```yml +calico_node_ignorelooserpf: true +``` + +Note that in OpenStack you must allow `ipip` traffic in your security groups, +otherwise you will experience timeouts. +To do this you must add a rule which allows it, for example: + +### Optional : Felix configuration via extraenvs of calico node + +Possible environment variable parameters for [configuring Felix](https://docs.projectcalico.org/reference/felix/configuration) + +```yml +calico_node_extra_envs: + FELIX_DEVICEROUTESOURCEADDRESS: 172.17.0.1 +``` + +```ShellSession +neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t +neutron security-group-rule-create --protocol 4 --direction igress k8s-a0tp4t +``` + +### Optional : Use Calico CNI host-local IPAM plugin + +Calico currently supports two types of CNI IPAM plugins, `host-local` and `calico-ipam` (default). + +To allow Calico to determine the subnet to use from the Kubernetes API based on the `Node.podCIDR` field, enable the following setting. + +```yml +calico_ipam_host_local: true +``` + +Refer to Project Calico section [Using host-local IPAM](https://docs.projectcalico.org/reference/cni-plugin/configuration#using-host-local-ipam) for further information. + +### Optional : Disable CNI logging to disk + +Calico CNI plugin logs to /var/log/calico/cni/cni.log and to stderr. +stderr of CNI plugins can be found in the logs of container runtime. + +You can disable Calico CNI logging to disk by setting `calico_cni_log_file_path: false`. + +## eBPF Support + +Calico supports eBPF for its data plane see [an introduction to the Calico eBPF Dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/) for further information. + +Note that it is advisable to always use the latest version of Calico when using the eBPF dataplane. + +### Enabling eBPF support + +To enable the eBPF dataplane support ensure you add the following to your inventory. Note that the `kube-proxy` is incompatible with running Calico in eBPF mode and the kube-proxy should be removed from the system. + +```yaml +calico_bpf_enabled: true +``` + +**NOTE:** there is known incompatibility in using the `kernel-kvm` kernel package on Ubuntu OSes because it is missing support for `CONFIG_NET_SCHED` which is a requirement for Calico eBPF support. When using Calico eBPF with Ubuntu ensure you run the `-generic` kernel. + +### Cleaning up after kube-proxy + +Calico node cannot clean up after kube-proxy has run in ipvs mode. If you are converting an existing cluster to eBPF you will need to ensure the `kube-proxy` DaemonSet is deleted and that ipvs rules are cleaned. 
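+For example, one way to remove the kube-proxy DaemonSet (verify the namespace and DaemonSet name in your cluster first):
+
+```ShellSession
+kubectl -n kube-system delete daemonset kube-proxy
+```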
+ +To check that kube-proxy was running in ipvs mode: + +```ShellSession +# ipvsadm -l +``` + +To clean up any ipvs leftovers: + +```ShellSession +# ipvsadm -C +``` + +### Calico access to the kube-api + +Calico node, typha and kube-controllers need to be able to talk to the kubernetes API. Please reference the [Enabling eBPF Calico Docs](https://docs.projectcalico.org/maintenance/ebpf/enabling-bpf) for guidelines on how to do this. + +Kubespray sets up the `kubernetes-services-endpoint` configmap based on the contents of the `loadbalancer_apiserver` inventory variable documented in [HA Mode](/docs/ha-mode.md). + +If no external loadbalancer is used, Calico eBPF can also use the localhost loadbalancer option. In this case Calico Automatic Host Endpoints need to be enabled to allow services like `coredns` and `metrics-server` to communicate with the kubernetes host endpoint. See [this blog post](https://www.projectcalico.org/securing-kubernetes-nodes-with-calico-automatic-host-endpoints/) on enabling automatic host endpoints. + +```yaml +loadbalancer_apiserver_localhost: true +use_localhost_as_kubeapi_loadbalancer: true +``` + +### Tunneled versus Direct Server Return + +By default Calico uses Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service. + +To configure DSR: + +```yaml +calico_bpf_service_mode: "DSR" +``` + +### eBPF Logging and Troubleshooting + +In order to enable Calico eBPF mode logging: + +```yaml +calico_bpf_log_level: "Debug" +``` + +To view the logs you need to use the `tc` command to read the kernel trace buffer: + +```ShellSession +tc exec bpf debug +``` + +Please see [Calico eBPF troubleshooting guide](https://docs.projectcalico.org/maintenance/troubleshoot/troubleshoot-ebpf#ebpf-program-debug-logs). + +## Wireguard Encryption + +Calico supports using Wireguard for encryption. Please see the docs on [encrypt cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic). + +To enable wireguard support: + +```yaml +calico_wireguard_enabled: true +``` + +The following OSes will require enabling the EPEL repo in order to bring in wireguard tools: + +* CentOS 7 & 8 +* AlmaLinux 8 +* Rocky Linux 8 +* Amazon Linux 2 + +```yaml +epel_enabled: true +``` diff --git a/kubespray/docs/calico_peer_example/new-york.yml b/kubespray/docs/calico_peer_example/new-york.yml new file mode 100644 index 0000000..af497a9 --- /dev/null +++ b/kubespray/docs/calico_peer_example/new-york.yml @@ -0,0 +1,12 @@ +# --- +# peers: +# - router_id: "10.99.0.34" +# as: "65xxx" +# sourceaddress: "None" +# - router_id: "10.99.0.35" +# as: "65xxx" +# sourceaddress: "None" + +# loadbalancer_apiserver: +# address: "10.99.0.44" +# port: "8383" diff --git a/kubespray/docs/calico_peer_example/paris.yml b/kubespray/docs/calico_peer_example/paris.yml new file mode 100644 index 0000000..1768e03 --- /dev/null +++ b/kubespray/docs/calico_peer_example/paris.yml @@ -0,0 +1,12 @@ +# --- +# peers: +# - router_id: "10.99.0.2" +# as: "65xxx" +# sourceaddress: "None" +# - router_id: "10.99.0.3" +# as: "65xxx" +# sourceaddress: "None" + +# loadbalancer_apiserver: +# address: "10.99.0.21" +# port: "8383" diff --git a/kubespray/docs/centos.md b/kubespray/docs/centos.md new file mode 100644 index 0000000..67a1f17 --- /dev/null +++ b/kubespray/docs/centos.md @@ -0,0 +1,16 @@ +# CentOS and derivatives + +## CentOS 7 + +The maximum python version officially supported in CentOS is 3.6. 
Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above. +Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported. + +## CentOS 8 + +CentOS 8 / Oracle Linux 8,9 / AlmaLinux 8,9 / Rocky Linux 8,9 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8) +The only tested configuration for now is using Calico CNI +You need to add `calico_iptables_backend: "NFT"` to your configuration. + +If you have containers that are using iptables in the host network namespace (`hostNetwork=true`), +you need to ensure they are using iptables-nft. +An example how k8s do the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966) diff --git a/kubespray/docs/cert_manager.md b/kubespray/docs/cert_manager.md new file mode 100644 index 0000000..4ed28af --- /dev/null +++ b/kubespray/docs/cert_manager.md @@ -0,0 +1,196 @@ +# Installation Guide + +- [Installation Guide](#installation-guide) + - [Kubernetes TLS Root CA Certificate/Key Secret](#kubernetes-tls-root-ca-certificatekey-secret) + - [Securing Ingress Resources](#securing-ingress-resources) + - [Create New TLS Root CA Certificate and Key](#create-new-tls-root-ca-certificate-and-key) + - [Install Cloudflare PKI/TLS `cfssl` Toolkit.](#install-cloudflare-pkitls-cfssl-toolkit) + - [Create Root Certificate Authority (CA) Configuration File](#create-root-certificate-authority-ca-configuration-file) + - [Create Certficate Signing Request (CSR) Configuration File](#create-certficate-signing-request-csr-configuration-file) + - [Create TLS Root CA Certificate and Key](#create-tls-root-ca-certificate-and-key) + +Cert-Manager is a native Kubernetes certificate management controller. It can help with issuing certificates from a variety of sources, such as Let’s Encrypt, HashiCorp Vault, Venafi, a simple signing key pair, or self signed. It will ensure certificates are valid and up to date, and attempt to renew certificates at a configured time before expiry. + +## Kubernetes TLS Root CA Certificate/Key Secret + +If you're planning to secure your ingress resources using TLS client certificates, you'll need to create and deploy the Kubernetes `ca-key-pair` secret consisting of the Root CA certificate and key to your K8s cluster. + +For further information, read the official [Cert-Manager CA Configuration](https://cert-manager.io/docs/configuration/ca/) doc. + +`cert-manager` can now be enabled by editing your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s_cluster\addons.yml` and setting `cert_manager_enabled` to true. + +```ini +# Cert manager deployment +cert_manager_enabled: true +``` + +If you don't have a TLS Root CA certificate and key available, you can create these by following the steps outlined in section [Create New TLS Root CA Certificate and Key](#create-new-tls-root-ca-certificate-and-key) using the Cloudflare PKI/TLS `cfssl` toolkit. TLS Root CA certificates and keys can also be created using `ssh-keygen` and OpenSSL, if `cfssl` is not available. + +## Securing Ingress Resources + +A common use-case for cert-manager is requesting TLS signed certificates to secure your ingress resources. This can be done by simply adding annotations to your Ingress resources and cert-manager will facilitate creating the Certificate resource for you. 
A small sub-component of cert-manager, ingress-shim, is responsible for this. + +To enable the Nginx Ingress controller as part of your Kubespray deployment, simply edit your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s_cluster\addons.yml` and set `ingress_nginx_enabled` to true. + +```ini +# Nginx ingress controller deployment +ingress_nginx_enabled: true +``` + +For example, if you're using the Nginx ingress controller, you can secure the Prometheus ingress by adding the annotation `cert-manager.io/cluster-issuer: ca-issuer` and the `spec.tls` section to the `Ingress` resource definition. + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: prometheus-k8s + namespace: monitoring + labels: + prometheus: k8s + annotations: + kubernetes.io/ingress.class: "nginx" + cert-manager.io/cluster-issuer: ca-issuer +spec: + tls: + - hosts: + - prometheus.example.com + secretName: prometheus-dashboard-certs + rules: + - host: prometheus.example.com + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: prometheus-k8s + port: + name: web +``` + +Once deployed to your K8s cluster, every 3 months cert-manager will automatically rotate the Prometheus `prometheus.example.com` TLS client certificate and key, and store these as the Kubernetes `prometheus-dashboard-certs` secret. + +Please consult the official upstream documentation: + +- [cert-manager Ingress Usage](https://cert-manager.io/v1.5-docs/usage/ingress/) +- [cert-manager Ingress Tutorial](https://cert-manager.io/v1.5-docs/tutorials/acme/ingress/#step-3-assign-a-dns-name) + +### ACME + +The ACME Issuer type represents a single account registered with the Automated Certificate Management Environment (ACME) Certificate Authority server. When you create a new ACME Issuer, cert-manager will generate a private key which is used to identify you with the ACME server. + +Certificates issued by public ACME servers are typically trusted by client’s computers by default. This means that, for example, visiting a website that is backed by an ACME certificate issued for that URL, will be trusted by default by most client’s web browsers. ACME certificates are typically free. + +- [ACME Configuration](https://cert-manager.io/v1.5-docs/configuration/acme/) +- [ACME HTTP Validation](https://cert-manager.io/v1.5-docs/tutorials/acme/http-validation/) + - [HTTP01 Challenges](https://cert-manager.io/v1.5-docs/configuration/acme/http01/) +- [ACME DNS Validation](https://cert-manager.io/v1.5-docs/tutorials/acme/dns-validation/) + - [DNS01 Challenges](https://cert-manager.io/v1.5-docs/configuration/acme/dns01/) +- [ACME FAQ](https://cert-manager.io/v1.5-docs/faq/acme/) + +#### ACME With An Internal Certificate Authority + +The ACME Issuer with an internal certificate authority requires cert-manager to trust the certificate authority. This trust must be done at the cert-manager deployment level. +To add a trusted certificate authority to cert-manager, add it's certificate to `group_vars/k8s-cluster/addons.yml`: + +```yaml +cert_manager_trusted_internal_ca: | + -----BEGIN CERTIFICATE----- + [REPLACE with your CA certificate] + -----END CERTIFICATE----- +``` + +Once the CA is trusted, you can define your issuer normally. + +### Create New TLS Root CA Certificate and Key + +#### Install Cloudflare PKI/TLS `cfssl` Toolkit + +e.g. For Ubuntu/Debian distributions, the toolkit is part of the `golang-cfssl` package. 
+ +```shell +sudo apt-get install -y golang-cfssl +``` + +#### Create Root Certificate Authority (CA) Configuration File + +The default TLS certificate expiry time period is `8760h` which is 5 years from the date the certificate is created. + +```shell +$ cat > ca-config.json < ca-csr.json < The Cilium Operator is responsible for managing duties in the cluster which should logically be handled once for the entire cluster, rather than once for each node in the cluster. + +### Adding custom flags to the Cilium Operator + +You can set additional cilium-operator container arguments using `cilium_operator_custom_args`. +This is an advanced option, and you should only use it if you know what you are doing. + +Accepts an array or a string. + +```yml +cilium_operator_custom_args: ["--foo=bar", "--baz=qux"] +``` + +or + +```yml +cilium_operator_custom_args: "--foo=bar" +``` + +You do not need to add a custom flag to enable debugging. Instead, feel free to use the `CILIUM_DEBUG` variable. + +### Adding extra volumes and mounting them + +You can use `cilium_operator_extra_volumes` to add extra volumes to the Cilium Operator, and use `cilium_operator_extra_volume_mounts` to mount those volumes. +This is an advanced option, and you should only use it if you know what you are doing. + +```yml +cilium_operator_extra_volumes: + - configMap: + name: foo + name: foo-mount-path + +cilium_operator_extra_volume_mounts: + - mountPath: /tmp/foo/bar + name: foo-mount-path + readOnly: true +``` + +## Choose Cilium version + +```yml +cilium_version: v1.12.1 +``` + +## Add variable to config + +Use following variables: + +Example: + +```yml +cilium_config_extra_vars: + enable-endpoint-routes: true +``` + +## Change Identity Allocation Mode + +Cilium assigns an identity for each endpoint. This identity is used to enforce basic connectivity between endpoints. + +Cilium currently supports two different identity allocation modes: + +- "crd" stores identities in kubernetes as CRDs (custom resource definition). + - These can be queried with `kubectl get ciliumid` +- "kvstore" stores identities in an etcd kvstore. + +## Enable Transparent Encryption + +Cilium supports the transparent encryption of Cilium-managed host traffic and +traffic between Cilium-managed endpoints either using IPsec or Wireguard. + +Wireguard option is only available in Cilium 1.10.0 and newer. + +### IPsec Encryption + +For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/gettingstarted/encryption-ipsec/) + +To enable IPsec encryption, you just need to set three variables. + +```yml +cilium_encryption_enabled: true +cilium_encryption_type: "ipsec" +``` + +The third variable is `cilium_ipsec_key.` You need to create a secret key string for this variable. +Kubespray does not automate this process. +Cilium documentation currently recommends creating a key using the following command: + +```shell +echo "3 rfc4106(gcm(aes)) $(echo $(dd if=/dev/urandom count=20 bs=1 2> /dev/null | xxd -p -c 64)) 128" +``` + +Note that Kubespray handles secret creation. So you only need to pass the key as the `cilium_ipsec_key` variable. + +### Wireguard Encryption + +For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/gettingstarted/encryption-wireguard/) + +To enable Wireguard encryption, you just need to set two variables. 
+ +```yml +cilium_encryption_enabled: true +cilium_encryption_type: "wireguard" +``` + +Kubespray currently supports Linux distributions with Wireguard Kernel mode on Linux 5.6 and newer. + +## Bandwidth Manager + +Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation. + +Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies. +In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. + +Bandwidth Manager requires a v5.1.x or more recent Linux kernel. + +For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/v1.12/gettingstarted/bandwidth-manager/) + +To use this function, set the following parameters + +```yml +cilium_enable_bandwidth_manager: true +``` + +## Install Cilium Hubble + +k8s-net-cilium.yml: + +```yml +cilium_enable_hubble: true ## enable support hubble in cilium +cilium_hubble_install: true ## install hubble-relay, hubble-ui +cilium_hubble_tls_generate: true ## install hubble-certgen and generate certificates +``` + +To validate that Hubble UI is properly configured, set up a port forwarding for hubble-ui service: + +```shell script +kubectl port-forward -n kube-system svc/hubble-ui 12000:80 +``` + +and then open [http://localhost:12000/](http://localhost:12000/). + +## Hubble metrics + +```yml +cilium_enable_hubble_metrics: true +cilium_hubble_metrics: + - dns + - drop + - tcp + - flow + - icmp + - http +``` + +[More](https://docs.cilium.io/en/v1.9/operations/metrics/#hubble-exported-metrics) + +## Upgrade considerations + +### Rolling-restart timeouts + +Cilium relies on the kernel's BPF support, which is extremely fast at runtime but incurs a compilation penalty on initialization and update. + +As a result, the Cilium DaemonSet pods can take a significant time to start, which scales with the number of nodes and endpoints in your cluster. + +As part of cluster.yml, this DaemonSet is restarted, and Kubespray's [default timeouts for this operation](../roles/network_plugin/cilium/defaults/main.yml) +are not appropriate for large clusters. + +This means that you will likely want to update these timeouts to a value more in-line with your cluster's number of nodes and their respective CPU performance. +This is configured by the following values: + +```yaml +# Configure how long to wait for the Cilium DaemonSet to be ready again +cilium_rolling_restart_wait_retries_count: 30 +cilium_rolling_restart_wait_retries_delay_seconds: 10 +``` + +The total time allowed (count * delay) should be at least `($number_of_nodes_in_cluster * $cilium_pod_start_time)` for successful rolling updates. There are no +drawbacks to making it higher and giving yourself a time buffer to accommodate transient slowdowns. + +Note: To find the `$cilium_pod_start_time` for your cluster, you can simply restart a Cilium pod on a node of your choice and look at how long it takes for it +to become ready. + +Note 2: The default CPU requests/limits for Cilium pods is set to a very conservative 100m:500m which will likely yield very slow startup for Cilium pods. You +probably want to significantly increase the CPU limit specifically if short bursts of CPU from Cilium are acceptable to you. 
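+
+As a rough illustration of the sizing formula above (the node count and pod start time here are made-up numbers, not measurements): for a cluster of about 100 nodes where a restarted Cilium pod takes roughly 15 seconds to become ready, the total wait should be at least 100 * 15s = 1500s, which could be expressed as:
+
+```yaml
+# 150 retries * 10s delay = 1500s total wait, matching the estimate above
+cilium_rolling_restart_wait_retries_count: 150
+cilium_rolling_restart_wait_retries_delay_seconds: 10
+```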
diff --git a/kubespray/docs/cinder-csi.md b/kubespray/docs/cinder-csi.md new file mode 100644 index 0000000..b7dadf1 --- /dev/null +++ b/kubespray/docs/cinder-csi.md @@ -0,0 +1,102 @@ +# Cinder CSI Driver + +Cinder CSI driver allows you to provision volumes over an OpenStack deployment. The Kubernetes historic in-tree cloud provider is deprecated and will be removed in future versions. + +To enable Cinder CSI driver, uncomment the `cinder_csi_enabled` option in `group_vars/all/openstack.yml` and set it to `true`. + +To set the number of replicas for the Cinder CSI controller, you can change `cinder_csi_controller_replicas` option in `group_vars/all/openstack.yml`. + +You need to source the OpenStack credentials you use to deploy your machines that will host Kubernetes: `source path/to/your/openstack-rc` or `. path/to/your/openstack-rc`. + +Make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack. Otherwise [cinder](https://docs.openstack.org/cinder/latest/) won't work as expected. + +If you want to deploy the cinder provisioner used with Cinder CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`. + +You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over OpenStack with Cinder CSI Driver enabled. + +## Usage example + +To check if Cinder CSI Driver works properly, see first that the cinder-csi pods are running: + +```ShellSession +$ kubectl -n kube-system get pods | grep cinder +csi-cinder-controllerplugin-7f8bf99785-cpb5v 5/5 Running 0 100m +csi-cinder-nodeplugin-rm5x2 2/2 Running 0 100m +``` + +Check the associated storage class (if you enabled persistent_volumes): + +```ShellSession +$ kubectl get storageclass +NAME PROVISIONER AGE +cinder-csi cinder.csi.openstack.org 100m +``` + +You can run a PVC and an Nginx Pod using this file `nginx.yaml`: + +```yml +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: csi-pvc-cinderplugin +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: cinder-csi + +--- +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - mountPath: /var/lib/www/html + name: csi-data-cinderplugin + volumes: + - name: csi-data-cinderplugin + persistentVolumeClaim: + claimName: csi-pvc-cinderplugin + readOnly: false +``` + +Apply this conf to your cluster: ```kubectl apply -f nginx.yml``` + +You should see the PVC provisioned and bound: + +```ShellSession +$ kubectl get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +csi-pvc-cinderplugin Bound pvc-f21ad0a1-5b7b-405e-a462-48da5cb76beb 1Gi RWO cinder-csi 8s +``` + +And the volume mounted to the Nginx Pod (wait until the Pod is Running): + +```ShellSession +kubectl exec -it nginx -- df -h | grep /var/lib/www/html +/dev/vdb 976M 2.6M 958M 1% /var/lib/www/html +``` + +## Compatibility with in-tree cloud provider + +It is not necessary to enable OpenStack as a cloud provider for Cinder CSI Driver to work. +Though, you can run both the in-tree openstack cloud provider and the Cinder CSI Driver at the same time. The storage class provisioners associated to each one of them are differently named. + +## Cinder v2 support + +For the moment, only Cinder v3 is supported by the CSI Driver. 
+ +## More info + +For further information about the Cinder CSI Driver, you can refer to this page: [Cloud Provider OpenStack](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/using-cinder-csi-plugin.md). diff --git a/kubespray/docs/cloud.md b/kubespray/docs/cloud.md new file mode 100644 index 0000000..ccd30fb --- /dev/null +++ b/kubespray/docs/cloud.md @@ -0,0 +1,13 @@ +# Cloud providers + +## Provisioning + +You can deploy instances in your cloud environment in several different ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation. + +## Deploy kubernetes + +With ansible-playbook command + +```ShellSession +ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml +``` diff --git a/kubespray/docs/cni.md b/kubespray/docs/cni.md new file mode 100644 index 0000000..e21ed54 --- /dev/null +++ b/kubespray/docs/cni.md @@ -0,0 +1,10 @@ +CNI +============== + +This network plugin only unpacks CNI plugins version `cni_version` into `/opt/cni/bin` and instructs kubelet to use cni, that is adds following cli params: + +`KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"` + +It's intended usage is for custom CNI configuration, e.g. manual routing tables + bridge + loopback CNI plugin outside kubespray scope. Furthermore, it's used for non-kubespray supported CNI plugins which you can install afterward. + +You are required to fill `/etc/cni/net.d` with valid CNI configuration after using kubespray. diff --git a/kubespray/docs/comparisons.md b/kubespray/docs/comparisons.md new file mode 100644 index 0000000..7ad1921 --- /dev/null +++ b/kubespray/docs/comparisons.md @@ -0,0 +1,26 @@ +# Comparison + +## Kubespray vs Kops + +Kubespray runs on bare metal and most clouds, using Ansible as its substrate for +provisioning and orchestration. [Kops](https://github.com/kubernetes/kops) performs the provisioning and orchestration +itself, and as such is less flexible in deployment platforms. For people with +familiarity with Ansible, existing Ansible deployments or the desire to run a +Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops, +however, is more tightly integrated with the unique features of the clouds it +supports so it could be a better choice if you know that you will only be using +one platform for the foreseeable future. + +## Kubespray vs Kubeadm + +[Kubeadm](https://github.com/kubernetes/kubeadm) provides domain Knowledge of Kubernetes clusters' life cycle +management, including self-hosted layouts, dynamic discovery services and so +on. Had it belonged to the new [operators world](https://coreos.com/blog/introducing-operators.html), +it may have been named a "Kubernetes cluster operator". Kubespray however, +does generic configuration management tasks from the "OS operators" ansible +world, plus some initial K8s clustering (with networking plugins included) and +control plane bootstrapping. + +Kubespray has started using `kubeadm` internally for cluster creation since v2.3 +in order to consume life cycle management domain knowledge from it +and offload generic OS configuration things from it, which hopefully benefits both sides. 
diff --git a/kubespray/docs/containerd.md b/kubespray/docs/containerd.md new file mode 100644 index 0000000..5b20e7f --- /dev/null +++ b/kubespray/docs/containerd.md @@ -0,0 +1,106 @@ +# containerd + +[containerd] An industry-standard container runtime with an emphasis on simplicity, robustness and portability +Kubespray supports basic functionality for using containerd as the default container runtime in a cluster. + +_To use the containerd container runtime set the following variables:_ + +## k8s_cluster.yml + +When kube_node contains etcd, you define your etcd cluster to be as well schedulable for Kubernetes workloads. Thus containerd and dockerd can not run at same time, must be set to bellow for running etcd cluster with only containerd. + +```yaml +container_manager: containerd +``` + +## etcd.yml + +```yaml +etcd_deployment_type: host +``` + +## Containerd config + +Example: define registry mirror for docker hub + +```yaml +containerd_registries: + "docker.io": + - "https://mirror.gcr.io" + - "https://registry-1.docker.io" +``` + +`containerd_registries` is ignored for pulling images when `image_command_tool=nerdctl` +(the default for `container_manager=containerd`). Use `crictl` instead, it supports +`containerd_registries` but lacks proper multi-arch support (see +[#8375](https://github.com/kubernetes-sigs/kubespray/issues/8375)): + +```yaml +image_command_tool: crictl +``` + +### Containerd Runtimes + +Containerd supports multiple runtime configurations that can be used with +[RuntimeClass] Kubernetes feature. See [runtime classes in containerd] for the +details of containerd configuration. + +In kubespray, the default runtime name is "runc", and it can be configured with the `containerd_runc_runtime` dictionary: + +```yaml +containerd_runc_runtime: + name: runc + type: "io.containerd.runc.v2" + engine: "" + root: "" + options: + systemdCgroup: "false" + binaryName: /usr/local/bin/my-runc + base_runtime_spec: cri-base.json +``` + +Further runtimes can be configured with `containerd_additional_runtimes`, which +is a list of such dictionaries. + +Default runtime can be changed by setting `containerd_default_runtime`. + +#### Base runtime specs and limiting number of open files + +`base_runtime_spec` key in a runtime dictionary is used to explicitly +specify a runtime spec json file. `runc` runtime has it set to `cri-base.json`, +which is generated with `ctr oci spec > /etc/containerd/cri-base.json` and +updated to include a custom setting for maximum number of file descriptors per +container. + +You can change maximum number of file descriptors per container for the default +`runc` runtime by setting the `containerd_base_runtime_spec_rlimit_nofile` +variable. + +You can tune many more [settings][runtime-spec] by supplying your own file name and content with `containerd_base_runtime_specs`: + +```yaml +containerd_base_runtime_specs: + cri-spec-custom.json: | + { + "ociVersion": "1.0.2-dev", + "process": { + "user": { + "uid": 0, + ... +``` + +The files in this dict will be placed in containerd config directory, +`/etc/containerd` by default. The files can then be referenced by filename in a +runtime: + +```yaml +containerd_runc_runtime: + name: runc + base_runtime_spec: cri-spec-custom.json + ... 
+``` + +[containerd]: https://containerd.io/ +[RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/ +[runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes +[runtime-spec]: https://github.com/opencontainers/runtime-spec diff --git a/kubespray/docs/cri-o.md b/kubespray/docs/cri-o.md new file mode 100644 index 0000000..43be723 --- /dev/null +++ b/kubespray/docs/cri-o.md @@ -0,0 +1,78 @@ +# CRI-O + +[CRI-O] is a lightweight container runtime for Kubernetes. +Kubespray supports basic functionality for using CRI-O as the default container runtime in a cluster. + +* Kubernetes supports CRI-O on v1.11.1 or later. +* etcd: configure either kubeadm managed etcd or host deployment + +_To use the CRI-O container runtime set the following variables:_ + +## all/all.yml + +```yaml +download_container: false +skip_downloads: false +etcd_deployment_type: host # optionally kubeadm +``` + +## k8s_cluster/k8s_cluster.yml + +```yaml +container_manager: crio +``` + +## all/crio.yml + +Enable docker hub registry mirrors + +```yaml +crio_registries: + - prefix: docker.io + insecure: false + blocked: false + location: registry-1.docker.io + unqualified: false + mirrors: + - location: 192.168.100.100:5000 + insecure: true + - location: mirror.gcr.io + insecure: false +``` + +## Note about pids_limit + +For heavily mult-threaded workloads like databases, the default of 1024 for pids-limit is too low. +This parameter controls not just the number of processes but also the amount of threads +(since a thread is technically a process with shared memory). See [cri-o#1921] + +In order to increase the default `pids_limit` for cri-o based deployments you need to set the `crio_pids_limit` +for your `k8s_cluster` ansible group or per node depending on the use case. + +```yaml +crio_pids_limit: 4096 +``` + +[CRI-O]: https://cri-o.io/ +[cri-o#1921]: https://github.com/cri-o/cri-o/issues/1921 + +## Note about user namespaces + +CRI-O has support for user namespaces. This feature is optional and can be enabled by setting the following two variables. + +```yaml +crio_runtimes: + - name: runc + path: /usr/bin/runc + type: oci + root: /run/runc + allowed_annotations: + - "io.kubernetes.cri-o.userns-mode" + +crio_remap_enable: true +``` + +The `allowed_annotations` configures `crio.conf` accordingly. + +The `crio_remap_enable` configures the `/etc/subuid` and `/etc/subgid` files to add an entry for the **containers** user. +By default, 16M uids and gids are reserved for user namespaces (256 pods * 65536 uids/gids) at the end of the uid/gid space. diff --git a/kubespray/docs/debian.md b/kubespray/docs/debian.md new file mode 100644 index 0000000..8c25637 --- /dev/null +++ b/kubespray/docs/debian.md @@ -0,0 +1,41 @@ +# Debian Jessie + +Debian Jessie installation Notes: + +- Add + + ```ini + GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" + ``` + + to `/etc/default/grub`. Then update with + + ```ShellSession + sudo update-grub + sudo update-grub2 + sudo reboot + ``` + +- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd. 
+ + ```ShellSession + apt-get -t jessie-backports install systemd + ``` + + (Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files) + +- Add the Ansible repository and install Ansible to get a proper version + + ```ShellSession + sudo add-apt-repository ppa:ansible/ansible + sudo apt-get update + sudo apt-get install ansible + ``` + +- Install Jinja2 and Python-Netaddr + + ```ShellSession + sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr + ``` + +Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment) diff --git a/kubespray/docs/dns-stack.md b/kubespray/docs/dns-stack.md new file mode 100644 index 0000000..0b4cd77 --- /dev/null +++ b/kubespray/docs/dns-stack.md @@ -0,0 +1,296 @@ +# K8s DNS stack by Kubespray + +For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](https://kubernetes.io/docs/admin/dns/) +[cluster add-on](https://releases.k8s.io/master/cluster/addons/README.md) +to serve as an authoritative DNS server for a given ``dns_domain`` and its +``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels). + +Other nodes in the inventory, like external storage nodes or a separate etcd cluster +node group, considered non-cluster and left up to the user to configure DNS resolve. + +## DNS variables + +There are several global variables which can be used to modify DNS settings: + +### ndots + +ndots value to be used in ``/etc/resolv.conf`` + +It is important to note that multiple search domains combined with high ``ndots`` +values lead to poor performance of DNS stack, so please choose it wisely. + +## dns_timeout + +timeout value to be used in ``/etc/resolv.conf`` + +## dns_attempts + +attempts value to be used in ``/etc/resolv.conf`` + +### searchdomains + +Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). + +Most Linux systems limit the total number of search domains to 6 and the total length of all search domains +to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit. + +`remove_default_searchdomains: true` will remove the default cluster search domains. + +Please note that ``resolvconf_mode: docker_dns`` will automatically add your systems search domains as +additional search domains. Please take this into the accounts for the limits. + +### nameservers + +This variable is only used by ``resolvconf_mode: host_resolvconf``. These nameservers are added to the hosts +``/etc/resolv.conf`` *after* ``upstream_dns_servers`` and thus serve as backup nameservers. If this variable +is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8 when no cloud provider is specified). + +### upstream_dns_servers + +DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup +DNS servers in early cluster deployment when no cluster DNS is available yet. + +### dns_upstream_forward_extra_opts + +Whether or not upstream DNS servers come from `upstream_dns_servers` variable or /etc/resolv.conf, related forward block in coredns (and nodelocaldns) configuration can take options (see for details). +These are configurable in inventory in as a dictionary in the `dns_upstream_forward_extra_opts` variable. 
+By default, no other option than the ones hardcoded (see `roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2` and `roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2`). + +### coredns_external_zones + +Array of optional external zones to coredns forward queries to. It's injected into +`coredns`' config file before default kubernetes zone. Use it as an optimization for well-known zones and/or internal-only +domains, i.e. VPN for internal networks (default is unset) + +Example: + +```yaml +coredns_external_zones: +- zones: + - example.com + - example.io:1053 + nameservers: + - 1.1.1.1 + - 2.2.2.2 + cache: 5 +- zones: + - https://mycompany.local:4453 + nameservers: + - 192.168.0.53 + cache: 0 +- zones: + - mydomain.tld + nameservers: + - 10.233.0.3 + cache: 5 + rewrite: + - name stop website.tld website.namespace.svc.cluster.local +``` + +or as INI + +```ini +coredns_external_zones='[{"cache": 30,"zones":["example.com","example.io:453"],"nameservers":["1.1.1.1","2.2.2.2"]}]' +``` + +### dns_etchosts (coredns) + +Optional hosts file content to coredns use as /etc/hosts file. This will also be used by nodelocaldns, if enabled. + +Example: + +```yaml +dns_etchosts: | + 192.168.0.100 api.example.com + 192.168.0.200 ingress.example.com +``` + +### enable_coredns_reverse_dns_lookups + +Whether reverse DNS lookups are enabled in the coredns config. Defaults to `true`. + +### CoreDNS default zone cache plugin + +If you wish to configure the caching behaviour of CoreDNS on the default zone, you can do so using the `coredns_default_zone_cache_block` string block. + +An example value (more information on the [plugin's documentation](https://coredns.io/plugins/cache/)) to: + +* raise the max cache TTL to 3600 seconds +* raise the max amount of success responses to cache to 3000 +* disable caching of denial responses altogether +* enable pre-fetching of lookups with at least 10 lookups per minute before they expire + +Would be as follows: + +```yaml +coredns_default_zone_cache_block: | + cache 3600 { + success 3000 + denial 0 + prefetch 10 1m + } +``` + +## DNS modes supported by Kubespray + +You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``. + +### dns_mode + +``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available: + +#### dns_mode: coredns (default) + +This installs CoreDNS as the default cluster DNS for all queries. + +#### dns_mode: coredns_dual + +This installs CoreDNS as the default cluster DNS for all queries, plus a secondary CoreDNS stack. + +#### dns_mode: manual + +This does not install coredns, but allows you to specify +`manual_dns_server`, which will be configured on nodes for handling Pod DNS. +Use this method if you plan to install your own DNS server in the cluster after +initial deployment. + +#### dns_mode: none + +This does not install any of DNS solution at all. This basically disables cluster DNS completely and +leaves you with a non functional cluster. + +## resolvconf_mode + +``resolvconf_mode`` configures how Kubespray will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers. +There are three modes available: + +### resolvconf_mode: host_resolvconf (default) + +This activates the classic Kubespray behavior that modifies the hosts ``/etc/resolv.conf`` file and dhclient +configuration to point to the cluster dns server (either coredns or coredns_dual, depending on dns_mode). 
+ +As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first +stage (``dns_early: true``), ``/etc/resolv.conf`` is configured to use the DNS servers found in ``upstream_dns_servers`` +and ``nameservers``. Later, ``/etc/resolv.conf`` is reconfigured to use the cluster DNS server first, leaving +the other nameservers as backups. + +Also note, existing records will be purged from the `/etc/resolv.conf`, +including resolvconf's base/head/cloud-init config files and those that come from dhclient. + +### resolvconf_mode: docker_dns + +This sets up the docker daemon with additional --dns/--dns-search/--dns-opt flags. + +The following nameservers are added to the docker daemon (in the same order as listed here): + +* cluster nameserver (depends on dns_mode) +* content of optional upstream_dns_servers variable +* host system nameservers (read from hosts /etc/resolv.conf) + +The following search domains are added to the docker daemon (in the same order as listed here): + +* cluster domains (``default.svc.{{ dns_domain }}``, ``svc.{{ dns_domain }}``) +* content of optional searchdomains variable +* host system search domains (read from hosts /etc/resolv.conf) + +The following dns options are added to the docker daemon + +* ndots:{{ ndots }} +* timeout:2 +* attempts:2 + +These dns options can be overridden by setting a different list: + +```yaml +docker_dns_options: +- ndots:{{ ndots }} +- timeout:2 +- attempts:2 +- rotate +``` + +For normal PODs, k8s will ignore these options and setup its own DNS settings for the PODs, taking +the --cluster_dns (either coredns or coredns_dual, depending on dns_mode) kubelet option into account. +For ``hostNetwork: true`` PODs however, k8s will let docker setup DNS settings. Docker containers which +are not started/managed by k8s will also use these docker options. + +The host system name servers are added to ensure name resolution is also working while cluster DNS is not +running yet. This is especially important in early stages of cluster deployment. In this early stage, +DNS queries to the cluster DNS will timeout after a few seconds, resulting in the system nameserver being +used as a backup nameserver. After cluster DNS is running, all queries will be answered by the cluster DNS +servers, which in turn will forward queries to the system nameserver if required. + +### resolvconf_mode: none + +Does nothing regarding ``/etc/resolv.conf``. This leaves you with a cluster that works as expected in most cases. +The only exception is that ``hostNetwork: true`` PODs and non-k8s managed containers will not be able to resolve +cluster service names. + +## Nodelocal DNS cache + +Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames(cluster.local suffix by default). + +More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/1024-nodelocal-cache-dns/README.md). + +**As per the 2.10 release, Nodelocal DNS cache is enabled by default.** + +### External zones + +It's possible to extent the `nodelocaldns`' configuration by adding an array of external zones. 
For example: + +```yaml +nodelocaldns_external_zones: +- zones: + - example.com + - example.io:1053 + nameservers: + - 1.1.1.1 + - 2.2.2.2 + cache: 5 +- zones: + - https://mycompany.local:4453 + nameservers: + - 192.168.0.53 +``` + +### dns_etchosts (nodelocaldns) + +See [dns_etchosts](#dns_etchosts-coredns) above. + +### Nodelocal DNS HA + +Under some circumstances the single POD nodelocaldns implementation may not be able to be replaced soon enough and a cluster upgrade or a nodelocaldns upgrade can cause DNS requests to time out for short intervals. If for any reason your applications cannot tollerate this behavior you can enable a redundant nodelocal DNS pod on each node: + +```yaml +enable_nodelocaldns_secondary: true +``` + +**Note:** when the nodelocaldns secondary is enabled, the primary is instructed to no longer tear down the iptables rules it sets up to direct traffic to itself. In case both daemonsets have failing pods on the same node, this can cause a DNS blackout with traffic no longer being forwarded to the coredns central service as a fallback. Please ensure you account for this also if you decide to disable the nodelocaldns cache. + +There is a time delta (in seconds) allowed for the secondary nodelocaldns to survive in case both primary and secondary daemonsets are updated at the same time. It is advised to tune this variable after you have performed some tests in your own environment. + +```yaml +nodelocaldns_secondary_skew_seconds: 5 +``` + +## Limitations + +* Kubespray has yet ways to configure Kubedns addon to forward requests SkyDns can + not answer with authority to arbitrary recursive resolvers. This task is left + for future. See [official SkyDns docs](https://github.com/skynetservices/skydns) + for details. + +* There is + [no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554) + for the SkyDNS ``ndots`` param. + +* the ``searchdomains`` have a limitation of a 6 names and 256 chars + length. Due to default ``svc, default.svc`` subdomains, the actual + limits are a 4 names and 239 chars respectively. If `remove_default_searchdomains: true` + added you are back to 6 names. + +* the ``nameservers`` have a limitation of a 3 servers, although there + is a way to mitigate that with the ``upstream_dns_servers``, + see below. Anyway, the ``nameservers`` can take no more than a two + custom DNS servers because of one slot is reserved for a Kubernetes + cluster needs. diff --git a/kubespray/docs/docker.md b/kubespray/docs/docker.md new file mode 100644 index 0000000..4abe11a --- /dev/null +++ b/kubespray/docs/docker.md @@ -0,0 +1,99 @@ +# Docker support + +The docker runtime is supported by kubespray and while the `dockershim` is deprecated to be removed in kubernetes 1.24+ there are alternative ways to use docker such as through the [cri-dockerd](https://github.com/Mirantis/cri-dockerd) project supported by Mirantis. + +Using the docker container manager: + +```yaml +container_manager: docker +``` + +*Note:* `cri-dockerd` has replaced `dockershim` across supported kubernetes version in kubespray 2.20. + +Enabling the `overlay2` graph driver: + +```yaml +docker_storage_options: -s overlay2 +``` + +Enabling `docker_container_storage_setup`, it will configure devicemapper driver on Centos7 or RedHat7. +Deployers must be define a disk path for `docker_container_storage_setup_devs`, otherwise docker-storage-setup will be executed incorrectly. 
+ +```yaml +docker_container_storage_setup: true +docker_container_storage_setup_devs: /dev/vdb +``` + +Changing the Docker cgroup driver (native.cgroupdriver); valid options are `systemd` or `cgroupfs`, default is `systemd`: + +```yaml +docker_cgroup_driver: systemd +``` + +If you have more than 3 nameservers kubespray will only use the first 3 else it will fail. Set the `docker_dns_servers_strict` to `false` to prevent deployment failure. + +```yaml +docker_dns_servers_strict: false +``` + +Set the path used to store Docker data: + +```yaml +docker_daemon_graph: "/var/lib/docker" +``` + +Changing the docker daemon iptables support: + +```yaml +docker_iptables_enabled: "false" +``` + +Docker log options: + +```yaml +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" +``` + +Changre the docker `bin_dir`, this should not be changed unless you use a custom docker package: + +```yaml +docker_bin_dir: "/usr/bin" +``` + +To keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'. +kubespray deletes the docker package on each run, so caching the package makes sense: + +```yaml +docker_rpm_keepcache: 1 +``` + +Allowing insecure-registry access to self hosted registries. Can be ipaddress and domain_name. + +```yaml +## example define 172.19.16.11 or mirror.registry.io +docker_insecure_registries: + - mirror.registry.io + - 172.19.16.11 +``` + +Adding other registry, i.e. China registry mirror: + +```yaml +docker_registry_mirrors: + - https://registry.docker-cn.com + - https://mirror.aliyuncs.com +``` + +Overriding default system MountFlags value. This option takes a mount propagation flag: `shared`, `slave` or `private`, which control whether mounts in the file system namespace set up for docker will receive or propagate mounts and unmounts. Leave empty for system default: + +```yaml +docker_mount_flags: +``` + +Adding extra options to pass to the docker daemon: + +```yaml +## This string should be exactly as you wish it to appear. +docker_options: "" +``` diff --git a/kubespray/docs/downloads.md b/kubespray/docs/downloads.md new file mode 100644 index 0000000..9961eab --- /dev/null +++ b/kubespray/docs/downloads.md @@ -0,0 +1,41 @@ +# Downloading binaries and containers + +Kubespray supports several download/upload modes. The default is: + +* Each node downloads binaries and container images on its own, which is ``download_run_once: False``. +* For K8s apps, pull policy is ``k8s_image_pull_policy: IfNotPresent``. +* For system managed containers, like kubelet or etcd, pull policy is ``download_always_pull: False``, which is pull if only the wanted repo and tag/sha256 digest differs from that the host has. + +There is also a "pull once, push many" mode as well: + +* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube_control_plane`. +* Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. To use this requires that the container runtime is installed and running on the Ansible master and that the current user is either in the docker group or can do passwordless sudo, to be able to use the container runtime. 
Note: even if `download_localhost` is false, files will still be copied to the Ansible server (local host) from the delegated download node, and then distributed from the Ansible server to all cluster nodes. + +NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the storage of the container runtime on that node, instead of just the images required for that node. + +On caching: + +* When `download_run_once` is `True`, all downloaded files will be cached locally in `download_cache_dir`, which defaults to `/tmp/kubespray_cache`. On subsequent provisioning runs, this local cache will be used to provision the nodes, minimizing bandwidth usage and improving provisioning time. Expect about 800MB of disk space to be used on the ansible node for the cache. Disk space required for the image cache on the kubernetes nodes is a much as is needed for the largest image, which is currently slightly less than 150MB. +* By default, if `download_run_once` is false, kubespray will not retrieve the downloaded images and files from the download delegate node to the local cache, or use that cache to pre-provision those nodes. If you have a full cache with container images and files and you don’t need to download anything, but want to use a cache - set `download_force_cache` to `True`. +* By default, cached images that are used to pre-provision the remote nodes will be deleted from the remote nodes after use, to save disk space. Setting `download_keep_remote_cache` will prevent the files from being deleted. This can be useful while developing kubespray, as it can decrease provisioning times. As a consequence, the required storage for images on the remote nodes will increase from 150MB to about 550MB, which is currently the combined size of all required container images. + +Container images and binary files are described by the vars like ``foo_version``, +``foo_download_url``, ``foo_checksum`` for binaries and ``foo_image_repo``, +``foo_image_tag`` or optional ``foo_digest_checksum`` for containers. + +Container images may be defined by its repo and tag, for example: +`andyshinn/dnsmasq:2.72`. Or by repo and tag and sha256 digest: +`andyshinn/dnsmasq@sha256:7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193`. + +Note, the SHA256 digest and the image tag must be both specified and correspond +to each other. The given example above is represented by the following vars: + +```yaml +dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193 +dnsmasq_image_repo: andyshinn/dnsmasq +dnsmasq_image_tag: '2.72' +``` + +The full list of available vars may be found in the download's ansible role defaults. Those also allow to specify custom urls and local repositories for binaries and container +images as well. See also the DNS stack docs for the related intranet configuration, +so the hosts can resolve those urls and repos. 
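+
+As a sketch of what such overrides could look like, reusing the `foo_*` placeholder naming described above (`foo`, the mirror URL and all values are illustrative; check the download role defaults for the real variable names):
+
+```yaml
+foo_version: "1.2.3"
+foo_download_url: "https://mirror.internal.example/foo/foo-v{{ foo_version }}-linux-amd64.tar.gz"
+foo_checksum: "<sha256 of the mirrored binary>"
+foo_image_repo: "registry.internal.example/library/foo"
+foo_image_tag: "1.2.3"
+```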
diff --git a/kubespray/docs/encrypting-secret-data-at-rest.md b/kubespray/docs/encrypting-secret-data-at-rest.md new file mode 100644 index 0000000..3674282 --- /dev/null +++ b/kubespray/docs/encrypting-secret-data-at-rest.md @@ -0,0 +1,22 @@ +# Encrypting Secret Data at Rest + +Before enabling Encrypting Secret Data at Rest, please read the following documentation carefully. + + + +As you can see from the documentation above, 5 encryption providers are supported as of today (22.02.2022). + +As default value for the provider we have chosen `secretbox`. + +Alternatively you can use the values `identity`, `aesgcm`, `aescbc` or `kms`. + +| Provider | Why we have decided against the value as default | +|----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| identity | no encryption | +| aesgcm | Must be rotated every 200k writes | +| aescbc | Not recommended due to CBC's vulnerability to padding oracle attacks. | +| kms | Is the official recommended way, but assumes that a key management service independent of Kubernetes exists, we cannot assume this in all environments, so not a suitable default value. | + +## Details about Secretbox + +Secretbox uses [Poly1305](https://cr.yp.to/mac.html) as message-authentication code and [XSalsa20](https://www.xsalsa20.com/) as secret-key authenticated encryption and secret-key encryption. diff --git a/kubespray/docs/equinix-metal.md b/kubespray/docs/equinix-metal.md new file mode 100644 index 0000000..61260f0 --- /dev/null +++ b/kubespray/docs/equinix-metal.md @@ -0,0 +1,100 @@ +# Equinix Metal + +Kubespray provides support for bare metal deployments using the [Equinix Metal](http://metal.equinix.com). +Deploying upon bare metal allows Kubernetes to run at locations where an existing public or private cloud might not exist such +as cell tower, edge collocated installations. The deployment mechanism used by Kubespray for Equinix Metal is similar to that used for +AWS and OpenStack clouds (notably using Terraform to deploy the infrastructure). Terraform uses the Equinix Metal provider plugin +to provision and configure hosts which are then used by the Kubespray Ansible playbooks. The Ansible inventory is generated +dynamically from the Terraform state file. + +## Local Host Configuration + +To perform this installation, you will need a localhost to run Terraform/Ansible (laptop, VM, etc) and an account with Equinix Metal. +In this example, we're using an m1.large CentOS 7 OpenStack VM as the localhost to kickoff the Kubernetes installation. +You'll need Ansible, Git, and PIP. + +```bash +sudo yum install epel-release +sudo yum install ansible +sudo yum install git +sudo yum install python-pip +``` + +## Playbook SSH Key + +An SSH key is needed by Kubespray/Ansible to run the playbooks. +This key is installed into the bare metal hosts during the Terraform deployment. +You can generate a key new key or use an existing one. + +```bash +ssh-keygen -f ~/.ssh/id_rsa +``` + +## Install Terraform + +Terraform is required to deploy the bare metal infrastructure. The steps below are for installing on CentOS 7. +[More terraform installation options are available.](https://learn.hashicorp.com/terraform/getting-started/install.html) + +Grab the latest version of Terraform and install it. 
+ +```bash +echo "https://releases.hashicorp.com/terraform/$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')/terraform_$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')_linux_amd64.zip" +sudo yum install unzip +sudo unzip terraform_0.14.10_linux_amd64.zip -d /usr/local/bin/ +``` + +## Download Kubespray + +Pull over Kubespray and setup any required libraries. + +```bash +git clone https://github.com/kubernetes-sigs/kubespray +cd kubespray +``` + +## Install Ansible + +Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible) + +## Cluster Definition + +In this example, a new cluster called "alpha" will be created. + +```bash +cp -LRp contrib/terraform/packet/sample-inventory inventory/alpha +cd inventory/alpha/ +ln -s ../../contrib/terraform/packet/hosts +``` + +Details about the cluster, such as the name, as well as the authentication tokens and project ID +for Equinix Metal need to be defined. To find these values see [Equinix Metal API Accounts](https://metal.equinix.com/developers/docs/accounts/). + +```bash +vi cluster.tfvars +``` + +* cluster_name = alpha +* packet_project_id = ABCDEFGHIJKLMNOPQRSTUVWXYZ123456 +* public_key_path = 12345678-90AB-CDEF-GHIJ-KLMNOPQRSTUV + +## Deploy Bare Metal Hosts + +Initializing Terraform will pull down any necessary plugins/providers. + +```bash +terraform init ../../contrib/terraform/packet/ +``` + +Run Terraform to deploy the hardware. + +```bash +terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet +``` + +## Run Kubespray Playbooks + +With the bare metal infrastructure deployed, Kubespray can now install Kubernetes and setup the cluster. + +```bash +ansible-playbook --become -i inventory/alpha/hosts cluster.yml +``` diff --git a/kubespray/docs/etcd.md b/kubespray/docs/etcd.md new file mode 100644 index 0000000..17aa291 --- /dev/null +++ b/kubespray/docs/etcd.md @@ -0,0 +1,46 @@ +# etcd + +## Deployment Types + +It is possible to deploy etcd with three methods. To change the default deployment method (host), use the `etcd_deployment_type` variable. Possible values are `host`, `kubeadm`, and `docker`. + +### Host + +Host deployment is the default method. Using this method will result in etcd installed as a systemd service. + +### Docker + +Installs docker in etcd group members and runs etcd on docker containers. Only usable when `container_manager` is set to `docker`. + +### Kubeadm + +This deployment method is experimental and is only available for new deployments. This deploys etcd as a static pod in master hosts. 
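+
+For example, to select it instead of the default `host` method, set the variable in a group vars file that applies to your cluster (a minimal sketch):
+
+```yaml
+etcd_deployment_type: kubeadm
+```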
+ +## Metrics + +To expose metrics on a separate HTTP port, define it in the inventory with: + +```yaml +etcd_metrics_port: 2381 +``` + +To create a service `etcd-metrics` and associated endpoints in the `kube-system` namespace, +define it's labels in the inventory with: + +```yaml +etcd_metrics_service_labels: + k8s-app: etcd + app.kubernetes.io/managed-by: Kubespray + app: kube-prometheus-stack-kube-etcd + release: prometheus-stack +``` + +The last two labels in the above example allows to scrape the metrics from the +[kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) +chart with the following Helm `values.yaml` : + +```yaml +kubeEtcd: + service: + enabled: false +``` diff --git a/kubespray/docs/fcos.md b/kubespray/docs/fcos.md new file mode 100644 index 0000000..238bdf4 --- /dev/null +++ b/kubespray/docs/fcos.md @@ -0,0 +1,69 @@ +# Fedora CoreOS + +Tested with stable version 34.20210611.3.0 + +Because package installation with `rpm-ostree` requires a reboot, playbook may fail while bootstrap. +Restart playbook again. + +## Containers + +Tested with + +- containerd +- crio + +## Network + +### calico + +To use calico create sysctl file with ignition: + +```yaml +files: + - path: /etc/sysctl.d/reverse-path-filter.conf + contents: + inline: | + net.ipv4.conf.all.rp_filter=1 +``` + +## libvirt setup + +### Prepare + +Prepare ignition and serve via http (a.e. python -m http.server ) + +```json +{ + "ignition": { + "version": "3.0.0" + }, + + "passwd": { + "users": [ + { + "name": "ansibleUser", + "sshAuthorizedKeys": [ + "ssh-rsa ..publickey.." + ], + "groups": [ "wheel" ] + } + ] + } +} +``` + +### create guest + +```ShellSeasion +machine_name=myfcos1 +ignition_url=http://mywebserver/fcos.ign + +fcos_version=34.20210611.3.0 +kernel=https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/${fcos_version}/x86_64/fedora-coreos-${fcos_version}-live-kernel-x86_64 +initrd=https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/${fcos_version}/x86_64/fedora-coreos-${fcos_version}-live-initramfs.x86_64.img +rootfs=https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/${fcos_version}/x86_64/fedora-coreos-${fcos_version}-live-rootfs.x86_64.img +kernel_args="console=ttyS0 coreos.live.rootfs_url=${rootfs} coreos.inst.install_dev=/dev/sda coreos.inst.stream=stable coreos.inst.ignition_url=${ignition_url}" +sudo virt-install --name ${machine_name} --ram 4048 --graphics=none --vcpus 2 --disk size=20 \ + --network bridge=virbr0 \ + --install kernel=${kernel},initrd=${initrd},kernel_args_overwrite=yes,kernel_args="${kernel_args}" +``` diff --git a/kubespray/docs/figures/kubespray-calico-rr.png b/kubespray/docs/figures/kubespray-calico-rr.png new file mode 100644 index 0000000..2dacdb5 Binary files /dev/null and b/kubespray/docs/figures/kubespray-calico-rr.png differ diff --git a/kubespray/docs/figures/loadbalancer_localhost.png b/kubespray/docs/figures/loadbalancer_localhost.png new file mode 100644 index 0000000..0732d54 Binary files /dev/null and b/kubespray/docs/figures/loadbalancer_localhost.png differ diff --git a/kubespray/docs/flannel.md b/kubespray/docs/flannel.md new file mode 100644 index 0000000..78937b1 --- /dev/null +++ b/kubespray/docs/flannel.md @@ -0,0 +1,51 @@ +# Flannel + +Flannel is a network fabric for containers, designed for Kubernetes + +Supported [backends](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#wireguard): `vxlan`, `host-gw` and `wireguard` + 
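+The backend is selected via the flannel role defaults. Assuming the role exposes a variable such as `flannel_backend_type` (this name is an assumption; check `roles/network_plugin/flannel/defaults/main.yml` for the exact variable), switching from the default `vxlan` could look like:
+
+```yaml
+flannel_backend_type: "host-gw"
+```
+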
+**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with `VXLAN` backend, while waiting on a newer Flannel version the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcase in kubespray [networking test](tests/testcases/040_check-network-adv.yml:31). + +## Verifying flannel install + +* Flannel configuration file should have been created there + +```ShellSession +cat /run/flannel/subnet.env +FLANNEL_NETWORK=10.233.0.0/18 +FLANNEL_SUBNET=10.233.16.1/24 +FLANNEL_MTU=1450 +FLANNEL_IPMASQ=false +``` + +* Check if the network interface has been created + +```ShellSession +ip a show dev flannel.1 +4: flannel.1: mtu 1450 qdisc noqueue state UNKNOWN group default + link/ether e2:f3:a7:0f:bf:cb brd ff:ff:ff:ff:ff:ff + inet 10.233.16.0/18 scope global flannel.1 + valid_lft forever preferred_lft forever + inet6 fe80::e0f3:a7ff:fe0f:bfcb/64 scope link + valid_lft forever preferred_lft forever +``` + +* Try to run a container and check its ip address + +```ShellSession +kubectl run test --image=busybox --command -- tail -f /dev/null +replicationcontroller "test" created + +kubectl describe po test-34ozs | grep ^IP +IP: 10.233.16.2 +``` + +```ShellSession +kubectl exec test-34ozs -- ip a show dev eth0 +8: eth0@if9: mtu 1450 qdisc noqueue + link/ether 02:42:0a:e9:2b:03 brd ff:ff:ff:ff:ff:ff + inet 10.233.16.2/24 scope global eth0 + valid_lft forever preferred_lft forever + inet6 fe80::42:aff:fee9:2b03/64 scope link tentative flags 08 + valid_lft forever preferred_lft forever +``` diff --git a/kubespray/docs/flatcar.md b/kubespray/docs/flatcar.md new file mode 100644 index 0000000..cdd2c6a --- /dev/null +++ b/kubespray/docs/flatcar.md @@ -0,0 +1,14 @@ +Flatcar Container Linux bootstrap +=============== + +Example with Ansible: + +Before running the cluster playbook you must satisfy the following requirements: + +General Flatcar Pre-Installation Notes: + +- Ensure that the bin_dir is set to `/opt/bin` +- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task. +- The resolvconf_mode setting of `docker_dns` **does not** work for Flatcar. This is because we do not edit the systemd service file for docker on Flatcar nodes. Instead, just use the default `host_resolvconf` mode. It should work out of the box. + +Then you can proceed to [cluster deployment](#run-deployment) diff --git a/kubespray/docs/gcp-lb.md b/kubespray/docs/gcp-lb.md new file mode 100644 index 0000000..8e8f8c4 --- /dev/null +++ b/kubespray/docs/gcp-lb.md @@ -0,0 +1,20 @@ +# GCP Load Balancers for type=LoadBalacer of Kubernetes Services + +Google Cloud Platform can be used for creation of Kubernetes Service Load Balancer. + +This feature is able to deliver by adding parameters to `kube-controller-manager` and `kubelet`. You need specify: + +```ShellSession + --cloud-provider=gce + --cloud-config=/etc/kubernetes/cloud-config +``` + +To get working it in kubespray, you need to add tag to GCE instances and specify it in kubespray group vars and also set `cloud_provider` to `gce`. So for example, in file `group_vars/all/gcp.yml`: + +```yaml + cloud_provider: gce + gce_node_tags: k8s-lb +``` + +When you will setup it and create SVC in Kubernetes with `type=LoadBalancer`, cloud provider will create public IP and will set firewall. +Note: Cloud provider run under VM service account, so this account needs to have correct permissions to be able to create all GCP resources. 
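+
+For reference, a minimal Service manifest that triggers this behaviour (name, selector and ports are illustrative):
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: web
+spec:
+  type: LoadBalancer
+  selector:
+    app: web
+  ports:
+    - port: 80
+      targetPort: 8080
+```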
diff --git a/kubespray/docs/gcp-pd-csi.md b/kubespray/docs/gcp-pd-csi.md new file mode 100644 index 0000000..88fa060 --- /dev/null +++ b/kubespray/docs/gcp-pd-csi.md @@ -0,0 +1,77 @@ +# GCP Persistent Disk CSI Driver + +The GCP Persistent Disk CSI driver allows you to provision volumes for pods with a Kubernetes deployment over Google Cloud Platform. The CSI driver replaces to volume provioning done by the in-tree azure cloud provider which is deprecated. + +To deploy GCP Persistent Disk CSI driver, uncomment the `gcp_pd_csi_enabled` option in `group_vars/all/gcp.yml` and set it to `true`. + +## GCP Persistent Disk Storage Class + +If you want to deploy the GCP Persistent Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`. + +## GCP credentials + +In order for the CSI driver to provision disks, you need to create for it a service account on GCP with the appropriate permissions. + +Follow these steps to configure it: + +```ShellSession +# This will open a web page for you to authenticate +gcloud auth login +export PROJECT=nameofmyproject +gcloud config set project $PROJECT + +git clone https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver $GOPATH/src/sigs.k8s.io/gcp-compute-persistent-disk-csi-driver + +export GCE_PD_SA_NAME=my-gce-pd-csi-sa +export GCE_PD_SA_DIR=/my/safe/credentials/directory + +./deploy/setup-project.sh +``` + +The above will create a file named `cloud-sa.json` in the specified `GCE_PD_SA_DIR`. This file contains the service account with the appropriate credentials for the CSI driver to perform actions on GCP to request disks for pods. + +You need to provide this file's path through the variable `gcp_pd_csi_sa_cred_file` in `inventory/mycluster/group_vars/all/gcp.yml` + +You can now deploy Kubernetes with Kubespray over GCP. + +## GCP PD CSI Driver test + +To test the dynamic provisioning using GCP PD CSI driver, make sure to have the storage class deployed (through persistent volumes), and apply the following manifest: + +```yml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: podpvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: csi-gce-pd + resources: + requests: + storage: 1Gi + +--- +apiVersion: v1 +kind: Pod +metadata: + name: web-server +spec: + containers: + - name: web-server + image: nginx + volumeMounts: + - mountPath: /var/lib/www/html + name: mypvc + volumes: + - name: mypvc + persistentVolumeClaim: + claimName: podpvc + readOnly: false +``` + +## GCP PD documentation + +You can find the official GCP Persistent Disk CSI driver installation documentation here: [GCP PD CSI Driver](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/blob/master/docs/kubernetes/user-guides/driver-install.md +) diff --git a/kubespray/docs/getting-started.md b/kubespray/docs/getting-started.md new file mode 100644 index 0000000..32660d1 --- /dev/null +++ b/kubespray/docs/getting-started.md @@ -0,0 +1,144 @@ +# Getting started + +## Building your own inventory + +Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is +an example inventory located +[here](https://github.com/kubernetes-sigs/kubespray/blob/master/inventory/sample/inventory.ini). + +You can use an +[inventory generator](https://github.com/kubernetes-sigs/kubespray/blob/master/contrib/inventory_builder/inventory.py) +to create or modify an Ansible inventory. 
Currently, it is limited in +functionality and is only used for configuring a basic Kubespray cluster inventory, but it does +support creating inventory file for large clusters as well. It now supports +separated ETCD and Kubernetes control plane roles from node role if the size exceeds a +certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` for more information. + +Example inventory generator usage: + +```ShellSession +cp -r inventory/sample inventory/mycluster +declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5) +CONFIG_FILE=inventory/mycluster/hosts.yml python3 contrib/inventory_builder/inventory.py ${IPS[@]} +``` + +Then use `inventory/mycluster/hosts.yml` as inventory file. + +## Starting custom deployment + +Once you have an inventory, you may want to customize deployment data vars +and start the deployment: + +**IMPORTANT**: Edit my\_inventory/groups\_vars/\*.yaml to override data vars: + +```ShellSession +ansible-playbook -i inventory/mycluster/hosts.yml cluster.yml -b -v \ + --private-key=~/.ssh/private_key +``` + +See more details in the [ansible guide](/docs/ansible.md). + +### Adding nodes + +You may want to add worker, control plane or etcd nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your control planes. This is especially helpful when doing something like autoscaling your clusters. + +- Add the new worker node to your inventory in the appropriate group (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html)). +- Run the ansible-playbook command, substituting `cluster.yml` for `scale.yml`: + +```ShellSession +ansible-playbook -i inventory/mycluster/hosts.yml scale.yml -b -v \ + --private-key=~/.ssh/private_key +``` + +### Remove nodes + +You may want to remove **control plane**, **worker**, or **etcd** nodes from your +existing cluster. This can be done by re-running the `remove-node.yml` +playbook. First, all specified nodes will be drained, then stop some +kubernetes services and delete some certificates, +and finally execute the kubectl command to delete these nodes. +This can be combined with the add node function. This is generally helpful +when doing something like autoscaling your clusters. Of course, if a node +is not working, you can remove the node and install it again. + +Use `--extra-vars "node=,"` to select the node(s) you want to delete. + +```ShellSession +ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \ +--private-key=~/.ssh/private_key \ +--extra-vars "node=nodename,nodename2" +``` + +If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=false` +to skip the node reset step. If one node is unavailable, but others you wish +to remove are able to connect via SSH, you could set `reset_nodes=false` as a host +var in inventory. + +## Connecting to Kubernetes + +By default, Kubespray configures kube_control_plane hosts with insecure access to +kube-apiserver via port 8080. A kubeconfig file is not necessary in this case, +because kubectl will use to connect. The kubeconfig files +generated will point to localhost (on kube_control_planes) and kube_node hosts will +connect either to a localhost nginx proxy or to a loadbalancer if configured. +More details on this process are in the [HA guide](/docs/ha-mode.md). 
+ +Kubespray permits connecting to the cluster remotely on any IP of any +kube_control_plane host on port 6443 by default. However, this requires +authentication. One can get a kubeconfig from kube_control_plane hosts +(see [below](#accessing-kubernetes-api)). + +For more information on kubeconfig and accessing a Kubernetes cluster, refer to +the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). + +## Accessing Kubernetes Dashboard + +Supported version is kubernetes-dashboard v2.0.x : + +- Login option : token/kubeconfig by default +- Deployed by default in "kube-system" namespace, can be overridden with `dashboard_namespace: kubernetes-dashboard` in inventory, +- Only serves over https + +Access is described in [dashboard docs](https://github.com/kubernetes/dashboard/tree/master/docs/user/accessing-dashboard). With kubespray's default deployment in kube-system namespace, instead of kubernetes-dashboard : + +- Proxy URL is +- kubectl commands must be run with "-n kube-system" + +Accessing through Ingress is highly recommended. For proxy access, please note that proxy must listen to [localhost](https://github.com/kubernetes/dashboard/issues/692#issuecomment-220492484) (`proxy --address="x.x.x.x"` will not work) + +For token authentication, guide to create Service Account is provided in [dashboard sample user](https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md) doc. Still take care of default namespace. + +Access can also by achieved via ssh tunnel on a control plane : + +```bash +# localhost:8081 will be sent to control-plane-1's own localhost:8081 +ssh -L8001:localhost:8001 user@control-plane-1 +sudo -i +kubectl proxy +``` + +## Accessing Kubernetes API + +The main client of Kubernetes is `kubectl`. It is installed on each kube_control_plane +host and can optionally be configured on your ansible host by setting +`kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration: + +- If `kubectl_localhost` enabled, `kubectl` will download onto `/usr/local/bin/` and setup with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` also created for setup with below `admin.conf`. +- If `kubeconfig_localhost` enabled `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment. +- The location where these files are downloaded to can be configured via the `artifacts_dir` variable. + +NOTE: The controller host name in the admin.conf file might be a private IP. If so, change it to use the controller's public IP or the cluster's load balancer. + +You can see a list of nodes by running the following commands: + +```ShellSession +cd inventory/mycluster/artifacts +./kubectl.sh get nodes +``` + +If desired, copy admin.conf to ~/.kube/config. + +## Setting up your first cluster + +[Setting up your first cluster](/docs/setting-up-your-first-cluster.md) is an + applied step-by-step guide for setting up your first cluster with Kubespray. diff --git a/kubespray/docs/gvisor.md b/kubespray/docs/gvisor.md new file mode 100644 index 0000000..ef0a64b --- /dev/null +++ b/kubespray/docs/gvisor.md @@ -0,0 +1,16 @@ +# gVisor + +[gVisor](https://gvisor.dev/docs/) is an application kernel, written in Go, that implements a substantial portion of the Linux system call interface. It provides an additional layer of isolation between running applications and the host operating system. 
+
+gVisor includes an Open Container Initiative (OCI) runtime called runsc that makes it easy to work with existing container tooling. The runsc runtime integrates with Docker and Kubernetes, making it simple to run sandboxed containers.
+
+## Usage
+
+To enable gVisor, you should use a container manager that supports selecting the [RuntimeClass](https://kubernetes.io/docs/concepts/containers/runtime-class/), such as `containerd`.
+
+Containerd support:
+
+```yaml
+container_manager: containerd
+gvisor_enabled: true
+```
diff --git a/kubespray/docs/ha-mode.md b/kubespray/docs/ha-mode.md
new file mode 100644
index 0000000..de80199
--- /dev/null
+++ b/kubespray/docs/ha-mode.md
@@ -0,0 +1,157 @@
+# HA endpoints for K8s
+
+The following components require highly available endpoints:
+
+* etcd cluster,
+* kube-apiserver service instances.
+
+The latter relies on a third-party reverse proxy, like Nginx or HAProxy, to
+achieve the same goal.
+
+## Etcd
+
+The etcd clients (the kube-apiservers on the control plane nodes) are configured with the list of all etcd peers. If the etcd cluster has multiple instances, it is configured for HA already.
+
+## Kube-apiserver
+
+K8s components require a loadbalancer to access the apiservers via a reverse
+proxy. Kubespray includes support for an nginx-based proxy that resides on each
+non-master Kubernetes node. This is referred to as localhost loadbalancing. It
+is less efficient than a dedicated load balancer because it creates extra
+health checks on the Kubernetes apiserver, but is more practical for scenarios
+where an external LB or virtual IP management is inconvenient. This option is
+configured by the variable `loadbalancer_apiserver_localhost` (defaults to
+`True`, or `False` if an external `loadbalancer_apiserver` is defined).
+You may also define the port the local internal loadbalancer uses by changing
+`loadbalancer_apiserver_port`. This defaults to the value of
+`kube_apiserver_port`. It is also important to note that Kubespray will only
+configure kubelet and kube-proxy on non-master nodes to use the local internal
+loadbalancer.
+
+If you choose NOT to use the local internal loadbalancer, you will need to
+use the [kube-vip](kube-vip.md) ansible role or configure your own loadbalancer to achieve HA. By default, Kubespray only configures a non-HA endpoint, which points to the
+`access_ip` or IP address of the first server node in the `kube_control_plane` group.
+It can also configure clients to use endpoints for a given loadbalancer type.
+The following diagram shows how traffic to the apiserver is directed.
+
+![Image](figures/loadbalancer_localhost.png?raw=true)
+
+A user may opt to use an external loadbalancer (LB) instead. An external LB
+provides access for external clients, while the internal LB accepts client
+connections only on localhost.
+Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
+an example configuration for an HAProxy service acting as an external LB:
+
+```raw
+listen kubernetes-apiserver-https
+  bind :8383
+  mode tcp
+  option log-health-checks
+  timeout client 3h
+  timeout server 3h
+  server master1 :6443 check check-ssl verify none inter 10000
+  server master2 :6443 check check-ssl verify none inter 10000
+  balance roundrobin
+```
+
+  Note: This is an example config, managed elsewhere outside of Kubespray.
+
+And the corresponding example global vars for such a "cluster-aware"
+external LB with the cluster API access modes configured in Kubespray:
+
+```yml
+apiserver_loadbalancer_domain_name: "my-apiserver-lb.example.com"
+loadbalancer_apiserver:
+  address:
+  port: 8383
+```
+
+  Note: The default kubernetes apiserver configuration binds to all interfaces,
+  so you will need to use a different port for the VIP than the one the API is
+  listening on, or set `kube_apiserver_bind_address` so that the API only
+  listens on a specific interface (to avoid a conflict with haproxy binding the
+  port on the VIP address).
+
+This domain name, or the default "lb-apiserver.kubernetes.local", will be inserted
+into the `/etc/hosts` file of all servers in the `k8s_cluster` group and wired
+into the generated self-signed TLS/SSL certificates as well. Note that
+the HAProxy service should itself be HA and requires VIP management, which
+is out of scope of this doc.
+
+There is a special case for an internal and an externally configured (not with
+Kubespray) LB used simultaneously. Keep in mind that the cluster is not aware
+of such an external LB and you do not need to specify any configuration variables
+for it.
+
+  Note: TLS/SSL termination for externally accessed API endpoints will **not**
+  be covered by Kubespray for that case. Make sure your external LB provides it.
+  Alternatively, you may specify externally load balanced VIPs in the
+  `supplementary_addresses_in_ssl_keys` list. Kubespray will then add them into
+  the generated cluster certificates as well.
+
+Aside from that specific case, `loadbalancer_apiserver` is considered mutually
+exclusive with `loadbalancer_apiserver_localhost`.
+
+API access endpoints are evaluated automatically, as follows:
+
+| Endpoint type                | kube_control_plane                      | non-master              | external              |
+|------------------------------|-----------------------------------------|-------------------------|-----------------------|
+| Local LB (default)           | `https://dbip:sp`                       | `https://lc:nsp`        | `https://m[0].aip:sp` |
+| Local LB (default) + cbip    | `https://cbip:sp` and `https://lc:nsp`  | `https://lc:nsp`        | `https://m[0].aip:sp` |
+| Local LB + Unmanaged here LB | `https://dbip:sp`                       | `https://lc:nsp`        | `https://ext`         |
+| External LB, no internal     | `https://dbip:sp`                       | ``                      | `https://lb:lp`       |
+| No ext/int LB                | `https://dbip:sp`                       | ``                      | `https://m[0].aip:sp` |
+
+Where:
+
+* `m[0]` - the first node in the `kube_control_plane` group;
+* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
+* `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
+* `lc` - localhost;
+* `cbip` - a custom bind IP, `kube_apiserver_bind_address`;
+* `dbip` - localhost for the default bind IP '0.0.0.0';
+* `nsp` - nginx secure port, `loadbalancer_apiserver_port`, defaults to `sp`;
+* `sp` - secure port, `kube_apiserver_port`;
+* `lp` - LB port, `loadbalancer_apiserver.port`, defaults to the secure port;
+* `ip` - the node IP, defaults to the ansible IP;
+* `aip` - `access_ip`, defaults to the ip.
+
+The second and third columns represent internal cluster access modes. The last
+column illustrates an example URI to access the cluster APIs externally.
+Kubespray has nothing to do with it; this is informational only.
+
+As you can see, the control plane nodes' internal API endpoints are always
+contacted via the bind IP: `https://dbip:sp` by default, or `https://cbip:sp`
+when a custom bind address is set.
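+
+For reference, a minimal `group_vars` snippet selecting the localhost
+loadbalancer could look like the following (the values shown are only an
+illustration of the variables discussed above, not required settings):
+
+```yml
+# Use the nginx proxy on every non-master node (the default behaviour)
+loadbalancer_apiserver_localhost: true
+# Port the local proxy listens on; defaults to kube_apiserver_port
+loadbalancer_apiserver_port: 6443
+```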
+
+## Optional configurations
+
+### ETCD with a LB
+
+In order to use external load balancing (L4/TCP or L7 w/ SSL passthrough VIP), the following variables need to be overridden in group_vars:
+
+* `etcd_access_addresses`
+* `etcd_client_url`
+* `etcd_cert_alt_names`
+* `etcd_cert_alt_ips`
+
+#### Example of a VIP w/ FQDN
+
+```yaml
+etcd_access_addresses: https://etcd.example.com:2379
+etcd_client_url: https://etcd.example.com:2379
+etcd_cert_alt_names:
+  - "etcd.kube-system.svc.{{ dns_domain }}"
+  - "etcd.kube-system.svc"
+  - "etcd.kube-system"
+  - "etcd"
+  - "etcd.example.com" # This one needs to be added to the default etcd_cert_alt_names
+```
+
+#### Example of a VIP w/o FQDN (IP only)
+
+```yaml
+etcd_access_addresses: https://2.3.7.9:2379
+etcd_client_url: https://2.3.7.9:2379
+etcd_cert_alt_ips:
+  - "2.3.7.9"
+```
diff --git a/kubespray/docs/hardening.md b/kubespray/docs/hardening.md
new file mode 100644
index 0000000..b3359b7
--- /dev/null
+++ b/kubespray/docs/hardening.md
@@ -0,0 +1,139 @@
+# Cluster Hardening
+
+If you want to improve the security of your cluster and make it compliant with the [CIS Benchmarks](https://learn.cisecurity.org/benchmarks), here you can find a configuration to harden your **kubernetes** installation.
+
+To apply the hardening configuration, create a file (e.g. `hardening.yaml`) and paste the content of the following code snippet into it.
+
+## Minimum Requirements
+
+The **kubernetes** version should be at least `v1.23.6` to have all the most recent security features (e.g. the new `PodSecurity` admission plugin, etc.).
+
+**N.B.** Some of these configurations have just been added to **kubespray**, so ensure that you have the latest version to make it work properly. Also, ensure that other configurations don't override these.
+ +`hardening.yaml`: + +```yaml +# Hardening +--- + +## kube-apiserver +authorization_modes: ['Node', 'RBAC'] +# AppArmor-based OS +# kube_apiserver_feature_gates: ['AppArmor=true'] +kube_apiserver_request_timeout: 120s +kube_apiserver_service_account_lookup: true + +# enable kubernetes audit +kubernetes_audit: true +audit_log_path: "/var/log/kube-apiserver-log.json" +audit_log_maxage: 30 +audit_log_maxbackups: 10 +audit_log_maxsize: 100 + +tls_min_version: VersionTLS12 +tls_cipher_suites: + - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + +# enable encryption at rest +kube_encrypt_secret_data: true +kube_encryption_resources: [secrets] +kube_encryption_algorithm: "secretbox" + +kube_apiserver_enable_admission_plugins: + - EventRateLimit + - AlwaysPullImages + - ServiceAccount + - NamespaceLifecycle + - NodeRestriction + - LimitRanger + - ResourceQuota + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - PodNodeSelector + - PodSecurity +kube_apiserver_admission_control_config_file: true +# EventRateLimit plugin configuration +kube_apiserver_admission_event_rate_limits: + limit_1: + type: Namespace + qps: 50 + burst: 100 + cache_size: 2000 + limit_2: + type: User + qps: 50 + burst: 100 +kube_profiling: false + +## kube-controller-manager +kube_controller_manager_bind_address: 127.0.0.1 +kube_controller_terminated_pod_gc_threshold: 50 +# AppArmor-based OS +# kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"] +kube_controller_feature_gates: ["RotateKubeletServerCertificate=true"] + +## kube-scheduler +kube_scheduler_bind_address: 127.0.0.1 +kube_kubeadm_scheduler_extra_args: + profiling: false +# AppArmor-based OS +# kube_scheduler_feature_gates: ["AppArmor=true"] + +## etcd +etcd_deployment_type: kubeadm + +## kubelet +kubelet_authentication_token_webhook: true +kube_read_only_port: 0 +kubelet_rotate_server_certificates: true +kubelet_protect_kernel_defaults: true +kubelet_event_record_qps: 1 +kubelet_rotate_certificates: true +kubelet_streaming_connection_idle_timeout: "5m" +kubelet_make_iptables_util_chains: true +kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"] +kubelet_seccomp_default: true +kubelet_systemd_hardening: true +# In case you have multiple interfaces in your +# control plane nodes and you want to specify the right +# IP addresses, kubelet_secure_addresses allows you +# to specify the IP from which the kubelet +# will receive the packets. +kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112" + +# additional configurations +kube_owner: root +kube_cert_group: root + +# create a default Pod Security Configuration and deny running of insecure pods +# kube_system namespace is exempted by default +kube_pod_security_use_default: true +kube_pod_security_default_enforce: restricted +``` + +Let's take a deep look to the resultant **kubernetes** configuration: + +* The `anonymous-auth` (on `kube-apiserver`) is set to `true` by default. This is fine, because it is considered safe if you enable `RBAC` for the `authorization-mode`. +* The `enable-admission-plugins` has not the `PodSecurityPolicy` admission plugin. This because it is going to be definitely removed from **kubernetes** `v1.25`. For this reason we decided to set the newest `PodSecurity` (for more details, please take a look here: ). 
Then, we set the `EventRateLimit` plugin, providing additional configuration files (that are automatically created under the hood and mounted inside the `kube-apiserver` container) to make it work.
+* The `encryption-provider-config` provides encryption at rest. This means that the `kube-apiserver` encrypts data before it is stored in `etcd`, so the data is completely unreadable from `etcd` (in case an attacker is able to exploit this).
+* The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This can be used as an alternative to the `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally, the certificates are generated automatically, but you need to approve them manually, or at least use an operator to do so (for more details, please take a look here: ).
+* If you are installing **kubernetes** on an AppArmor-based OS (e.g. Debian/Ubuntu), you can enable the `AppArmor` feature gate by uncommenting the lines with the comment `# AppArmor-based OS` on top.
+* The `kubelet_systemd_hardening` option, together with `kubelet_secure_addresses`, sets up a minimal firewall on the system. To better understand how these variables work, here's an explanatory image:
+  ![kubelet hardening](img/kubelet-hardening.png)
+
+Once you have the file properly filled, you can run the **Ansible** command to start the installation:
+
+```bash
+ansible-playbook -v cluster.yml \
+  -i inventory.ini \
+  -b --become-user=root \
+  --private-key ~/.ssh/id_ecdsa \
+  -e "@vars.yaml" \
+  -e "@hardening.yaml"
+```
+
+**N.B.** The `vars.yaml` contains our general cluster information (SANs, load balancer, DNS, etc.) and `hardening.yaml` is the file described above.
+
+Once the cluster deployment is complete, don't forget to approve the generated certificates (check them with `kubectl get csr`, approve with `kubectl certificate approve `). This action is necessary because the `serverTLSBootstrap` option and the `RotateKubeletServerCertificate` feature gate for `kubelet` are enabled (CIS [4.2.11](https://www.tenable.com/audits/items/CIS_Kubernetes_v1.20_v1.0.0_Level_1_Worker.audit:05af3dfbca8e0c3fb3559c6c7de29191), [4.2.12](https://www.tenable.com/audits/items/CIS_Kubernetes_v1.20_v1.0.0_Level_1_Worker.audit:5351c76f8c5bff8f98c29a5200a35435)).
diff --git a/kubespray/docs/img/kubelet-hardening.png b/kubespray/docs/img/kubelet-hardening.png
new file mode 100644
index 0000000..5546a8b
Binary files /dev/null and b/kubespray/docs/img/kubelet-hardening.png differ
diff --git a/kubespray/docs/img/kubernetes-logo.png b/kubespray/docs/img/kubernetes-logo.png
new file mode 100644
index 0000000..2838a18
Binary files /dev/null and b/kubespray/docs/img/kubernetes-logo.png differ
diff --git a/kubespray/docs/ingress_controller/alb_ingress_controller.md b/kubespray/docs/ingress_controller/alb_ingress_controller.md
new file mode 100644
index 0000000..05edbee
--- /dev/null
+++ b/kubespray/docs/ingress_controller/alb_ingress_controller.md
@@ -0,0 +1,43 @@
+# AWS ALB Ingress Controller
+
+**NOTE:** The current image version is `v1.1.6`. Please file any issues you find and note the version used.
+
+The AWS ALB Ingress Controller satisfies Kubernetes [ingress resources](https://kubernetes.io/docs/user-guide/ingress) by provisioning [Application Load Balancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html).
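+
+As a rough illustration of what the controller consumes, an Ingress for the
+walkthrough's echoserver might look like the sketch below (the namespace,
+service name, paths and `internet-facing` scheme are assumptions; see the
+walkthrough linked below for the authoritative manifests):
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: echoserver
+  namespace: echoserver
+  annotations:
+    # Ask this controller to reconcile the Ingress and provision an ALB
+    kubernetes.io/ingress.class: alb
+    alb.ingress.kubernetes.io/scheme: internet-facing
+spec:
+  rules:
+    - http:
+        paths:
+          - path: /*
+            backend:
+              serviceName: echoserver
+              servicePort: 80
+```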
+ +This project was originated by [Ticketmaster](https://github.com/ticketmaster) and [CoreOS](https://github.com/coreos) as part of Ticketmaster's move to AWS and CoreOS Tectonic. Learn more about Ticketmaster's Kubernetes initiative from Justin Dean's video at [Tectonic Summit](https://www.youtube.com/watch?v=wqXVKneP0Hg). + +This project was donated to Kubernetes SIG-AWS to allow AWS, CoreOS, Ticketmaster and other SIG-AWS contributors to officially maintain the project. SIG-AWS reached this consensus on June 1, 2018. + +## Documentation + +Checkout our [Live Docs](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/)! + +## Getting started + +To get started with the controller, see our [walkthrough](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/walkthrough/echoserver/). + +## Setup + +- See [controller setup](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/controller/setup/) on how to install ALB ingress controller +- See [external-dns setup](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/external-dns/setup/) for how to setup the external-dns to manage route 53 records. + +## Building + +For details on building this project, see our [building guide](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/BUILDING/). + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +- [Slack channel](https://kubernetes.slack.com/messages/sig-aws) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-aws) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + +## License + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fcoreos%2Falb-ingress-controller.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fcoreos%2Falb-ingress-controller?ref=badge_large) diff --git a/kubespray/docs/ingress_controller/ingress_nginx.md b/kubespray/docs/ingress_controller/ingress_nginx.md new file mode 100644 index 0000000..a3c9725 --- /dev/null +++ b/kubespray/docs/ingress_controller/ingress_nginx.md @@ -0,0 +1,203 @@ +# Installation Guide + +## Contents + +- [Prerequisite Generic Deployment Command](#prerequisite-generic-deployment-command) + - [Provider Specific Steps](#provider-specific-steps) + - [Docker for Mac](#docker-for-mac) + - [minikube](#minikube) + - [AWS](#aws) + - [GCE - GKE](#gce-gke) + - [Azure](#azure) + - [Bare-metal](#bare-metal) + - [Verify installation](#verify-installation) + - [Detect installed version](#detect-installed-version) +- [Using Helm](#using-helm) + +## Prerequisite Generic Deployment Command + +!!! attention + The default configuration watches Ingress object from *all the namespaces*. + To change this behavior use the flag `--watch-namespace` to limit the scope to a particular namespace. + +!!! warning + If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions. + +!!! attention + If you're using GKE you need to initialize your user as a cluster-admin with the following command: + +```console +kubectl create clusterrolebinding cluster-admin-binding \ +--clusterrole cluster-admin \ +--user $(gcloud config get-value account) +``` + +The following **Mandatory Command** is required for all deployments except for AWS. See below for the AWS version. 
+ +```console +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.40.2/deploy/static/provider/cloud/deploy.yaml +``` + +### Provider Specific Steps + +There are cloud provider specific yaml files. + +#### Docker for Mac + +Kubernetes is available in Docker for Mac (from [version 18.06.0-ce](https://docs.docker.com/docker-for-mac/release-notes/#stable-releases-of-2018)) + +First you need to [enable kubernetes](https://docs.docker.com/docker-for-mac/#kubernetes). + +Then you have to create a service: + +```console +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud-generic.yaml +``` + +#### minikube + +For standard usage: + +```console +minikube addons enable ingress +``` + +For development: + +1. Disable the ingress addon: + +```console +minikube addons disable ingress +``` + +1. Execute `make dev-env` +1. Confirm the `nginx-ingress-controller` deployment exists: + +```console +$ kubectl get pods -n ingress-nginx +NAME READY STATUS RESTARTS AGE +default-http-backend-66b447d9cf-rrlf9 1/1 Running 0 12s +nginx-ingress-controller-fdcdcd6dd-vvpgs 1/1 Running 0 11s +``` + +#### AWS + +In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of `Type=LoadBalancer`. +Since Kubernetes v1.9.0 it is possible to use a classic load balancer (ELB) or network load balancer (NLB) +Please check the [elastic load balancing AWS details page](https://aws.amazon.com/elasticloadbalancing/details/) + +##### Elastic Load Balancer - ELB + +This setup requires to choose in which layer (L4 or L7) we want to configure the Load Balancer: + +- [Layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_Layer): Use an Network Load Balancer (NLB) with TCP as the listener protocol for ports 80 and 443. +- [Layer 7](https://en.wikipedia.org/wiki/OSI_model#Layer_7:_Application_Layer): Use an Elastic Load Balancer (ELB) with HTTP as the listener protocol for port 80 and terminate TLS in the ELB + +For L4: + +```console +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/deploy.yaml +``` + +For L7: + +Change the value of `service.beta.kubernetes.io/aws-load-balancer-ssl-cert` in the file `provider/aws/deploy-tls-termination.yaml` replacing the dummy id with a valid one. The dummy value is `"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX"` + +Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the [ELB Idle Timeouts section](#elb-idle-timeouts) for additional information. If a change is required, users will need to update the value of `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` in `provider/aws/deploy-tls-termination.yaml` + +Then execute: + +```console +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/deploy-tls-termination.yaml +``` + +This example creates an ELB with just two listeners, one in port 80 and another in port 443 + +![Listeners](https://github.com/kubernetes/ingress-nginx/raw/master/docs/images/elb-l7-listener.png) + +##### ELB Idle Timeouts + +In some scenarios users will need to modify the value of the ELB idle timeout. 
+Users need to ensure the idle timeout is less than the [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) that is configured for NGINX. +By default NGINX `keepalive_timeout` is set to `75s`. + +The default ELB idle timeout will work for most scenarios, unless the NGINX [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) has been modified, +in which case `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` will need to be modified to ensure it is less than the `keepalive_timeout` the user has configured. + +_Please Note: An idle timeout of `3600s` is recommended when using WebSockets._ + +More information with regards to idle timeouts for your Load Balancer can be found in the [official AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html). + +##### Network Load Balancer (NLB) + +This type of load balancer is supported since v1.10.0 as an ALPHA feature. + +```console +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/service-nlb.yaml +``` + +#### GCE-GKE + +```console +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud-generic.yaml +``` + +**Important Note:** proxy protocol is not supported in GCE/GKE + +#### Azure + +```console +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud-generic.yaml +``` + +#### Bare-metal + +Using [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport): + +```console +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/baremetal/service-nodeport.yaml +``` + +!!! tip + For extended notes regarding deployments on bare-metal, see [Bare-metal considerations](./baremetal.md). + +### Verify installation + +To check if the ingress controller pods have started, run the following command: + +```console +kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch +``` + +Once the operator pods are running, you can cancel the above command by typing `Ctrl+C`. +Now, you are ready to create your first ingress. + +### Detect installed version + +To detect which version of the ingress controller is running, exec into the pod and run `nginx-ingress-controller version` command. + +```console +POD_NAMESPACE=ingress-nginx +POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app.kubernetes.io/component=controller -o jsonpath='{.items[0].metadata.name}') + +kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version +``` + +## Using Helm + +NGINX Ingress controller can be installed via [Helm](https://helm.sh/) using the chart [ingress-nginx/ingress-nginx](https://kubernetes.github.io/ingress-nginx). 
+Official documentation is [here](https://kubernetes.github.io/ingress-nginx/deploy/#using-helm) + +To install the chart with the release name `my-nginx`: + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm install my-nginx ingress-nginx/ingress-nginx +``` + +Detect installed version: + +```console +POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') +kubectl exec -it $POD_NAME -- /nginx-ingress-controller --version +``` diff --git a/kubespray/docs/integration.md b/kubespray/docs/integration.md new file mode 100644 index 0000000..962a5f4 --- /dev/null +++ b/kubespray/docs/integration.md @@ -0,0 +1,188 @@ +# Kubespray (kubespray) in own ansible playbooks repo + +1. Fork [kubespray repo](https://github.com/kubernetes-sigs/kubespray) to your personal/organisation account on github. + Note: + * All forked public repos at github will be also public, so **never commit sensitive data to your public forks**. + * List of all forked repos could be retrieved from github page of original project. + +2. Add **forked repo** as submodule to desired folder in your existent ansible repo (for example 3d/kubespray): + + ```ShellSession + git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray + ``` + + Git will create `.gitmodules` file in your existent ansible repo: + + ```ini + [submodule "3d/kubespray"] + path = 3d/kubespray + url = https://github.com/YOUR_GITHUB/kubespray.git + ``` + +3. Configure git to show submodule status: + + ```ShellSession + git config --global status.submoduleSummary true + ``` + +4. Add *original* kubespray repo as upstream: + + ```ShellSession + cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git + ``` + +5. Sync your master branch with upstream: + + ```ShellSession + git checkout master + git fetch upstream + git merge upstream/master + git push origin master + ``` + +6. Create a new branch which you will use in your working environment: + + ```ShellSession + git checkout -b work + ``` + + ***Never*** use master branch of your repository for your commits. + +7. Modify path to library and roles in your ansible.cfg file (role naming should be unique, you may have to rename your existent roles if they have same names as kubespray project), + if you had roles in your existing ansible project before, you can add the path to those separated with `:`: + + ```ini + ... + library = ./library/:3d/kubespray/library/ + roles_path = ./roles/:3d/kubespray/roles/ + ... + ``` + +8. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project. + + You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup. + +9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming. + For example: + + ```ini + ... + #Kubespray groups: + [kube_node:children] + kubenode + + [k8s_cluster:children] + kubernetes + + [etcd:children] + kubemaster + kubemaster-ha + + [kube_control_plane:children] + kubemaster + kubemaster-ha + + [kubespray:children] + kubernetes + ``` + +* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project. + +10. 
Now you can include kubespray tasks in you existent playbooks by including cluster.yml file: + + ```yml + - name: Import kubespray playbook + ansible.builtin.import_playbook: 3d/kubespray/cluster.yml + ``` + + Or your could copy separate tasks from cluster.yml into your ansible repository. + +11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo. + + When you update your "work" branch you need to commit changes to ansible repo as well. +Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule. + +## Contributing + +If you made useful changes or fixed a bug in existent kubespray repo, use this flow for PRs to original kubespray repo. + +1. Sign the [CNCF CLA](https://git.k8s.io/community/CLA.md). + +2. Change working directory to git submodule directory (3d/kubespray). + +3. Setup desired user.name and user.email for submodule. + + If kubespray is only one submodule in your repo you could use something like: + + ```ShellSession + git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"' + ``` + +4. Sync with upstream master: + + ```ShellSession + git fetch upstream + git merge upstream/master + git push origin master + ``` + +5. Create new branch for the specific fixes that you want to contribute: + + ```ShellSession + git checkout -b fixes-name-date-index + ``` + + Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs. + +6. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo: + + ```ShellSession + git cherry-pick + ``` + +7. If you have several temporary-stage commits - squash them using [git rebase -i](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit) + + Also you could use interactive rebase + + ```ShellSession + git rebase -i HEAD~10 + ``` + + to delete commits which you don't want to contribute into original repo. + +8. When your changes is in place, you need to check upstream repo one more time because it could be changed during your work. + + Check that you're on correct branch: + + ```ShellSession + git status + ``` + + And pull changes from upstream (if any): + + ```ShellSession + git pull --rebase upstream master + ``` + +9. Now push your changes to your **fork** repo with + + ```ShellSession + git push + ``` + + If your branch doesn't exists on github, git will propose you to use something like + + ```ShellSession + git push --set-upstream origin fixes-name-date-index + ``` + +10. Open you forked repo in browser, on the main page you will see proposition to create pull request for your newly created branch. Check proposed diff of your PR. If something is wrong you could safely delete "fix" branch on github using + + ```ShellSession + git push origin --delete fixes-name-date-index + git branch -D fixes-name-date-index + ``` + + and start whole process from the beginning. + + If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation. 
diff --git a/kubespray/docs/kata-containers.md b/kubespray/docs/kata-containers.md new file mode 100644 index 0000000..30843fd --- /dev/null +++ b/kubespray/docs/kata-containers.md @@ -0,0 +1,101 @@ +# Kata Containers + +[Kata Containers](https://katacontainers.io) is a secure container runtime with lightweight virtual machines that supports multiple hypervisor solutions. + +## Hypervisors + +_Qemu_ is the only hypervisor supported by Kubespray. + +## Installation + +To enable Kata Containers, set the following variables: + +**k8s-cluster.yml**: + +```yaml +container_manager: containerd +kata_containers_enabled: true +``` + +**etcd.yml**: + +```yaml +etcd_deployment_type: host +``` + +## Usage + +By default, runc is used for pods. +Kubespray generates the runtimeClass kata-qemu, and it is necessary to specify it as +the runtimeClassName of a pod spec to use Kata Containers: + +```shell +$ kubectl get runtimeclass +NAME HANDLER AGE +kata-qemu kata-qemu 3m34s +$ +$ cat nginx.yaml +apiVersion: v1 +kind: Pod +metadata: + name: mypod +spec: + runtimeClassName: kata-qemu + containers: + - name: nginx + image: nginx:1.14.2 +$ +$ kubectl apply -f nginx.yaml +``` + +## Configuration + +### Recommended : Pod Overhead + +[Pod Overhead](https://kubernetes.io/docs/concepts/configuration/pod-overhead/) is a feature for accounting for the resources consumed by the Runtime Class used by the Pod. + +When this feature is enabled, Kubernetes will count the fixed amount of CPU and memory set in the configuration as used by the virtual machine and not by the containers running in the Pod. + +Pod Overhead is mandatory if you run Pods with Kata Containers that use [resources limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits). + +**Set cgroup driver**: + +To enable Pod Overhead feature you have to configure Kubelet with the appropriate cgroup driver, using the following configuration: + +`cgroupfs` works best: + +```yaml +kubelet_cgroup_driver: cgroupfs +``` + +... but when using `cgroups v2` (see ) you can use systemd as well: + +```yaml +kubelet_cgroup_driver: systemd +``` + +**Qemu hypervisor configuration**: + +The configuration for the Qemu hypervisor uses the following values: + +```yaml +kata_containers_qemu_overhead: true +kata_containers_qemu_overhead_fixed_cpu: 10m +kata_containers_qemu_overhead_fixed_memory: 290Mi +``` + +### Optional : Select Kata Containers version + +Optionally you can select the Kata Containers release version to be installed. The available releases are published in [GitHub](https://github.com/kata-containers/kata-containers/releases). + +```yaml +kata_containers_version: 2.2.2 +``` + +### Optional : Debug + +Debug is disabled by default for all the components of Kata Containers. You can change this behaviour with the following configuration: + +```yaml +kata_containers_qemu_debug: 'false' +``` diff --git a/kubespray/docs/kube-ovn.md b/kubespray/docs/kube-ovn.md new file mode 100644 index 0000000..3ddc270 --- /dev/null +++ b/kubespray/docs/kube-ovn.md @@ -0,0 +1,55 @@ +# Kube-OVN + +Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises. 
+ +For more information please check [Kube-OVN documentation](https://github.com/alauda/kube-ovn) + +**Warning:** Kernel version (`cat /proc/version`) needs to be different than `3.10.0-862` or kube-ovn won't start and will print this message: + +```bash +kernel version 3.10.0-862 has a nat related bug that will affect ovs function, please update to a version greater than 3.10.0-898 +``` + +## How to use it + +Enable kube-ovn in `group_vars/k8s_cluster/k8s_cluster.yml` + +```yml +... +kube_network_plugin: kube-ovn +... +``` + +## Verifying kube-ovn install + +Kube-OVN run ovn and controller in `kube-ovn` namespace + +* Check the status of kube-ovn pods + +```ShellSession +# From the CLI +kubectl get pod -n kube-ovn + +# Output +NAME READY STATUS RESTARTS AGE +kube-ovn-cni-49lsm 1/1 Running 0 2d20h +kube-ovn-cni-9db8f 1/1 Running 0 2d20h +kube-ovn-cni-wftdk 1/1 Running 0 2d20h +kube-ovn-controller-68d7bb48bd-7tnvg 1/1 Running 0 2d21h +ovn-central-6675dbb7d9-d7z8m 1/1 Running 0 4d16h +ovs-ovn-hqn8p 1/1 Running 0 4d16h +ovs-ovn-hvpl8 1/1 Running 0 4d16h +ovs-ovn-r5frh 1/1 Running 0 4d16h +``` + +* Check the default and node subnet + +```ShellSession +# From the CLI +kubectl get subnet + +# Output +NAME PROTOCOL CIDR PRIVATE NAT +join IPv4 100.64.0.0/16 false false +ovn-default IPv4 10.16.0.0/16 false true +``` diff --git a/kubespray/docs/kube-router.md b/kubespray/docs/kube-router.md new file mode 100644 index 0000000..6a32834 --- /dev/null +++ b/kubespray/docs/kube-router.md @@ -0,0 +1,79 @@ +# Kube-router + +Kube-router is a L3 CNI provider, as such it will setup IPv4 routing between +nodes to provide Pods' networks reachability. + +See [kube-router documentation](https://www.kube-router.io/). + +## Verifying kube-router install + +Kube-router runs its pods as a `DaemonSet` in the `kube-system` namespace: + +* Check the status of kube-router pods + +```ShellSession +# From the CLI +kubectl get pod --namespace=kube-system -l k8s-app=kube-router -owide + +# output +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE +kube-router-4f679 1/1 Running 0 2d 192.168.186.4 mykube-k8s-node-nf-2 +kube-router-5slf8 1/1 Running 0 2d 192.168.186.11 mykube-k8s-node-nf-3 +kube-router-lb6k2 1/1 Running 0 20h 192.168.186.14 mykube-k8s-node-nf-6 +kube-router-rzvrb 1/1 Running 0 20h 192.168.186.17 mykube-k8s-node-nf-4 +kube-router-v6n56 1/1 Running 0 2d 192.168.186.6 mykube-k8s-node-nf-1 +kube-router-wwhg8 1/1 Running 0 20h 192.168.186.16 mykube-k8s-node-nf-5 +kube-router-x2xs7 1/1 Running 0 2d 192.168.186.10 mykube-k8s-master-1 +``` + +* Peek at kube-router container logs: + +```ShellSession +# From the CLI +kubectl logs --namespace=kube-system -l k8s-app=kube-router | grep Peer.Up + +# output +time="2018-09-17T16:47:14Z" level=info msg="Peer Up" Key=192.168.186.6 State=BGP_FSM_OPENCONFIRM Topic=Peer +time="2018-09-17T16:47:16Z" level=info msg="Peer Up" Key=192.168.186.11 State=BGP_FSM_OPENCONFIRM Topic=Peer +time="2018-09-17T16:47:46Z" level=info msg="Peer Up" Key=192.168.186.10 State=BGP_FSM_OPENCONFIRM Topic=Peer +time="2018-09-18T19:12:24Z" level=info msg="Peer Up" Key=192.168.186.14 State=BGP_FSM_OPENCONFIRM Topic=Peer +time="2018-09-18T19:12:28Z" level=info msg="Peer Up" Key=192.168.186.17 State=BGP_FSM_OPENCONFIRM Topic=Peer +time="2018-09-18T19:12:38Z" level=info msg="Peer Up" Key=192.168.186.16 State=BGP_FSM_OPENCONFIRM Topic=Peer +[...] 
+``` + +## Gathering kube-router state + +Kube-router Pods come bundled with a "Pod Toolbox" which provides very +useful internal state views for: + +* IPVS: via `ipvsadm` +* BGP peering and routing info: via `gobgp` + +You need to `kubectl exec -it ...` into a kube-router container to use these, see + for details. + +## Kube-router configuration + +You can change the default configuration by overriding `kube_router_...` variables +(as found at `roles/network_plugin/kube-router/defaults/main.yml`), +these are named to follow `kube-router` command-line options as per +. + +## Advanced BGP Capabilities + + + +If you have other networking devices or SDN systems that talk BGP, kube-router will fit in perfectly. +From a simple full node-to-node mesh to per-node peering configurations, most routing needs can be attained. +The configuration is Kubernetes native (annotations) just like the rest of kube-router. + +For more details please refer to the + +Next options will set up annotations for kube-router, using `kubectl annotate` command. + +```yml +kube_router_annotations_master: [] +kube_router_annotations_node: [] +kube_router_annotations_all: [] +``` diff --git a/kubespray/docs/kube-vip.md b/kubespray/docs/kube-vip.md new file mode 100644 index 0000000..9ba402d --- /dev/null +++ b/kubespray/docs/kube-vip.md @@ -0,0 +1,60 @@ +# kube-vip + +kube-vip provides Kubernetes clusters with a virtual IP and load balancer for both the control plane (for building a highly-available cluster) and Kubernetes Services of type LoadBalancer without relying on any external hardware or software. + +## Prerequisites + +You have to configure `kube_proxy_strict_arp` when the kube_proxy_mode is `ipvs` and kube-vip ARP is enabled. + +```yaml +kube_proxy_strict_arp: true +``` + +## Install + +You have to explicitly enable the kube-vip extension: + +```yaml +kube_vip_enabled: true +``` + +You also need to enable +[kube-vip as HA, Load Balancer, or both](https://kube-vip.io/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both): + +```yaml +# HA for control-plane, requires a VIP +kube_vip_controlplane_enabled: true +kube_vip_address: 10.42.42.42 +loadbalancer_apiserver: + address: "{{ kube_vip_address }}" + port: 6443 +# kube_vip_interface: ens160 + +# LoadBalancer for services +kube_vip_services_enabled: false +# kube_vip_services_interface: ens320 +``` + +> Note: When using `kube-vip` as LoadBalancer for services, +[additional manual steps](https://kube-vip.io/docs/usage/cloud-provider/) +are needed. 
+ +If using [ARP mode](https://kube-vip.io/docs/installation/static/#arp) : + +```yaml +kube_vip_arp_enabled: true +``` + +If using [BGP mode](https://kube-vip.io/docs/installation/static/#bgp) : + +```yaml +kube_vip_bgp_enabled: true +kube_vip_local_as: 65000 +kube_vip_bgp_routerid: 192.168.0.2 +kube_vip_bgppeers: +- 192.168.0.10:65000::false +- 192.168.0.11:65000::false +# kube_vip_bgp_peeraddress: +# kube_vip_bgp_peerpass: +# kube_vip_bgp_peeras: +``` diff --git a/kubespray/docs/kubernetes-apps/cephfs_provisioner.md b/kubespray/docs/kubernetes-apps/cephfs_provisioner.md new file mode 100644 index 0000000..c5c18db --- /dev/null +++ b/kubespray/docs/kubernetes-apps/cephfs_provisioner.md @@ -0,0 +1,73 @@ +# CephFS Volume Provisioner for Kubernetes 1.5+ + +[![Docker Repository on Quay](https://quay.io/repository/external_storage/cephfs-provisioner/status "Docker Repository on Quay")](https://quay.io/repository/external_storage/cephfs-provisioner) + +Using Ceph volume client + +## Development + +Compile the provisioner + +``` console +make +``` + +Make the container image and push to the registry + +``` console +make push +``` + +## Test instruction + +- Start Kubernetes local cluster + +See [Kubernetes](https://kubernetes.io/) + +- Create a Ceph admin secret + +``` bash +ceph auth get client.admin 2>&1 |grep "key = " |awk '{print $3'} |xargs echo -n > /tmp/secret +kubectl create ns cephfs +kubectl create secret generic ceph-secret-admin --from-file=/tmp/secret --namespace=cephfs +``` + +- Start CephFS provisioner + +The following example uses `cephfs-provisioner-1` as the identity for the instance and assumes kubeconfig is at `/root/.kube`. The identity should remain the same if the provisioner restarts. If there are multiple provisioners, each should have a different identity. + +``` bash +docker run -ti -v /root/.kube:/kube -v /var/run/kubernetes:/var/run/kubernetes --privileged --net=host cephfs-provisioner /usr/local/bin/cephfs-provisioner -master=http://127.0.0.1:8080 -kubeconfig=/kube/config -id=cephfs-provisioner-1 +``` + +Alternatively, deploy it in kubernetes, see [deployment](deploy/README.md). + +- Create a CephFS Storage Class + +Replace Ceph monitor's IP in [example class](example/class.yaml) with your own and create storage class: + +``` bash +kubectl create -f example/class.yaml +``` + +- Create a claim + +``` bash +kubectl create -f example/claim.yaml +``` + +- Create a Pod using the claim + +``` bash +kubectl create -f example/test-pod.yaml +``` + +## Known limitations + +- Kernel CephFS doesn't work with SELinux, setting SELinux label in Pod's securityContext will not work. +- Kernel CephFS doesn't support quota or capacity, capacity requested by PVC is not enforced or validated. +- Currently each Ceph user created by the provisioner has `allow r` MDS cap to permit CephFS mount. + +## Acknowledgement + +Inspired by CephFS Manila provisioner and conversation with John Spray diff --git a/kubespray/docs/kubernetes-apps/local_volume_provisioner.md b/kubespray/docs/kubernetes-apps/local_volume_provisioner.md new file mode 100644 index 0000000..e9c6225 --- /dev/null +++ b/kubespray/docs/kubernetes-apps/local_volume_provisioner.md @@ -0,0 +1,131 @@ +# Local Static Storage Provisioner + +The [local static storage provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) +is NOT a dynamic storage provisioner as you would +expect from a cloud provider. 
Instead, it simply creates PersistentVolumes for +all mounts under the `host_dir` of the specified storage class. +These storage classes are specified in the `local_volume_provisioner_storage_classes` nested dictionary. + +Example: + +```yaml +local_volume_provisioner_storage_classes: + local-storage: + host_dir: /mnt/disks + mount_dir: /mnt/disks + fast-disks: + host_dir: /mnt/fast-disks + mount_dir: /mnt/fast-disks + block_cleaner_command: + - "/scripts/shred.sh" + - "2" + volume_mode: Filesystem + fs_type: ext4 +``` + +For each key in `local_volume_provisioner_storage_classes` a "storage class" with +the same name is created in the entry `storageClassMap` of the ConfigMap `local-volume-provisioner`. +The subkeys of each storage class in `local_volume_provisioner_storage_classes` +are converted to camelCase and added as attributes to the storage class in the +ConfigMap. + +The result of the above example is: + +```yaml +data: + storageClassMap: | + local-storage: + hostDir: /mnt/disks + mountDir: /mnt/disks + fast-disks: + hostDir: /mnt/fast-disks + mountDir: /mnt/fast-disks + blockCleanerCommand: + - "/scripts/shred.sh" + - "2" + volumeMode: Filesystem + fsType: ext4 +``` + +Additionally, a StorageClass object (`storageclasses.storage.k8s.io`) is also +created for each storage class: + +```bash +$ kubectl get storageclasses.storage.k8s.io +NAME PROVISIONER RECLAIMPOLICY +fast-disks kubernetes.io/no-provisioner Delete +local-storage kubernetes.io/no-provisioner Delete +``` + +The default StorageClass is `local-storage` on `/mnt/disks`; +the rest of this documentation will use that path as an example. + +## Examples to create local storage volumes + +1. Using tmpfs + + ```bash + for vol in vol1 vol2 vol3; do + mkdir /mnt/disks/$vol + mount -t tmpfs -o size=5G $vol /mnt/disks/$vol + done + ``` + + The tmpfs method is not recommended for production because the mounts are not + persistent and data will be deleted on reboot. + +1. Mount physical disks + + ```bash + mkdir /mnt/disks/ssd1 + mount /dev/vdb1 /mnt/disks/ssd1 + ``` + + Physical disks are recommended for production environments because it offers + complete isolation in terms of I/O and capacity. + +1. Mount unpartitioned physical devices + + ```bash + for disk in /dev/sdc /dev/sdd /dev/sde; do + ln -s $disk /mnt/disks + done + ``` + + This saves time of precreating filesystems. Note that your storageclass must have + `volume_mode` set to `"Filesystem"` and `fs_type` defined. If either is not set, the + disk will be added as a raw block device. + +1. PersistentVolumes with `volumeMode="Block"` + + Just like above, you can create PersistentVolumes with volumeMode `Block` + by creating a symbolic link under discovery directory to the block device on + the node, if you set `volume_mode` to `"Block"`. This will create a volume + presented into a Pod as a block device, without any filesystem on it. + +1. File-backed sparsefile method + + ```bash + truncate /mnt/disks/disk5 --size 2G + mkfs.ext4 /mnt/disks/disk5 + mkdir /mnt/disks/vol5 + mount /mnt/disks/disk5 /mnt/disks/vol5 + ``` + + If you have a development environment and only one disk, this is the best way + to limit the quota of persistent volumes. + +1. Simple directories + + In a development environment, using `mount --bind` works also, but there is no capacity + management. + +## Usage notes + +Make sure to make any mounts persist via `/etc/fstab` or with systemd mounts (for +Flatcar Container Linux or Fedora CoreOS). 
Pods with persistent volume claims will not be +able to start if the mounts become unavailable. + +## Further reading + +Refer to the upstream docs here: diff --git a/kubespray/docs/kubernetes-apps/rbd_provisioner.md b/kubespray/docs/kubernetes-apps/rbd_provisioner.md new file mode 100644 index 0000000..dcb883d --- /dev/null +++ b/kubespray/docs/kubernetes-apps/rbd_provisioner.md @@ -0,0 +1,79 @@ +# RBD Volume Provisioner for Kubernetes 1.5+ + +`rbd-provisioner` is an out-of-tree dynamic provisioner for Kubernetes 1.5+. +You can use it quickly & easily deploy ceph RBD storage that works almost +anywhere. + +It works just like in-tree dynamic provisioner. For more information on how +dynamic provisioning works, see [the docs](http://kubernetes.io/docs/user-guide/persistent-volumes/) +or [this blog post](http://blog.kubernetes.io/2016/10/dynamic-provisioning-and-storage-in-kubernetes.html). + +## Development + +Compile the provisioner + +```console +make +``` + +Make the container image and push to the registry + +```console +make push +``` + +## Test instruction + +* Start Kubernetes local cluster + +See [Kubernetes](https://kubernetes.io/). + +* Create a Ceph admin secret + +```bash +ceph auth get client.admin 2>&1 |grep "key = " |awk '{print $3'} |xargs echo -n > /tmp/secret +kubectl create secret generic ceph-admin-secret --from-file=/tmp/secret --namespace=kube-system +``` + +* Create a Ceph pool and a user secret + +```bash +ceph osd pool create kube 8 8 +ceph auth add client.kube mon 'allow r' osd 'allow rwx pool=kube' +ceph auth get-key client.kube > /tmp/secret +kubectl create secret generic ceph-secret --from-file=/tmp/secret --namespace=kube-system +``` + +* Start RBD provisioner + +The following example uses `rbd-provisioner-1` as the identity for the instance and assumes kubeconfig is at `/root/.kube`. The identity should remain the same if the provisioner restarts. If there are multiple provisioners, each should have a different identity. + +```bash +docker run -ti -v /root/.kube:/kube -v /var/run/kubernetes:/var/run/kubernetes --privileged --net=host quay.io/external_storage/rbd-provisioner /usr/local/bin/rbd-provisioner -master=http://127.0.0.1:8080 -kubeconfig=/kube/config -id=rbd-provisioner-1 +``` + +Alternatively, deploy it in kubernetes, see [deployment](deploy/README.md). + +* Create a RBD Storage Class + +Replace Ceph monitor's IP in [examples/class.yaml](examples/class.yaml) with your own and create storage class: + +```bash +kubectl create -f examples/class.yaml +``` + +* Create a claim + +```bash +kubectl create -f examples/claim.yaml +``` + +* Create a Pod using the claim + +```bash +kubectl create -f examples/test-pod.yaml +``` + +## Acknowledgements + +* This provisioner is extracted from [Kubernetes core](https://github.com/kubernetes/kubernetes) with some modifications for this project. diff --git a/kubespray/docs/kubernetes-apps/registry.md b/kubespray/docs/kubernetes-apps/registry.md new file mode 100644 index 0000000..182f10a --- /dev/null +++ b/kubespray/docs/kubernetes-apps/registry.md @@ -0,0 +1,244 @@ +# Private Docker Registry in Kubernetes + +Kubernetes offers an optional private Docker registry addon, which you can turn +on when you bring up a cluster or install later. This gives you a place to +store truly private Docker images for your cluster. + +## How it works + +The private registry runs as a `Pod` in your cluster. It does not currently +support SSL or authentication, which triggers Docker's "insecure registry" +logic. 
To work around this, we run a proxy on each node in the cluster, +exposing a port onto the node (via a hostPort), which Docker accepts as +"secure", since it is accessed by `localhost`. + +## Turning it on + +Some cluster installs (e.g. GCE) support this as a cluster-birth flag. The +`ENABLE_CLUSTER_REGISTRY` variable in `cluster/gce/config-default.sh` governs +whether the registry is run or not. To set this flag, you can specify +`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`. If your cluster +does not include this flag, the following steps should work. Note that some of +this is cloud-provider specific, so you may have to customize it a bit. + +### Make some storage + +The primary job of the registry is to store data. To do that we have to decide +where to store it. For cloud environments that have networked storage, we can +use Kubernetes's `PersistentVolume` abstraction. The following template is +expanded by `salt` in the GCE cluster turnup, but can easily be adapted to +other situations: + +```yaml +kind: PersistentVolume +apiVersion: v1 +metadata: + name: kube-system-kube-registry-pv +spec: +{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %} + capacity: + storage: {{ pillar['cluster_registry_disk_size'] }} + accessModes: + - ReadWriteOnce + gcePersistentDisk: + pdName: "{{ pillar['cluster_registry_disk_name'] }}" + fsType: "ext4" +{% endif %} +``` + +If, for example, you wanted to use NFS you would just need to change the +`gcePersistentDisk` block to `nfs`. See +[here](https://kubernetes.io/docs/concepts/storage/volumes/) for more details on volumes. + +Note that in any case, the storage (in the case the GCE PersistentDisk) must be +created independently - this is not something Kubernetes manages for you (yet). + +### I don't want or don't have persistent storage + +If you are running in a place that doesn't have networked storage, or if you +just want to kick the tires on this without committing to it, you can easily +adapt the `ReplicationController` specification below to use a simple +`emptyDir` volume instead of a `persistentVolumeClaim`. + +## Claim the storage + +Now that the Kubernetes cluster knows that some storage exists, you can put a +claim on that storage. As with the `PersistentVolume` above, you can start +with the `salt` template: + +```yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: kube-registry-pvc + namespace: kube-system +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ pillar['cluster_registry_disk_size'] }} +``` + +This tells Kubernetes that you want to use storage, and the `PersistentVolume` +you created before will be bound to this claim (unless you have other +`PersistentVolumes` in which case those might get bound instead). This claim +gives you the right to use this storage until you release the claim. 
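+
+Before moving on, it can be worth checking that the claim has bound to the
+volume created above (the output below is abbreviated and illustrative):
+
+```ShellSession
+$ kubectl get pvc kube-registry-pvc --namespace=kube-system
+NAME                STATUS   VOLUME                         ...
+kube-registry-pvc   Bound    kube-system-kube-registry-pv   ...
+```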
+ +## Run the registry + +Now we can run a Docker registry: + +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + name: kube-registry-v0 + namespace: kube-system + labels: + k8s-app: registry + version: v0 +spec: + replicas: 1 + selector: + k8s-app: registry + version: v0 + template: + metadata: + labels: + k8s-app: registry + version: v0 + spec: + containers: + - name: registry + image: registry:2 + resources: + limits: + cpu: 100m + memory: 100Mi + env: + - name: REGISTRY_HTTP_ADDR + value: :5000 + - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY + value: /var/lib/registry + volumeMounts: + - name: image-store + mountPath: /var/lib/registry + ports: + - containerPort: 5000 + name: registry + protocol: TCP + volumes: + - name: image-store + persistentVolumeClaim: + claimName: kube-registry-pvc +``` + +*Note:* that if you have set multiple replicas, make sure your CSI driver has support for the `ReadWriteMany` accessMode. + +## Expose the registry in the cluster + +Now that we have a registry `Pod` running, we can expose it as a Service: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: kube-registry + namespace: kube-system + labels: + k8s-app: registry + kubernetes.io/name: "KubeRegistry" +spec: + selector: + k8s-app: registry + ports: + - name: registry + port: 5000 + protocol: TCP +``` + +## Expose the registry on each node + +Now that we have a running `Service`, we need to expose it onto each Kubernetes +`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every +node by creating following daemonset. + +```yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-registry-proxy + namespace: kube-system + labels: + k8s-app: kube-registry-proxy + version: v0.4 +spec: + template: + metadata: + labels: + k8s-app: kube-registry-proxy + kubernetes.io/name: "kube-registry-proxy" + version: v0.4 + spec: + containers: + - name: kube-registry-proxy + image: gcr.io/google_containers/kube-registry-proxy:0.4 + resources: + limits: + cpu: 100m + memory: 50Mi + env: + - name: REGISTRY_HOST + value: kube-registry.kube-system.svc.cluster.local + - name: REGISTRY_PORT + value: "5000" + ports: + - name: registry + containerPort: 80 + hostPort: 5000 +``` + +When modifying replication-controller, service and daemon-set definitions, take +care to ensure *unique* identifiers for the rc-svc couple and the daemon-set. +Failing to do so will have register the localhost proxy daemon-sets to the +upstream service. As a result they will then try to proxy themselves, which +will, for obvious reasons, not work. + +This ensures that port 5000 on each node is directed to the registry `Service`. +You should be able to verify that it is running by hitting port 5000 with a web +browser and getting a 404 error: + +```ShellSession +$ curl localhost:5000 +404 page not found +``` + +## Using the registry + +To use an image hosted by this registry, simply say this in your `Pod`'s +`spec.containers[].image` field: + +```yaml + image: localhost:5000/user/container +``` + +Before you can use the registry, you have to be able to get images into it, +though. If you are building an image on your Kubernetes `Node`, you can spell +out `localhost:5000` when you build and push. More likely, though, you are +building locally and want to push to your cluster. 
+
+You can use `kubectl` to set up a port-forward from your local node to a
+running Pod:
+
+```ShellSession
+$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
+        -o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
+        | grep Running | head -1 | cut -f1 -d' ')
+
+$ kubectl port-forward --namespace kube-system $POD 5000:5000 &
+```
+
+Now you can build and push images on your local computer as
+`localhost:5000/yourname/container` and those images will be available inside
+your Kubernetes cluster with the same name.
diff --git a/kubespray/docs/kubernetes-reliability.md b/kubespray/docs/kubernetes-reliability.md
new file mode 100644
index 0000000..149ec84
--- /dev/null
+++ b/kubespray/docs/kubernetes-reliability.md
@@ -0,0 +1,108 @@
+# Overview
+
+Distributed systems such as Kubernetes are designed to be resilient to
+failures. More details about Kubernetes High-Availability (HA) may be found at
+[Building High-Availability Clusters](https://kubernetes.io/docs/admin/high-availability/).
+
+To keep this overview simple, most HA details are skipped and only the
+Kubelet<->Controller Manager communication is described.
+
+By default, the normal behavior looks like this:
+
+1. Kubelet updates its status to the apiserver periodically, as specified by
+   `--node-status-update-frequency`. The default value is **10s**.
+
+2. The Kubernetes controller manager checks the Kubelet statuses every
+   `--node-monitor-period`. The default value is **5s**.
+
+3. If the status was updated within `--node-monitor-grace-period`, the
+   Kubernetes controller manager considers the Kubelet healthy. The
+   default value is **40s**.
+
+> The Kubernetes controller manager and Kubelet work asynchronously. This means
+> that the delay may include any network latency, API Server latency, etcd
+> latency, latency caused by load on the control plane nodes, and so on. So if
+> `--node-status-update-frequency` is set to 5s, in reality the update may
+> appear in etcd after 6-7 seconds, or even longer when etcd cannot commit
+> data to its quorum nodes.
+
+## Failure
+
+Kubelet will make `nodeStatusUpdateRetry` POST attempts. Currently
+`nodeStatusUpdateRetry` is a constant set to 5 in
+[kubelet.go](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet.go#L102).
+
+Kubelet tries to update the status in the
+[tryUpdateNodeStatus](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet_node_status.go#L312)
+function. Kubelet uses Golang's `http.Client()`, but does not specify a
+timeout, so there may be glitches when the API Server is overloaded while the
+TCP connection is being established.
+
+So, there will be `nodeStatusUpdateRetry` * `--node-status-update-frequency`
+attempts to set the node status.
+
+At the same time, the Kubernetes controller manager will try to check
+`nodeStatusUpdateRetry` times every `--node-monitor-period`. After
+`--node-monitor-grace-period` it will consider the node unhealthy. Pods will then be rescheduled based on the
+[Taint Based Eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-based-evictions)
+timers that you set on them individually, or the API Server's global timers: `--default-not-ready-toleration-seconds` &
+``--default-unreachable-toleration-seconds``.
+
+Kube-proxy has a watcher over the API. Once pods are evicted, kube-proxy will
+notice and will update the iptables of the node.
It will remove endpoints from
+services, so pods from the failed node won't be accessible anymore.
+
+## Recommendations for different cases
+
+### Fast Update and Fast Reaction
+
+Suppose `--node-status-update-frequency` is set to **4s** (10s is the default),
+`--node-monitor-period` to **2s** (5s is the default),
+`--node-monitor-grace-period` to **20s** (40s is the default), and
+`--default-not-ready-toleration-seconds` and ``--default-unreachable-toleration-seconds`` are set to **30**
+(300 seconds is the default). Note that these two values should be plain integers representing the number of seconds
+(no "s" or "m" suffix for seconds/minutes is specified).
+
+In this scenario, pods will be evicted in **50s**: the node will be
+considered down after **20s**, and `--default-not-ready-toleration-seconds` or
+``--default-unreachable-toleration-seconds`` take effect after a further **30s**. However, this scenario creates an overhead on
+etcd, as every node will try to update its status every 2 seconds.
+
+If the environment has 1000 nodes, there will be 15000 node updates per
+minute, which may require large etcd containers or even dedicated nodes for etcd.
+
+> If we calculate the number of tries, the division gives 5, but in reality
+> there will be from 3 to 5 tries, with `nodeStatusUpdateRetry` attempts for
+> each try. The total number of attempts will vary from 15 to 25 due to the
+> latency of all components.
+
+### Medium Update and Average Reaction
+
+Let's set `--node-status-update-frequency` to **20s**,
+`--node-monitor-grace-period` to **2m**, and `--default-not-ready-toleration-seconds` and
+``--default-unreachable-toleration-seconds`` to **60**.
+In that case, Kubelet will try to update the status every 20s, so there will be
+6 * 5 = 30 attempts before the Kubernetes controller manager considers the node
+unhealthy. After a further 1m it will evict all pods. The total time before the
+eviction process starts is 3m.
+
+This scenario is good for medium-sized environments, as 1000 nodes will require
+3000 etcd updates per minute.
+
+> In reality, there will be from 4 to 6 node update tries. The total number
+> of attempts will vary from 20 to 30.
+
+### Low Update and Slow Reaction
+
+Let's set `--node-status-update-frequency` to **1m**,
+`--node-monitor-grace-period` to **5m**, and `--default-not-ready-toleration-seconds` and
+``--default-unreachable-toleration-seconds`` to **60**. In this scenario, every kubelet will try to update the status
+every minute. There will be 5 * 5 = 25 attempts before the node is considered unhealthy. After 5m,
+the Kubernetes controller manager will set the unhealthy status, and pods will
+be evicted 1m after the node is marked unhealthy (6m in total).
+
+> In reality, there will be from 3 to 5 tries. The total number of attempts
+> will vary from 15 to 25.
+
+There can be different combinations, such as Fast Update with Slow Reaction, to
+satisfy specific cases.
diff --git a/kubespray/docs/kylinlinux.md b/kubespray/docs/kylinlinux.md
new file mode 100644
index 0000000..87dceff
--- /dev/null
+++ b/kubespray/docs/kylinlinux.md
@@ -0,0 +1,11 @@
+# Kylin Linux
+
+Kylin Linux is supported with the docker and containerd runtimes.
+
+**Note:** Kylin Linux is not currently covered in kubespray CI and
+support for it is currently considered experimental.
+
+At present, only `Kylin Linux Advanced Server V10 (Sword)` has been adapted, which supports deployment on the aarch64 and x86_64 platforms.
+
+There are no special considerations for using Kylin Linux as the target OS
+for Kubespray deployments.
diff --git a/kubespray/docs/large-deployments.md b/kubespray/docs/large-deployments.md new file mode 100644 index 0000000..7acbff3 --- /dev/null +++ b/kubespray/docs/large-deployments.md @@ -0,0 +1,52 @@ +Large deployments of K8s +======================== + +For a large scaled deployments, consider the following configuration changes: + +* Tune [ansible settings](https://docs.ansible.com/ansible/latest/intro_configuration.html) + for `forks` and `timeout` vars to fit large numbers of nodes being deployed. + +* Override containers' `foo_image_repo` vars to point to intranet registry. + +* Override the ``download_run_once: true`` and/or ``download_localhost: true``. + See download modes for details. + +* Adjust the `retry_stagger` global var as appropriate. It should provide sane + load on a delegate (the first K8s control plane node) then retrying failed + push or download operations. + +* Tune parameters for DNS related applications + Those are ``dns_replicas``, ``dns_cpu_limit``, + ``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``. + Please note that limits must always be greater than or equal to requests. + +* Tune CPU/memory limits and requests. Those are located in roles' defaults + and named like ``foo_memory_limit``, ``foo_memory_requests`` and + ``foo_cpu_limit``, ``foo_cpu_requests``. Note that 'Mi' memory units for K8s + will be submitted as 'M', if applied for ``docker run``, and cpu K8s units + will end up with the 'm' skipped for docker as well. This is required as + docker does not understand k8s units well. + +* Tune ``kubelet_status_update_frequency`` to increase reliability of kubelet. + ``kube_controller_node_monitor_grace_period``, + ``kube_controller_node_monitor_period``, + ``kube_apiserver_pod_eviction_not_ready_timeout_seconds`` & + ``kube_apiserver_pod_eviction_unreachable_timeout_seconds`` for better Kubernetes reliability. + Check out [Kubernetes Reliability](/docs/kubernetes-reliability.md) + +* Tune network prefix sizes. Those are ``kube_network_node_prefix``, + ``kube_service_addresses`` and ``kube_pods_subnet``. + +* Add calico_rr nodes if you are deploying with Calico or Canal. Nodes recover + from host/network interruption much quicker with calico_rr. + +* Check out the + [Inventory](/docs/getting-started.md#building-your-own-inventory) + section of the Getting started guide for tips on creating a large scale + Ansible inventory. + +* Override the ``etcd_events_cluster_setup: true`` store events in a separate + dedicated etcd instance. + +For example, when deploying 200 nodes, you may want to run ansible with +``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``. diff --git a/kubespray/docs/macvlan.md b/kubespray/docs/macvlan.md new file mode 100644 index 0000000..2d0de07 --- /dev/null +++ b/kubespray/docs/macvlan.md @@ -0,0 +1,41 @@ +# Macvlan + +## How to use it + +* Enable macvlan in `group_vars/k8s_cluster/k8s_cluster.yml` + +```yml +... +kube_network_plugin: macvlan +... +``` + +* Adjust the `macvlan_interface` in `group_vars/k8s_cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file: + +```yml +all: + hosts: + node1: + ip: 10.2.2.1 + access_ip: 10.2.2.1 + ansible_host: 10.2.2.1 + macvlan_interface: ens5 +``` + +## Issue encountered + +* Service DNS + +reply from unexpected source: + +add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml` + +* Disable nodelocaldns + +The nodelocal dns IP is not reacheable. 
+ +Disable it in `sample/group_vars/k8s_cluster/k8s_cluster.yml` + +```yml +enable_nodelocaldns: false +``` diff --git a/kubespray/docs/metallb.md b/kubespray/docs/metallb.md new file mode 100644 index 0000000..faeb351 --- /dev/null +++ b/kubespray/docs/metallb.md @@ -0,0 +1,106 @@ +# MetalLB + +MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. +It allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into 3rd party products to provide load-balancers. +The default operating mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode. + +## Prerequisites + +You have to configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface for MetalLB to work. + +```yaml +kube_proxy_strict_arp: true +``` + +## Install + +You have to explicitly enable the MetalLB extension and set an IP address range from which to allocate LoadBalancer IPs. + +```yaml +metallb_enabled: true +metallb_speaker_enabled: true +metallb_avoid_buggy_ips: true +metallb_ip_range: + - 10.5.0.0/16 +``` + +By default only the MetalLB BGP speaker is allowed to run on control plane nodes. If you have a single node cluster or a cluster where control plane are also worker nodes you may need to enable tolerations for the MetalLB controller: + +```yaml +metallb_controller_tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Equal" + value: "" + effect: "NoSchedule" +``` + +## BGP Mode + +When operating in BGP Mode MetalLB needs to have defined upstream peers: + +```yaml +metallb_protocol: bgp +metallb_ip_range: + - 10.5.0.0/16 +metallb_peers: + - peer_address: 192.0.2.1 + peer_asn: 64512 + my_asn: 4200000000 + - peer_address: 192.0.2.2 + peer_asn: 64513 + my_asn: 4200000000 +``` + +Some upstream BGP peers may require password authentication: + +```yaml +metallb_protocol: bgp +metallb_ip_range: + - 10.5.0.0/16 +metallb_peers: + - peer_address: 192.0.2.1 + peer_asn: 64512 + my_asn: 4200000000 + password: "changeme" +``` + +When using calico >= 3.18 you can replace MetalLB speaker by calico Service LoadBalancer IP advertisement. +See [calico service IPs advertisement documentation](https://docs.projectcalico.org/archive/v3.18/networking/advertise-service-ips#advertise-service-load-balancer-ip-addresses). +In this scenario you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range` + +```yaml +metallb_speaker_enabled: false +metallb_avoid_buggy_ips: true +metallb_ip_range: + - 10.5.0.0/16 +calico_advertise_service_loadbalancer_ips: "{{ metallb_ip_range }}" +``` + +If you have additional loadbalancer IP pool in `metallb_additional_address_pools` , ensure to add them to the list. 
+ +```yaml +metallb_speaker_enabled: false +metallb_ip_range: + - 10.5.0.0/16 +metallb_additional_address_pools: + kube_service_pool_1: + ip_range: + - 10.6.0.0/16 + protocol: "bgp" + auto_assign: false + avoid_buggy_ips: true + kube_service_pool_2: + ip_range: + - 10.10.0.0/16 + protocol: "bgp" + auto_assign: false + avoid_buggy_ips: true +calico_advertise_service_loadbalancer_ips: + - 10.5.0.0/16 + - 10.6.0.0/16 + - 10.10.0.0/16 +``` diff --git a/kubespray/docs/mirror.md b/kubespray/docs/mirror.md new file mode 100644 index 0000000..3138d20 --- /dev/null +++ b/kubespray/docs/mirror.md @@ -0,0 +1,66 @@ +# Public Download Mirror + +The public mirror is useful to make the public resources download quickly in some areas of the world. (such as China). + +## Configuring Kubespray to use a mirror site + +You can follow the [offline](offline-environment.md) to config the image/file download configuration to the public mirror site. If you want to download quickly in China, the configuration can be like: + +```shell +gcr_image_repo: "gcr.m.daocloud.io" +kube_image_repo: "k8s.m.daocloud.io" +docker_image_repo: "docker.m.daocloud.io" +quay_image_repo: "quay.m.daocloud.io" +github_image_repo: "ghcr.m.daocloud.io" + +files_repo: "https://files.m.daocloud.io" +``` + +Use mirror sites only if you trust the provider. The Kubespray team cannot verify their reliability or security. +You can replace the `m.daocloud.io` with any site you want. + +## Example Usage Full Steps + +You can follow the full steps to use the kubesray with mirror. for example: + +Install Ansible according to Ansible installation guide then run the following steps: + +```shell +# Copy ``inventory/sample`` as ``inventory/mycluster`` +cp -rfp inventory/sample inventory/mycluster + +# Update Ansible inventory file with inventory builder +declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5) +CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]} + +# Use the download mirror +cp inventory/mycluster/group_vars/all/offline.yml inventory/mycluster/group_vars/all/mirror.yml +sed -i -E '/# .*\{\{ files_repo/s/^# //g' inventory/mycluster/group_vars/all/mirror.yml +tee -a inventory/mycluster/group_vars/all/mirror.yml < diff --git a/kubespray/docs/netcheck.md b/kubespray/docs/netcheck.md new file mode 100644 index 0000000..6a1bf80 --- /dev/null +++ b/kubespray/docs/netcheck.md @@ -0,0 +1,41 @@ +# Network Checker Application + +With the ``deploy_netchecker`` var enabled (defaults to false), Kubespray deploys a +Network Checker Application from the 3rd side `mirantis/k8s-netchecker` docker +images. It consists of the server and agents trying to reach the server by usual +for Kubernetes applications network connectivity meanings. Therefore, this +automatically verifies a pod to pod connectivity via the cluster IP and checks +if DNS resolve is functioning as well. + +The checks are run by agents on a periodic basis and cover standard and host network +pods as well. The history of performed checks may be found in the agents' application +logs. + +To get the most recent and cluster-wide network connectivity report, run from +any of the cluster nodes: + +```ShellSession +curl http://localhost:31081/api/v1/connectivity_check +``` + +Note that Kubespray does not invoke the check but only deploys the application, if +requested. 
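+
+To have Kubespray deploy it, set the variable in your inventory group vars,
+for example (any group_vars file that applies to your cluster works):
+
+```yml
+deploy_netchecker: true
+```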
+ +There are related application specific variables: + +```yml +netchecker_port: 31081 +agent_report_interval: 15 +netcheck_namespace: default +``` + +Note that the application verifies DNS resolve for FQDNs comprising only the +combination of the ``netcheck_namespace.dns_domain`` vars, for example the +``netchecker-service.default.svc.cluster.local``. If you want to deploy the application +to the non default namespace, make sure as well to adjust the ``searchdomains`` var +so the resulting search domain records to contain that namespace, like: + +```yml +search: foospace.cluster.local default.cluster.local ... +nameserver: ... +``` diff --git a/kubespray/docs/nodes.md b/kubespray/docs/nodes.md new file mode 100644 index 0000000..2cd9e9a --- /dev/null +++ b/kubespray/docs/nodes.md @@ -0,0 +1,182 @@ +# Adding/replacing a node + +Modified from [comments in #3471](https://github.com/kubernetes-sigs/kubespray/issues/3471#issuecomment-530036084) + +## Limitation: Removal of first kube_control_plane and etcd-master + +Currently you can't remove the first node in your kube_control_plane and etcd-master list. If you still want to remove this node you have to: + +### 1) Change order of current control planes + +Modify the order of your control plane list by pushing your first entry to any other position. E.g. if you want to remove `node-1` of the following example: + +```yaml + children: + kube_control_plane: + hosts: + node-1: + node-2: + node-3: + kube_node: + hosts: + node-1: + node-2: + node-3: + etcd: + hosts: + node-1: + node-2: + node-3: +``` + +change your inventory to: + +```yaml + children: + kube_control_plane: + hosts: + node-2: + node-3: + node-1: + kube_node: + hosts: + node-2: + node-3: + node-1: + etcd: + hosts: + node-2: + node-3: + node-1: +``` + +## 2) Upgrade the cluster + +run `upgrade-cluster.yml` or `cluster.yml`. Now you are good to go on with the removal. + +## Adding/replacing a worker node + +This should be the easiest. + +### 1) Add new node to the inventory + +### 2) Run `scale.yml` + +You can use `--limit=NODE_NAME` to limit Kubespray to avoid disturbing other nodes in the cluster. + +Before using `--limit` run playbook `facts.yml` without the limit to refresh facts cache for all nodes. + +### 3) Remove an old node with remove-node.yml + +With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=NODE_NAME` to the playbook to limit the execution to the node being removed. + +If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars: `-e node=NODE_NAME -e reset_nodes=false -e allow_ungraceful_removal=true`. +Use this flag even when you remove other types of nodes like a control plane or etcd nodes. + +### 4) Remove the node from the inventory + +That's it. + +## Adding/replacing a control plane node + +### 1) Run `cluster.yml` + +Append the new host to the inventory and run `cluster.yml`. You can NOT use `scale.yml` for that. + +### 2) Restart kube-system/nginx-proxy + +In all hosts, restart nginx-proxy pod. This pod is a local proxy for the apiserver. Kubespray will update its static config, but it needs to be restarted in order to reload. + +```sh +# run in every host +docker ps | grep k8s_nginx-proxy_nginx-proxy | awk '{print $1}' | xargs docker restart +``` + +### 3) Remove old control plane nodes + +With the old node still in the inventory, run `remove-node.yml`. 
You need to pass `-e node=NODE_NAME` to the playbook to limit the execution to the node being removed. +If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars. + +## Replacing a first control plane node + +### 1) Change control plane nodes order in inventory + +from + +```ini +[kube_control_plane] + node-1 + node-2 + node-3 +``` + +to + +```ini +[kube_control_plane] + node-2 + node-3 + node-1 +``` + +### 2) Remove old first control plane node from cluster + +With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=node-1` to the playbook to limit the execution to the node being removed. +If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars. + +### 3) Edit cluster-info configmap in kube-public namespace + +`kubectl edit cm -n kube-public cluster-info` + +Change ip of old kube_control_plane node with ip of live kube_control_plane node (`server` field). Also, update `certificate-authority-data` field if you changed certs. + +### 4) Add new control plane node + +Update inventory (if needed) + +Run `cluster.yml` with `--limit=kube_control_plane` + +## Adding an etcd node + +You need to make sure there are always an odd number of etcd nodes in the cluster. In such a way, this is always a replace or scale up operation. Either add two new nodes or remove an old one. + +### 1) Add the new node running cluster.yml + +Update the inventory and run `cluster.yml` passing `--limit=etcd,kube_control_plane -e ignore_assert_errors=yes`. +If the node you want to add as an etcd node is already a worker or control plane node in your cluster, you have to remove him first using `remove-node.yml`. + +Run `upgrade-cluster.yml` also passing `--limit=etcd,kube_control_plane -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster. + +At this point, you will have an even number of nodes. +Everything should still be working, and you should only have problems if the cluster decides to elect a new etcd leader before you remove a node. +Even so, running applications should continue to be available. + +If you add multiple etcd nodes with one run, you might want to append `-e etcd_retries=10` to increase the amount of retries between each etcd node join. +Otherwise the etcd cluster might still be processing the first join and fail on subsequent nodes. `etcd_retries=10` might work to join 3 new nodes. + +### 2) Add the new node to apiserver config + +In every control plane node, edit `/etc/kubernetes/manifests/kube-apiserver.yaml`. Make sure the new etcd nodes are present in the apiserver command line parameter `--etcd-servers=...`. + +## Removing an etcd node + +### 1) Remove an old etcd node + +With the node still in the inventory, run `remove-node.yml` passing `-e node=NODE_NAME` as the name of the node that should be removed. +If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars. + +### 2) Make sure only remaining nodes are in your inventory + +Remove `NODE_NAME` from your inventory file. + +### 3) Update kubernetes and network configuration files with the valid list of etcd members + +Run `cluster.yml` to regenerate the configuration files on all remaining nodes. + +### 4) Remove the old etcd node from apiserver config + +In every control plane node, edit `/etc/kubernetes/manifests/kube-apiserver.yaml`. 
Make sure only active etcd nodes are still present in the apiserver command line parameter `--etcd-servers=...`. + +### 5) Shutdown the old instance + +That's it. diff --git a/kubespray/docs/ntp.md b/kubespray/docs/ntp.md new file mode 100644 index 0000000..a91e09e --- /dev/null +++ b/kubespray/docs/ntp.md @@ -0,0 +1,50 @@ +# NTP synchronization + +The Network Time Protocol (NTP) is a networking protocol for clock synchronization between computer systems. Time synchronization is important to Kubernetes and Etcd. + +## Enable the NTP + +To start the ntpd(or chrony) service and enable it at system boot. There are related specific variables: + +```ShellSession +ntp_enabled: true +``` + +The NTP service would be enabled and sync time automatically. + +## Customize the NTP configure file + +In the Air-Gap environment, the node cannot access the NTP server by internet. So the node can use the customized ntp server by configuring ntp file. + +```ShellSession +ntp_enabled: true +ntp_manage_config: true +ntp_servers: + - "0.your-ntp-server.org iburst" + - "1.your-ntp-server.org iburst" + - "2.your-ntp-server.org iburst" + - "3.your-ntp-server.org iburst" +``` + +## Setting the TimeZone + +The timezone can also be set by the `ntp_timezone` , eg: "Etc/UTC","Asia/Shanghai". If not set, the timezone will not change. + +```ShellSession +ntp_enabled: true +ntp_timezone: Etc/UTC +``` + +## Advanced Configure + +Enable `tinker panic` is useful when running NTP in a VM environment to avoiding clock drift on VMs. It only takes effect when ntp_manage_config is true. + +```ShellSession +ntp_tinker_panic: true +``` + +Force sync time immediately by NTP after the ntp installed, which is useful in newly installed system. + +```ShellSession +ntp_force_sync_immediately: true +``` diff --git a/kubespray/docs/offline-environment.md b/kubespray/docs/offline-environment.md new file mode 100644 index 0000000..fd345f3 --- /dev/null +++ b/kubespray/docs/offline-environment.md @@ -0,0 +1,132 @@ +# Offline environment + +In case your servers don't have access to the internet directly (for example +when deploying on premises with security constraints), you need to get the +following artifacts in advance from another environment where has access to the internet. + +* Some static files (zips and binaries) +* OS packages (rpm/deb files) +* Container images used by Kubespray. Exhaustive list depends on your setup +* [Optional] Python packages used by Kubespray (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`) +* [Optional] Helm chart files (only required if `helm_enabled=true`) + +Then you need to setup the following services on your offline environment: + +* a HTTP reverse proxy/cache/mirror to serve some static files (zips and binaries) +* an internal Yum/Deb repository for OS packages +* an internal container image registry that need to be populated with all container images used by Kubespray +* [Optional] an internal PyPi server for python packages used by Kubespray +* [Optional] an internal Helm registry for Helm chart files + +You can get artifact lists with [generate_list.sh](/contrib/offline/generate_list.sh) script. +In addition, you can find some tools for offline deployment under [contrib/offline](/contrib/offline/README.md). 
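+
+As a sketch, generating those lists from a host that still has internet access
+could look like this (run from the root of a kubespray checkout):
+
+```bash
+bash contrib/offline/generate_list.sh
+# the generated lists (e.g. files and images) can then be used to populate
+# your internal file mirror and container image registry
+```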
+ +## Configure Inventory + +Once all artifacts are accessible from your internal network, **adjust** the following variables in [your inventory](/inventory/sample/group_vars/all/offline.yml) to match your environment: + +```yaml +# Registry overrides +kube_image_repo: "{{ registry_host }}" +gcr_image_repo: "{{ registry_host }}" +docker_image_repo: "{{ registry_host }}" +quay_image_repo: "{{ registry_host }}" + +kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm" +kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl" +kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet" +# etcd is optional if you **DON'T** use etcd_deployment=host +etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" +cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" +crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" +# If using Calico +calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# If using Calico with kdd +calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz" +# Containerd +containerd_download_url: "{{ files_repo }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +runc_download_url: "{{ files_repo }}/runc.{{ image_arch }}" +nerdctl_download_url: "{{ files_repo }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" +# Insecure registries for containerd +containerd_insecure_registries: + - "{{ registry_host }}" + +# CentOS/Redhat/AlmaLinux/Rocky Linux +## Docker / Containerd +docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +# Fedora +## Docker +docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +## Containerd +containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +# Debian +## Docker +docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +## Containerd +containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd" +containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +containerd_debian_repo_repokey: 'YOURREPOKEY' + +# Ubuntu +## Docker +docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +## Containerd +containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +containerd_ubuntu_repo_repokey: 'YOURREPOKEY' +``` + +For the OS specific settings, just define the one matching your OS. +If you use the settings like the one above, you'll need to define in your inventory the following variables: + +* `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images that the ones defined in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main.yml), you need to override the `*_image_repo` for these container images. 
If you want to make your life easier, use the same repository path, you won't have to override anything else. +* `files_repo`: HTTP webserver or reverse proxy that is able to serve the files listed above. Path is not important, you can store them anywhere as long as it's accessible by kubespray. It's recommended to use `*_version` in the path so that you don't need to modify this setting everytime kubespray upgrades one of these components. +* `yum_repo`/`debian_repo`/`ubuntu_repo`: OS package repository depending of your OS, should point to your internal repository. Adjust the path accordingly. + +## Install Kubespray Python Packages + +### Recommended way: Kubespray Container Image + +The easiest way is to use [kubespray container image](https://quay.io/kubespray/kubespray) as all the required packages are baked in the image. +Just copy the container image in your private container image registry and you are all set! + +### Manual installation + +Look at the `requirements.txt` file and check if your OS provides all packages out-of-the-box (Using the OS package manager). For those missing, you need to either use a proxy that has Internet access (typically from a DMZ) or setup a PyPi server in your network that will host these packages. + +If you're using a HTTP(S) proxy to download your python packages: + +```bash +sudo pip install --proxy=https://[username:password@]proxyserver:port -r requirements.txt +``` + +When using an internal PyPi server: + +```bash +# If you host all required packages +pip install -i https://pypiserver/pypi -r requirements.txt + +# If you only need the ones missing from the OS package manager +pip install -i https://pypiserver/pypi package_you_miss +``` + +## Run Kubespray as usual + +Once all artifacts are in place and your inventory properly set up, you can run kubespray with the regular `cluster.yaml` command: + +```bash +ansible-playbook -i inventory/my_airgap_cluster/hosts.yaml -b cluster.yml +``` + +If you use [Kubespray Container Image](#recommended-way:-kubespray-container-image), you can mount your inventory inside the container: + +```bash +docker run --rm -it -v path_to_inventory/my_airgap_cluster:inventory/my_airgap_cluster myprivateregisry.com/kubespray/kubespray:v2.14.0 ansible-playbook -i inventory/my_airgap_cluster/hosts.yaml -b cluster.yml +``` diff --git a/kubespray/docs/openeuler.md b/kubespray/docs/openeuler.md new file mode 100644 index 0000000..c585d37 --- /dev/null +++ b/kubespray/docs/openeuler.md @@ -0,0 +1,11 @@ +# OpenEuler + +[OpenEuler](https://www.openeuler.org/en/) Linux is supported with docker and containerd runtimes. + +**Note:** that OpenEuler Linux is not currently covered in kubespray CI and +support for it is currently considered experimental. + +At present, only `openEuler 22.03 LTS` has been adapted, which can support the deployment of aarch64 and x86_64 platforms. + +There are no special considerations for using OpenEuler Linux as the target OS +for Kubespray deployments. 
diff --git a/kubespray/docs/openstack.md b/kubespray/docs/openstack.md new file mode 100644 index 0000000..189592e --- /dev/null +++ b/kubespray/docs/openstack.md @@ -0,0 +1,158 @@ + +# OpenStack + +## Known compatible public clouds + +Kubespray has been tested on a number of OpenStack Public Clouds including (in alphabetical order): + +- [Auro](https://auro.io/) +- [Betacloud](https://www.betacloud.io/) +- [CityCloud](https://www.citycloud.com/) +- [DreamHost](https://www.dreamhost.com/cloud/computing/) +- [ELASTX](https://elastx.se/) +- [EnterCloudSuite](https://www.entercloudsuite.com/) +- [FugaCloud](https://fuga.cloud/) +- [Infomaniak](https://infomaniak.com) +- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tfvars +- [OVHcloud](https://www.ovhcloud.com/) +- [Rackspace](https://www.rackspace.com/) +- [Ultimum](https://ultimum.io/) +- [VexxHost](https://vexxhost.com/) +- [Zetta](https://www.zetta.io/) + +## The in-tree cloud provider + +To deploy Kubespray on [OpenStack](https://www.openstack.org/) uncomment the `cloud_provider` option in `group_vars/all/all.yml` and set it to `openstack`. + +After that make sure to source in your OpenStack credentials like you would do when using `nova-client` or `neutron-client` by using `source path/to/your/openstack-rc` or `. path/to/your/openstack-rc`. + +For those who prefer to pass the OpenStack CA certificate as a string, one can +base64 encode the cacert file and store it in the variable `openstack_cacert`. + +The next step is to make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack. +Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expected. + +Unless you are using calico or kube-router you can now run the playbook. + +## The external cloud provider + +The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21. + +The new cloud provider is configured to have Octavia by default in Kubespray. + +- Enable the new external cloud provider in `group_vars/all/all.yml`: + + ```yaml + cloud_provider: external + external_cloud_provider: openstack + ``` + +- Enable Cinder CSI in `group_vars/all/openstack.yml`: + + ```yaml + cinder_csi_enabled: true + ``` + +- Enable topology support (optional), if your openstack provider has custom Zone names you can override the default "nova" zone by setting the variable `cinder_topology_zones` + + ```yaml + cinder_topology: true + ``` + +- Enabling `cinder_csi_ignore_volume_az: true`, ignores volumeAZ and schedules on any of the available node AZ. + + ```yaml + cinder_csi_ignore_volume_az: true + ``` + +- If you are using OpenStack loadbalancer(s) replace the `openstack_lbaas_subnet_id` with the new `external_openstack_lbaas_subnet_id`. **Note** The new cloud provider is using Octavia instead of Neutron LBaaS by default! 
+- Enable 3 feature gates to allow migration of all volumes and storage classes (if you have any feature gates already set just add the 3 listed below): + + ```yaml + kube_feature_gates: + - CSIMigration=true + - CSIMigrationOpenStack=true + - ExpandCSIVolumes=true + ``` + +- If you are in a case of a multi-nic OpenStack VMs (see [kubernetes/cloud-provider-openstack#407](https://github.com/kubernetes/cloud-provider-openstack/issues/407) and [#6083](https://github.com/kubernetes-sigs/kubespray/issues/6083) for explanation), you should override the default OpenStack networking configuration: + + ```yaml + external_openstack_network_ipv6_disabled: false + external_openstack_network_internal_networks: [] + external_openstack_network_public_networks: [] + ``` + +- You can override the default OpenStack metadata configuration (see [#6338](https://github.com/kubernetes-sigs/kubespray/issues/6338) for explanation): + + ```yaml + external_openstack_metadata_search_order: "configDrive,metadataService" + ``` + +- Available variables for configuring lbaas: + + ```yaml + external_openstack_lbaas_create_monitor: false + external_openstack_lbaas_monitor_delay: "1m" + external_openstack_lbaas_monitor_timeout: "30s" + external_openstack_lbaas_monitor_max_retries: "3" + external_openstack_lbaas_provider: octavia + external_openstack_lbaas_use_octavia: false + external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" + external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" + external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" + external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" + external_openstack_lbaas_method: "ROUND_ROBIN" + external_openstack_lbaas_manage_security_groups: false + external_openstack_lbaas_internal_lb: false + + ``` + +- Run `source path/to/your/openstack-rc` to read your OpenStack credentials like `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, etc. Those variables are used for accessing OpenStack from the external cloud provider. +- Run the `cluster.yml` playbook + +## Additional step needed when using calico or kube-router + +Being L3 CNI, calico and kube-router do not encapsulate all packages with the hosts' ip addresses. Instead the packets will be routed with the PODs ip addresses directly. + +OpenStack will filter and drop all packets from ips it does not know to prevent spoofing. + +In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow pods packets by allowing the network they use. 
+ +First you will need the ids of your OpenStack instances that will run kubernetes: + + ```bash + openstack server list --project YOUR_PROJECT + +--------------------------------------+--------+----------------------------------+--------+-------------+ + | ID | Name | Tenant ID | Status | Power State | + +--------------------------------------+--------+----------------------------------+--------+-------------+ + | e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running | + | 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running | + ``` + +Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through using OpenStack): + + ```bash + openstack port list -c id -c device_id --project YOUR_PROJECT + +--------------------------------------+--------------------------------------+ + | id | device_id | + +--------------------------------------+--------------------------------------+ + | 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 | + | e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 | + ``` + +Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.) + + ```bash + # allow kube_service_addresses and kube_pods_subnet network + openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18 + openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18 + ``` + +If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep run" above with: + + ```bash + openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18 + ``` + +Now you can finally run the playbook. diff --git a/kubespray/docs/opensuse.md b/kubespray/docs/opensuse.md new file mode 100644 index 0000000..47b01f0 --- /dev/null +++ b/kubespray/docs/opensuse.md @@ -0,0 +1,17 @@ +# openSUSE Leap 15.3 and Tumbleweed + +openSUSE Leap installation Notes: + +- Install Ansible + + ```ShellSession + sudo zypper ref + sudo zypper -n install ansible + + ``` + +- Install Jinja2 and Python-Netaddr + + ```sudo zypper -n install python-Jinja2 python-netaddr``` + +Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment) diff --git a/kubespray/docs/proxy.md b/kubespray/docs/proxy.md new file mode 100644 index 0000000..9c72019 --- /dev/null +++ b/kubespray/docs/proxy.md @@ -0,0 +1,23 @@ +# Setting up Environment Proxy + +If you set http and https proxy, all nodes and loadbalancer will be excluded from proxy with generating no_proxy variable in `roles/kubespray-defaults/tasks/no_proxy.yml`, if you have additional resources for exclude add them to `additional_no_proxy` variable. If you want fully override your `no_proxy` setting, then fill in just `no_proxy` and no nodes or loadbalancer addresses will be added to no_proxy. 
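+
+For example, a combined configuration in your group vars could look like the
+following sketch (the individual settings are described below; the proxy host
+and the extra no_proxy entries are placeholders):
+
+```yml
+http_proxy: "http://proxy.example.com:3128"
+https_proxy: "http://proxy.example.com:3128"
+additional_no_proxy: "registry.example.internal,.example.internal"
+```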
+ +## Set proxy for http and https + + `http_proxy:"http://example.proxy.tld:port"` + `https_proxy:"http://example.proxy.tld:port"` + +## Set default no_proxy (this will override default no_proxy generation) + +`no_proxy: "node1,node1_ip,node2,node2_ip...additional_host"` + +## Set additional addresses to default no_proxy (all cluster nodes and loadbalancer) + +`additional_no_proxy: "additional_host1,additional_host2"` + +## Exclude workers from no_proxy + +Since workers are included in the no_proxy variable, by default, docker engine will be restarted on all nodes (all +pods will restart) when adding or removing workers. To override this behaviour by only including control plane nodes in the +no_proxy variable, set: +`no_proxy_exclude_workers: true` diff --git a/kubespray/docs/recover-control-plane.md b/kubespray/docs/recover-control-plane.md new file mode 100644 index 0000000..0b80da2 --- /dev/null +++ b/kubespray/docs/recover-control-plane.md @@ -0,0 +1,42 @@ + +# Recovering the control plane + +To recover from broken nodes in the control plane use the "recover\-control\-plane.yml" playbook. + +* Backup what you can +* Provision new nodes to replace the broken ones +* Place the surviving nodes of the control plane first in the "etcd" and "kube\_control\_plane" groups +* Add the new nodes below the surviving control plane nodes in the "etcd" and "kube\_control\_plane" groups + +Examples of what broken means in this context: + +* One or more bare metal node(s) suffer from unrecoverable hardware failure +* One or more node(s) fail during patching or upgrading +* Etcd database corruption +* Other node related failures leaving your control plane degraded or nonfunctional + +__Note that you need at least one functional node to be able to recover using this method.__ + +## Runbook + +* Move any broken etcd nodes into the "broken\_etcd" group, make sure the "etcd\_member\_name" variable is set. +* Move any broken control plane nodes into the "broken\_kube\_control\_plane" group. + +Then run the playbook with ```--limit etcd,kube_control_plane``` and increase the number of ETCD retries by setting ```-e etcd_retries=10``` or something even larger. The amount of retries required is difficult to predict. + +When finished you should have a fully working control plane again. + +## Recover from lost quorum + +The playbook attempts to figure out it the etcd quorum is intact. If quorum is lost it will attempt to take a snapshot from the first node in the "etcd" group and restore from that. If you would like to restore from an alternate snapshot set the path to that snapshot in the "etcd\_snapshot" variable. + +```-e etcd_snapshot=/tmp/etcd_snapshot``` + +## Caveats + +* The playbook has only been tested with fairly small etcd databases. +* If your new control plane nodes have new ip addresses you may have to change settings in various places. +* There may be disruptions while running the playbook. +* There are absolutely no guarantees. + +If possible try to break a cluster in the same way that your target cluster is broken and test to recover that before trying on the real target cluster. diff --git a/kubespray/docs/rhel.md b/kubespray/docs/rhel.md new file mode 100644 index 0000000..b9f302e --- /dev/null +++ b/kubespray/docs/rhel.md @@ -0,0 +1,38 @@ +# Red Hat Enterprise Linux (RHEL) + +## RHEL Support Subscription Registration + +In order to install packages via yum or dnf, RHEL 7/8 hosts are required to be registered for a valid Red Hat support subscription. 
+ +You can apply for a 1-year Development support subscription by creating a [Red Hat Developers](https://developers.redhat.com/) account. Be aware though that as the Red Hat Developers subscription is limited to only 1 year, it should not be used to register RHEL 7/8 hosts provisioned in Production environments. + +Once you have a Red Hat support account, simply add the credentials to the Ansible inventory parameters `rh_subscription_username` and `rh_subscription_password` prior to deploying Kubespray. If your company has a Corporate Red Hat support account, then obtain an **Organization ID** and **Activation Key**, and add these to the Ansible inventory parameters `rh_subscription_org_id` and `rh_subscription_activation_key` instead of using your Red Hat support account credentials. + +```ini +rh_subscription_username: "" +rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +``` + +Either the Red Hat support account username/password, or Organization ID/Activation Key combination must be specified in the Ansible inventory in order for the Red Hat subscription registration to complete successfully during the deployment of Kubespray. + +Update the Ansible inventory parameters `rh_subscription_usage`, `rh_subscription_role` and `rh_subscription_sla` if necessary to suit your specific requirements. + +```ini +rh_subscription_usage: "Development" +rh_subscription_role: "Red Hat Enterprise Server" +rh_subscription_sla: "Self-Support" +``` + +If the RHEL 7/8 hosts are already registered to a valid Red Hat support subscription via an alternative configuration management approach prior to the deployment of Kubespray, the successful RHEL `subscription-manager` status check will simply result in the RHEL subscription registration tasks being skipped. + +## RHEL 8 + +RHEL 8 ships only with iptables-nft (ie without iptables-legacy) +The only tested configuration for now is using Calico CNI +You need to use K8S 1.17+ and to add `calico_iptables_backend: "NFT"` to your configuration + +If you have containers that are using iptables in the host network namespace (`hostNetwork=true`), +you need to ensure they are using iptables-nft. +An example how k8s do the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966) diff --git a/kubespray/docs/roadmap.md b/kubespray/docs/roadmap.md new file mode 100644 index 0000000..78166b8 --- /dev/null +++ b/kubespray/docs/roadmap.md @@ -0,0 +1,3 @@ +# Kubespray's roadmap + +We are tracking the evolution towards Kubespray 3.0 in [#6400](https://github.com/kubernetes-sigs/kubespray/issues/6400) as well as in other open issue in our [github issues](https://github.com/kubernetes-sigs/kubespray/issues/) section. diff --git a/kubespray/docs/setting-up-your-first-cluster.md b/kubespray/docs/setting-up-your-first-cluster.md new file mode 100644 index 0000000..03622da --- /dev/null +++ b/kubespray/docs/setting-up-your-first-cluster.md @@ -0,0 +1,642 @@ +# Setting up your first cluster with Kubespray + +This tutorial walks you through the detailed steps for setting up Kubernetes +with [Kubespray](https://kubespray.io/). + +The guide is inspired on the tutorial [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way), with the +difference that here we want to showcase how to spin up a Kubernetes cluster +in a more managed fashion with Kubespray. 
+ +## Target Audience + +The target audience for this tutorial is someone looking for a +hands-on guide to get started with Kubespray. + +## Cluster Details + +* [kubespray](https://github.com/kubernetes-sigs/kubespray) +* [kubernetes](https://github.com/kubernetes/kubernetes) + +## Prerequisites + +* Google Cloud Platform: This tutorial leverages the [Google Cloud Platform](https://cloud.google.com/) to streamline provisioning of the compute infrastructure required to bootstrap a Kubernetes cluster from the ground up. [Sign up](https://cloud.google.com/free/) for $300 in free credits. +* Google Cloud Platform SDK: Follow the Google Cloud SDK [documentation](https://cloud.google.com/sdk/) to install and configure the `gcloud` command + line utility. Make sure to set a default compute region and compute zone. +* The [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) command line utility is used to interact with the Kubernetes + API Server. +* Linux or Mac environment with Python 3 + +## Provisioning Compute Resources + +Kubernetes requires a set of machines to host the Kubernetes control plane and the worker nodes where containers are ultimately run. In this lab you will provision the compute resources required for running a secure and highly available Kubernetes cluster across a single [compute zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones). + +### Networking + +The Kubernetes [networking model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#kubernetes-model) assumes a flat network in which containers and nodes can communicate with each other. In cases where this is not desired [network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) can limit how groups of containers are allowed to communicate with each other and external network endpoints. + +> Setting up network policies is out of scope for this tutorial. + +#### Virtual Private Cloud Network + +In this section a dedicated [Virtual Private Cloud](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) (VPC) network will be setup to host the Kubernetes cluster. + +Create the `kubernetes-the-kubespray-way` custom VPC network: + +```ShellSession +gcloud compute networks create kubernetes-the-kubespray-way --subnet-mode custom +``` + +A [subnet](https://cloud.google.com/compute/docs/vpc/#vpc_networks_and_subnets) must be provisioned with an IP address range large enough to assign a private IP address to each node in the Kubernetes cluster. + +Create the `kubernetes` subnet in the `kubernetes-the-kubespray-way` VPC network: + +```ShellSession +gcloud compute networks subnets create kubernetes \ + --network kubernetes-the-kubespray-way \ + --range 10.240.0.0/24 + ``` + +> The `10.240.0.0/24` IP address range can host up to 254 compute instances. + +#### Firewall Rules + +Create a firewall rule that allows internal communication across all protocols. +It is important to note that the vxlan protocol has to be allowed in order for +the calico (see later) networking plugin to work. 
+ +```ShellSession +gcloud compute firewall-rules create kubernetes-the-kubespray-way-allow-internal \ + --allow tcp,udp,icmp,vxlan \ + --network kubernetes-the-kubespray-way \ + --source-ranges 10.240.0.0/24 +``` + +Create a firewall rule that allows external SSH, ICMP, and HTTPS: + +```ShellSession +gcloud compute firewall-rules create kubernetes-the-kubespray-way-allow-external \ + --allow tcp:80,tcp:6443,tcp:443,tcp:22,icmp \ + --network kubernetes-the-kubespray-way \ + --source-ranges 0.0.0.0/0 +``` + +It is not feasible to restrict the firewall to a specific IP address from +where you are accessing the cluster as the nodes also communicate over the public internet and would otherwise run into +this firewall. Technically you could limit the firewall to the (fixed) IP +addresses of the cluster nodes and the remote IP addresses for accessing the +cluster. + +### Compute Instances + +The compute instances in this lab will be provisioned using [Ubuntu Server](https://www.ubuntu.com/server) 18.04. +Each compute instance will be provisioned with a fixed private IP address and + a public IP address (that can be fixed - see [guide](https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address)). +Using fixed public IP addresses has the advantage that our cluster node +configuration does not need to be updated with new public IP addresses every +time the machines are shut down and later on restarted. + +Create three compute instances which will host the Kubernetes control plane: + +```ShellSession +for i in 0 1 2; do + gcloud compute instances create controller-${i} \ + --async \ + --boot-disk-size 200GB \ + --can-ip-forward \ + --image-family ubuntu-1804-lts \ + --image-project ubuntu-os-cloud \ + --machine-type e2-standard-2 \ + --private-network-ip 10.240.0.1${i} \ + --scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \ + --subnet kubernetes \ + --tags kubernetes-the-kubespray-way,controller +done +``` + +> Do not forget to fix the IP addresses if you plan on re-using the cluster +after temporarily shutting down the VMs - see [guide](https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address) + +Create three compute instances which will host the Kubernetes worker nodes: + +```ShellSession +for i in 0 1 2; do + gcloud compute instances create worker-${i} \ + --async \ + --boot-disk-size 200GB \ + --can-ip-forward \ + --image-family ubuntu-1804-lts \ + --image-project ubuntu-os-cloud \ + --machine-type e2-standard-2 \ + --private-network-ip 10.240.0.2${i} \ + --scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \ + --subnet kubernetes \ + --tags kubernetes-the-kubespray-way,worker +done +``` + +> Do not forget to fix the IP addresses if you plan on re-using the cluster +after temporarily shutting down the VMs - see [guide](https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address) + +List the compute instances in your default compute zone: + +```ShellSession +gcloud compute instances list --filter="tags.items=kubernetes-the-kubespray-way" +``` + +> Output + +```ShellSession +NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS +controller-0 us-west1-c e2-standard-2 10.240.0.10 XX.XX.XX.XXX RUNNING +controller-1 us-west1-c e2-standard-2 10.240.0.11 XX.XXX.XXX.XX RUNNING +controller-2 us-west1-c e2-standard-2 10.240.0.12 XX.XXX.XX.XXX RUNNING +worker-0 us-west1-c e2-standard-2 10.240.0.20 XX.XX.XXX.XXX RUNNING +worker-1 
us-west1-c e2-standard-2 10.240.0.21 XX.XX.XX.XXX RUNNING +worker-2 us-west1-c e2-standard-2 10.240.0.22 XX.XXX.XX.XX RUNNING +``` + +### Configuring SSH Access + +Kubespray is relying on SSH to configure the controller and worker instances. + +Test SSH access to the `controller-0` compute instance: + +```ShellSession +IP_CONTROLLER_0=$(gcloud compute instances list --filter="tags.items=kubernetes-the-kubespray-way AND name:controller-0" --format="value(EXTERNAL_IP)") +USERNAME=$(whoami) +ssh $USERNAME@$IP_CONTROLLER_0 +``` + +If this is your first time connecting to a compute instance SSH keys will be +generated for you. In this case you will need to enter a passphrase at the +prompt to continue. + +> If you get a 'Remote host identification changed!' warning, you probably +already connected to that IP address in the past with another host key. You +can remove the old host key by running `ssh-keygen -R $IP_CONTROLLER_0` + +Please repeat this procedure for all the controller and worker nodes, to +ensure that SSH access is properly functioning for all nodes. + +## Set-up Kubespray + +The following set of instruction is based on the [Quick Start](https://github.com/kubernetes-sigs/kubespray) but slightly altered for our +set-up. + +As Ansible is a python application, we will create a fresh virtual +environment to install the dependencies for the Kubespray playbook: + +```ShellSession +python3 -m venv venv +source venv/bin/activate +``` + +Next, we will git clone the Kubespray code into our working directory: + +```ShellSession +git clone https://github.com/kubernetes-sigs/kubespray.git +cd kubespray +git checkout release-2.17 +``` + +Now we need to install the dependencies for Ansible to run the Kubespray +playbook: + +```ShellSession +pip install -r requirements.txt +``` + +Copy ``inventory/sample`` as ``inventory/mycluster``: + +```ShellSession +cp -rfp inventory/sample inventory/mycluster +``` + +Update Ansible inventory file with inventory builder: + +```ShellSession +declare -a IPS=($(gcloud compute instances list --filter="tags.items=kubernetes-the-kubespray-way" --format="value(EXTERNAL_IP)" | tr '\n' ' ')) +CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]} +``` + +Open the generated `inventory/mycluster/hosts.yaml` file and adjust it so +that controller-0, controller-1 and controller-2 are control plane nodes and +worker-0, worker-1 and worker-2 are worker nodes. Also update the `ip` to the respective local VPC IP and +remove the `access_ip`. + +The main configuration for the cluster is stored in +`inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml`. In this file we + will update the `supplementary_addresses_in_ssl_keys` with a list of the IP + addresses of the controller nodes. In that way we can access the + kubernetes API server as an administrator from outside the VPC network. You + can also see that the `kube_network_plugin` is by default set to 'calico'. + If you set this to 'cloud', it did not work on GCP at the time of testing. + +Kubespray also offers to easily enable popular kubernetes add-ons. You can +modify the +list of add-ons in `inventory/mycluster/group_vars/k8s_cluster/addons.yml`. +Let's enable the metrics server as this is a crucial monitoring element for +the kubernetes cluster, just change the 'false' to 'true' for +`metrics_server_enabled`. 
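+
+As a sketch, the two edits described above could end up looking like this (the
+IP addresses are placeholders for your controllers' external IPs):
+
+```yaml
+# inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml
+supplementary_addresses_in_ssl_keys: [203.0.113.10, 203.0.113.11, 203.0.113.12]
+
+# inventory/mycluster/group_vars/k8s_cluster/addons.yml
+metrics_server_enabled: true
+```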
+
+Now we will deploy the configuration:
+
+```ShellSession
+ansible-playbook -i inventory/mycluster/hosts.yaml -u $USERNAME -b -v --private-key=~/.ssh/id_rsa cluster.yml
+```
+
+Ansible will now execute the playbook; this can take up to 20 minutes.
+
+## Access the Kubernetes cluster
+
+We will leverage a kubeconfig file from one of the controller nodes to access
+the cluster as administrator from our local workstation.
+
+> In this simplified set-up, we did not include a load balancer that usually sits on top of the three controller nodes for a highly available API server endpoint. Instead, we connect directly to one of the three controllers.
+
+First, we need to change the ownership of the kubeconfig file on one of the
+controller nodes:
+
+```ShellSession
+ssh $USERNAME@$IP_CONTROLLER_0
+USERNAME=$(whoami)
+sudo chown -R $USERNAME:$USERNAME /etc/kubernetes/admin.conf
+exit
+```
+
+Now we will copy over the kubeconfig file:
+
+```ShellSession
+scp $USERNAME@$IP_CONTROLLER_0:/etc/kubernetes/admin.conf kubespray-do.conf
+```
+
+This kubeconfig file uses the internal IP address of the controller node to
+access the API server, so it will not work from outside the VPC network. We
+need to change the API server IP address to the controller node's external IP
+address. The external IP address will be accepted during the TLS negotiation
+because we added the controllers' external IP addresses to the SSL certificate
+configuration.
+Open the file and modify the server IP address from the local IP to the
+external IP address of controller-0, as stored in $IP_CONTROLLER_0.
+
+> Example
+
+```ShellSession
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority-data: XXX
+    server: https://35.205.205.80:6443
+  name: cluster.local
+...
+```
+
+Now, we load the configuration for `kubectl`:
+
+```ShellSession
+export KUBECONFIG=$PWD/kubespray-do.conf
+```
+
+We should be all set to communicate with our cluster from our local workstation:
+
+```ShellSession
+kubectl get nodes
+```
+
+> Output
+
+```ShellSession
+NAME           STATUS   ROLES    AGE   VERSION
+controller-0   Ready    master   47m   v1.17.9
+controller-1   Ready    master   46m   v1.17.9
+controller-2   Ready    master   46m   v1.17.9
+worker-0       Ready             45m   v1.17.9
+worker-1       Ready             45m   v1.17.9
+worker-2       Ready             45m   v1.17.9
+```
+
+## Smoke tests
+
+### Metrics
+
+Verify that the metrics server add-on was correctly installed and works:
+
+```ShellSession
+kubectl top nodes
+```
+
+> Output
+
+```ShellSession
+NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
+controller-0   191m         10%    1956Mi          26%
+controller-1   190m         10%    1828Mi          24%
+controller-2   182m         10%    1839Mi          24%
+worker-0       87m          4%     1265Mi          16%
+worker-1       102m         5%     1268Mi          16%
+worker-2       108m         5%     1299Mi          17%
+```
+
+Please note that metrics might not be available at first; it can take a couple
+of minutes before you can actually retrieve them.
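+
+If `kubectl top nodes` errors out right after the deployment, one way to wait
+for the metrics pipeline to come up is to simply poll it with plain `kubectl`
+commands (nothing Kubespray-specific here):
+
+```ShellSession
+# re-run every few seconds until the metrics API starts returning data
+watch -n 10 kubectl top nodes
+kubectl top pods --all-namespaces
+```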
+ +### Network + +Let's verify if the network layer is properly functioning and pods can reach +each other: + +```ShellSession +kubectl run myshell1 -it --rm --image busybox -- sh +hostname -i +# launch myshell2 in separate terminal (see next code block) and ping the hostname of myshell2 +ping +``` + +```ShellSession +kubectl run myshell2 -it --rm --image busybox -- sh +hostname -i +ping +``` + +> Output + +```ShellSession +PING 10.233.108.2 (10.233.108.2): 56 data bytes +64 bytes from 10.233.108.2: seq=0 ttl=62 time=2.876 ms +64 bytes from 10.233.108.2: seq=1 ttl=62 time=0.398 ms +64 bytes from 10.233.108.2: seq=2 ttl=62 time=0.378 ms +^C +--- 10.233.108.2 ping statistics --- +3 packets transmitted, 3 packets received, 0% packet loss +round-trip min/avg/max = 0.378/1.217/2.876 ms +``` + +### Deployments + +In this section you will verify the ability to create and manage [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/). + +Create a deployment for the [nginx](https://nginx.org/en/) web server: + +```ShellSession +kubectl create deployment nginx --image=nginx +``` + +List the pod created by the `nginx` deployment: + +```ShellSession +kubectl get pods -l app=nginx +``` + +> Output + +```ShellSession +NAME READY STATUS RESTARTS AGE +nginx-86c57db685-bmtt8 1/1 Running 0 18s +``` + +#### Port Forwarding + +In this section you will verify the ability to access applications remotely using [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/). + +Retrieve the full name of the `nginx` pod: + +```ShellSession +POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}") +``` + +Forward port `8080` on your local machine to port `80` of the `nginx` pod: + +```ShellSession +kubectl port-forward $POD_NAME 8080:80 +``` + +> Output + +```ShellSession +Forwarding from 127.0.0.1:8080 -> 80 +Forwarding from [::1]:8080 -> 80 +``` + +In a new terminal make an HTTP request using the forwarding address: + +```ShellSession +curl --head http://127.0.0.1:8080 +``` + +> Output + +```ShellSession +HTTP/1.1 200 OK +Server: nginx/1.19.1 +Date: Thu, 13 Aug 2020 11:12:04 GMT +Content-Type: text/html +Content-Length: 612 +Last-Modified: Tue, 07 Jul 2020 15:52:25 GMT +Connection: keep-alive +ETag: "5f049a39-264" +Accept-Ranges: bytes +``` + +Switch back to the previous terminal and stop the port forwarding to the `nginx` pod: + +```ShellSession +Forwarding from 127.0.0.1:8080 -> 80 +Forwarding from [::1]:8080 -> 80 +Handling connection for 8080 +^C +``` + +#### Logs + +In this section you will verify the ability to [retrieve container logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/). + +Print the `nginx` pod logs: + +```ShellSession +kubectl logs $POD_NAME +``` + +> Output + +```ShellSession +... +127.0.0.1 - - [13/Aug/2020:11:12:04 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.64.1" "-" +``` + +#### Exec + +In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/#running-individual-commands-in-a-container). 
+ +Print the nginx version by executing the `nginx -v` command in the `nginx` container: + +```ShellSession +kubectl exec -ti $POD_NAME -- nginx -v +``` + +> Output + +```ShellSession +nginx version: nginx/1.19.1 +``` + +### Kubernetes services + +#### Expose outside of the cluster + +In this section you will verify the ability to expose applications using a [Service](https://kubernetes.io/docs/concepts/services-networking/service/). + +Expose the `nginx` deployment using a [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) service: + +```ShellSession +kubectl expose deployment nginx --port 80 --type NodePort +``` + +> The LoadBalancer service type can not be used because your cluster is not configured with [cloud provider integration](https://kubernetes.io/docs/getting-started-guides/scratch/#cloud-provider). Setting up cloud provider integration is out of scope for this tutorial. + +Retrieve the node port assigned to the `nginx` service: + +```ShellSession +NODE_PORT=$(kubectl get svc nginx \ + --output=jsonpath='{range .spec.ports[0]}{.nodePort}') +``` + +Create a firewall rule that allows remote access to the `nginx` node port: + +```ShellSession +gcloud compute firewall-rules create kubernetes-the-kubespray-way-allow-nginx-service \ + --allow=tcp:${NODE_PORT} \ + --network kubernetes-the-kubespray-way +``` + +Retrieve the external IP address of a worker instance: + +```ShellSession +EXTERNAL_IP=$(gcloud compute instances describe worker-0 \ + --format 'value(networkInterfaces[0].accessConfigs[0].natIP)') +``` + +Make an HTTP request using the external IP address and the `nginx` node port: + +```ShellSession +curl -I http://${EXTERNAL_IP}:${NODE_PORT} +``` + +> Output + +```ShellSession +HTTP/1.1 200 OK +Server: nginx/1.19.1 +Date: Thu, 13 Aug 2020 11:15:02 GMT +Content-Type: text/html +Content-Length: 612 +Last-Modified: Tue, 07 Jul 2020 15:52:25 GMT +Connection: keep-alive +ETag: "5f049a39-264" +Accept-Ranges: bytes +``` + +#### Local DNS + +We will now also verify that kubernetes built-in DNS works across namespaces. +Create a namespace: + +```ShellSession +kubectl create namespace dev +``` + +Create an nginx deployment and expose it within the cluster: + +```ShellSession +kubectl create deployment nginx --image=nginx -n dev +kubectl expose deployment nginx --port 80 --type ClusterIP -n dev +``` + +Run a temporary container to see if we can reach the service from the default +namespace: + +```ShellSession +kubectl run curly -it --rm --image curlimages/curl:7.70.0 -- /bin/sh +curl --head http://nginx.dev:80 +``` + +> Output + +```ShellSession +HTTP/1.1 200 OK +Server: nginx/1.19.1 +Date: Thu, 13 Aug 2020 11:15:59 GMT +Content-Type: text/html +Content-Length: 612 +Last-Modified: Tue, 07 Jul 2020 15:52:25 GMT +Connection: keep-alive +ETag: "5f049a39-264" +Accept-Ranges: bytes +``` + +Type `exit` to leave the shell. + +## Cleaning Up + +### Kubernetes resources + +Delete the dev namespace, the nginx deployment and service: + +```ShellSession +kubectl delete namespace dev +kubectl delete deployment nginx +kubectl delete svc/nginx +``` + +### Kubernetes state + +Note: you can skip this step if you want to entirely remove the machines. 
+
+If you want to keep the VMs and just remove the cluster state, you can simply
+run another Ansible playbook:
+
+```ShellSession
+ansible-playbook -i inventory/mycluster/hosts.yaml -u $USERNAME -b -v --private-key=~/.ssh/id_rsa reset.yml
+```
+
+Resetting the cluster to the VMs' original state usually takes a couple
+of minutes.
+
+### Compute instances
+
+Delete the controller and worker compute instances:
+
+```ShellSession
+gcloud -q compute instances delete \
+  controller-0 controller-1 controller-2 \
+  worker-0 worker-1 worker-2 \
+  --zone $(gcloud config get-value compute/zone)
+```
+
+### Network
+
+Delete the fixed IP addresses (assuming you named them equal to the VM names),
+if any:
+
+```ShellSession
+gcloud -q compute addresses delete controller-0 controller-1 controller-2 \
+  worker-0 worker-1 worker-2
+```
+
+Delete the `kubernetes-the-kubespray-way` firewall rules:
+
+```ShellSession
+gcloud -q compute firewall-rules delete \
+  kubernetes-the-kubespray-way-allow-nginx-service \
+  kubernetes-the-kubespray-way-allow-internal \
+  kubernetes-the-kubespray-way-allow-external
+```
+
+Delete the `kubernetes-the-kubespray-way` network VPC:
+
+```ShellSession
+gcloud -q compute networks subnets delete kubernetes
+gcloud -q compute networks delete kubernetes-the-kubespray-way
+```
diff --git a/kubespray/docs/test_cases.md b/kubespray/docs/test_cases.md
new file mode 100644
index 0000000..1fdce68
--- /dev/null
+++ b/kubespray/docs/test_cases.md
@@ -0,0 +1,33 @@
+# Node Layouts
+
+There are four node layout types: `default`, `separate`, `ha`, and `scale`.
+
+`default` is a non-HA two-node setup with one separate `kube_node`
+and the `etcd` group merged with the `kube_control_plane`.
+
+The `separate` layout has only one node of each type: one `kube_control_plane`,
+one `kube_node`, and one `etcd` cluster member.
+
+The `ha` layout consists of two etcd nodes, two control planes and a single worker node,
+with role intersection.
+
+The `scale` layout can be combined with the above layouts (`ha-scale`, `separate-scale`). It includes 200 fake hosts
+in the Ansible inventory. This helps test TLS certificate generation at scale
+to prevent regressions and profile certain long-running tasks. These nodes are
+never actually deployed, but certificates are generated for them.
+
+Note that the canal network plugin deploys flannel as well as the calico policy controller.
+
+## Test cases
+
+The [CI Matrix](/docs/ci.md) displays the OS, network plugin and container manager combinations that are tested.
+
+All tests are broken down into the following "stages" (a "stage" is a build step of the build pipeline):
+
+- _unit_tests_: Linting, markdown, vagrant & terraform validation etc...
+- _part1_: Molecule and AIO tests
+- _part2_: Standard tests with different layouts and OS/Runtime/Network
+- _part3_: Upgrade jobs, terraform jobs and recover control plane tests
+- _special_: Other jobs (manuals)
+
+The stages are ordered as `unit_tests->part1->part2->part3->special`.
diff --git a/kubespray/docs/uoslinux.md b/kubespray/docs/uoslinux.md
new file mode 100644
index 0000000..1078389
--- /dev/null
+++ b/kubespray/docs/uoslinux.md
@@ -0,0 +1,9 @@
+# UOS Linux
+
+UOS Linux (UnionTech OS Server 20) is supported with the docker and containerd runtimes.
+
+**Note:** UOS Linux is not currently covered in Kubespray CI and
+support for it is considered experimental.
+
+There are no special considerations for using UOS Linux as the target OS
+for Kubespray deployments.
diff --git a/kubespray/docs/upgrades.md b/kubespray/docs/upgrades.md new file mode 100644 index 0000000..22d81d5 --- /dev/null +++ b/kubespray/docs/upgrades.md @@ -0,0 +1,401 @@ +# Upgrading Kubernetes in Kubespray + +Kubespray handles upgrades the same way it handles initial deployment. That is to +say that each component is laid down in a fixed order. + +You can also individually control versions of components by explicitly defining their +versions. Here are all version vars for each component: + +* docker_version +* docker_containerd_version (relevant when `container_manager` == `docker`) +* containerd_version (relevant when `container_manager` == `containerd`) +* kube_version +* etcd_version +* calico_version +* calico_cni_version +* weave_version +* flannel_version +* kubedns_version + +:warning: [Attempting to upgrade from an older release straight to the latest release is unsupported and likely to break something](https://github.com/kubernetes-sigs/kubespray/issues/3849#issuecomment-451386515) :warning: + +See [Multiple Upgrades](#multiple-upgrades) for how to upgrade from older Kubespray release to the latest release + +## Unsafe upgrade example + +If you wanted to upgrade just kube_version from v1.18.10 to v1.19.7, you could +deploy the following way: + +```ShellSession +ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.18.10 -e upgrade_cluster_setup=true +``` + +And then repeat with v1.19.7 as kube_version: + +```ShellSession +ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.19.7 -e upgrade_cluster_setup=true +``` + +The var ```-e upgrade_cluster_setup=true``` is needed to be set in order to migrate the deploys of e.g kube-apiserver inside the cluster immediately which is usually only done in the graceful upgrade. (Refer to [#4139](https://github.com/kubernetes-sigs/kubespray/issues/4139) and [#4736](https://github.com/kubernetes-sigs/kubespray/issues/4736)) + +## Graceful upgrade + +Kubespray also supports cordon, drain and uncordoning of nodes when performing +a cluster upgrade. There is a separate playbook used for this purpose. It is +important to note that upgrade-cluster.yml can only be used for upgrading an +existing cluster. That means there must be at least 1 kube_control_plane already +deployed. + +```ShellSession +ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.19.7 +``` + +After a successful upgrade, the Server Version should be updated: + +```ShellSession +$ kubectl version +Client Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.7", GitCommit:"1dd5338295409edcfff11505e7bb246f0d325d15", GitTreeState:"clean", BuildDate:"2021-01-13T13:23:52Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"linux/amd64"} +Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.7", GitCommit:"1dd5338295409edcfff11505e7bb246f0d325d15", GitTreeState:"clean", BuildDate:"2021-01-13T13:15:20Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"linux/amd64"} +``` + +### Pausing the upgrade + +If you want to manually control the upgrade procedure, you can set some variables to pause the upgrade playbook. Pausing *before* upgrading each upgrade may be useful for inspecting pods running on that node, or performing manual actions on the node: + +* `upgrade_node_confirm: true` - This will pause the playbook execution prior to upgrading each node. The play will resume when manually approved by typing "yes" at the terminal. 
+* `upgrade_node_pause_seconds: 60` - This will pause the playbook execution for 60 seconds prior to upgrading each node. The play will resume automatically after 60 seconds. + +Pausing *after* upgrading each node may be useful for rebooting the node to apply kernel updates, or testing the still-cordoned node: + +* `upgrade_node_post_upgrade_confirm: true` - This will pause the playbook execution after upgrading each node, but before the node is uncordoned. The play will resume when manually approved by typing "yes" at the terminal. +* `upgrade_node_post_upgrade_pause_seconds: 60` - This will pause the playbook execution for 60 seconds after upgrading each node, but before the node is uncordoned. The play will resume automatically after 60 seconds. + +## Node-based upgrade + +If you don't want to upgrade all nodes in one run, you can use `--limit` [patterns](https://docs.ansible.com/ansible/latest/user_guide/intro_patterns.html#patterns-and-ansible-playbook-flags). + +Before using `--limit` run playbook `facts.yml` without the limit to refresh facts cache for all nodes: + +```ShellSession +ansible-playbook facts.yml -b -i inventory/sample/hosts.ini +``` + +After this upgrade control plane and etcd groups [#5147](https://github.com/kubernetes-sigs/kubespray/issues/5147): + +```ShellSession +ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.20.7 --limit "kube_control_plane:etcd" +``` + +Now you can upgrade other nodes in any order and quantity: + +```ShellSession +ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.20.7 --limit "node4:node6:node7:node12" +ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.20.7 --limit "node5*" +``` + +## Multiple upgrades + +:warning: [Do not skip releases when upgrading--upgrade by one tag at a time.](https://github.com/kubernetes-sigs/kubespray/issues/3849#issuecomment-451386515) :warning: + +For instance, if you're on v2.6.0, then check out v2.7.0, run the upgrade, check out the next tag, and run the next upgrade, etc. + +Assuming you don't explicitly define a kubernetes version in your k8s_cluster.yml, you simply check out the next tag and run the upgrade-cluster.yml playbook + +* If you do define kubernetes version in your inventory (e.g. group_vars/k8s_cluster.yml) then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=v1.11.3` + + Otherwise, the upgrade will leave your cluster at the same k8s version defined in your inventory vars. + +The below example shows taking a cluster that was set up for v2.6.0 up to v2.10.0 + +```ShellSession +$ kubectl get node +NAME STATUS ROLES AGE VERSION +apollo Ready master,node 1h v1.10.4 +boomer Ready master,node 42m v1.10.4 +caprica Ready master,node 42m v1.10.4 + +$ git describe --tags +v2.6.0 + +$ git tag +... +v2.6.0 +v2.7.0 +v2.8.0 +v2.8.1 +v2.8.2 +... + +$ git checkout v2.7.0 +Previous HEAD position was 8b3ce6e4 bump upgrade tests to v2.5.0 commit (#3087) +HEAD is now at 05dabb7e Fix Bionic networking restart error #3430 (#3431) + +# NOTE: May need to `pip3 install -r requirements.txt` when upgrading. + +ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml + +... 
+ +$ kubectl get node +NAME STATUS ROLES AGE VERSION +apollo Ready master,node 1h v1.11.3 +boomer Ready master,node 1h v1.11.3 +caprica Ready master,node 1h v1.11.3 + +$ git checkout v2.8.0 +Previous HEAD position was 05dabb7e Fix Bionic networking restart error #3430 (#3431) +HEAD is now at 9051aa52 Fix ubuntu-contiv test failed (#3808) +``` + +:info: NOTE: Review changes between the sample inventory and your inventory when upgrading versions. :info: + +Some deprecations between versions that mean you can't just upgrade straight from 2.7.0 to 2.8.0 if you started with the sample inventory. + +In this case, I set "kubeadm_enabled" to false, knowing that it is deprecated and removed by 2.9.0, to delay converting the cluster to kubeadm as long as I could. + +```ShellSession +$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml +... + "msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release." +... +Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden): +yes +... + +$ kubectl get node +NAME STATUS ROLES AGE VERSION +apollo Ready master,node 114m v1.12.3 +boomer Ready master,node 114m v1.12.3 +caprica Ready master,node 114m v1.12.3 + +$ git checkout v2.8.1 +Previous HEAD position was 9051aa52 Fix ubuntu-contiv test failed (#3808) +HEAD is now at 2ac1c756 More Feature/2.8 backports for 2.8.1 (#3911) + +$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml +... + "msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release." +... +Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden): +yes +... + +$ kubectl get node +NAME STATUS ROLES AGE VERSION +apollo Ready master,node 2h36m v1.12.4 +boomer Ready master,node 2h36m v1.12.4 +caprica Ready master,node 2h36m v1.12.4 + +$ git checkout v2.8.2 +Previous HEAD position was 2ac1c756 More Feature/2.8 backports for 2.8.1 (#3911) +HEAD is now at 4167807f Upgrade to 1.12.5 (#4066) + +$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml +... + "msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release." +... +Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden): +yes +... + +$ kubectl get node +NAME STATUS ROLES AGE VERSION +apollo Ready master,node 3h3m v1.12.5 +boomer Ready master,node 3h3m v1.12.5 +caprica Ready master,node 3h3m v1.12.5 + +$ git checkout v2.8.3 +Previous HEAD position was 4167807f Upgrade to 1.12.5 (#4066) +HEAD is now at ea41fc5e backport cve-2019-5736 to release-2.8 (#4234) + +$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml +... + "msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release." +... +Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden): +yes +... + +$ kubectl get node +NAME STATUS ROLES AGE VERSION +apollo Ready master,node 5h18m v1.12.5 +boomer Ready master,node 5h18m v1.12.5 +caprica Ready master,node 5h18m v1.12.5 + +$ git checkout v2.8.4 +Previous HEAD position was ea41fc5e backport cve-2019-5736 to release-2.8 (#4234) +HEAD is now at 3901480b go to k8s 1.12.7 (#4400) + +$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml +... + "msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release." +... 
+Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden): +yes +... + +$ kubectl get node +NAME STATUS ROLES AGE VERSION +apollo Ready master,node 5h37m v1.12.7 +boomer Ready master,node 5h37m v1.12.7 +caprica Ready master,node 5h37m v1.12.7 + +$ git checkout v2.8.5 +Previous HEAD position was 3901480b go to k8s 1.12.7 (#4400) +HEAD is now at 6f97687d Release 2.8 robust san handling (#4478) + +$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml +... + "msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release." +... +Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden): +yes +... + +$ kubectl get node +NAME STATUS ROLES AGE VERSION +apollo Ready master,node 5h45m v1.12.7 +boomer Ready master,node 5h45m v1.12.7 +caprica Ready master,node 5h45m v1.12.7 + +$ git checkout v2.9.0 +Previous HEAD position was 6f97687d Release 2.8 robust san handling (#4478) +HEAD is now at a4e65c7c Upgrade to Ansible >2.7.0 (#4471) +``` + +:warning: IMPORTANT: Some of the variable formats changed in the k8s_cluster.yml between 2.8.5 and 2.9.0 :warning: + +If you do not keep your inventory copy up to date, **your upgrade will fail** and your first master will be left non-functional until fixed and re-run. + +It is at this point the cluster was upgraded from non-kubeadm to kubeadm as per the deprecation warning. + +```ShellSession +ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml + +... + +$ kubectl get node +NAME STATUS ROLES AGE VERSION +apollo Ready master,node 6h54m v1.13.5 +boomer Ready master,node 6h55m v1.13.5 +caprica Ready master,node 6h54m v1.13.5 + +# Watch out: 2.10.0 is hiding between 2.1.2 and 2.2.0 + +$ git tag +... +v2.1.0 +v2.1.1 +v2.1.2 +v2.10.0 +v2.2.0 +... + +$ git checkout v2.10.0 +Previous HEAD position was a4e65c7c Upgrade to Ansible >2.7.0 (#4471) +HEAD is now at dcd9c950 Add etcd role dependency on kube user to avoid etcd role failure when running scale.yml with a fresh node. (#3240) (#4479) + +ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml + +... + +$ kubectl get node +NAME STATUS ROLES AGE VERSION +apollo Ready master,node 7h40m v1.14.1 +boomer Ready master,node 7h40m v1.14.1 +caprica Ready master,node 7h40m v1.14.1 + + +``` + +## Upgrading to v2.19 + +`etcd_kubeadm_enabled` is being deprecated at v2.19. The same functionality is achievable by setting `etcd_deployment_type` to `kubeadm`. +Deploying etcd using kubeadm is experimental and is only available for either new or deployments where `etcd_kubeadm_enabled` was set to `true` while deploying the cluster. + +From 2.19 and onward `etcd_deployment_type` variable will be placed in `group_vars/all/etcd.yml` instead of `group_vars/etcd.yml`, due to scope issues. +The placement of the variable is only important for `etcd_deployment_type: kubeadm` right now. However, since this might change in future updates, it is recommended to move the variable. + +Upgrading is straightforward; no changes are required if `etcd_kubeadm_enabled` was not set to `true` when deploying. + +If you have a cluster where `etcd` was deployed using `kubeadm`, you will need to remove `etcd_kubeadm_enabled` the variable. Then move `etcd_deployment_type` variable from `group_vars/etcd.yml` to `group_vars/all/etcd.yml` due to scope issues and set `etcd_deployment_type` to `kubeadm`. 
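+
+For such a cluster, the resulting inventory change could look roughly like
+this (a sketch using only the variables discussed above):
+
+```yml
+# group_vars/etcd.yml (before) - remove this deprecated variable:
+# etcd_kubeadm_enabled: true
+
+# group_vars/all/etcd.yml (after)
+etcd_deployment_type: kubeadm
+```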
+ +## Upgrade order + +As mentioned above, components are upgraded in the order in which they were +installed in the Ansible playbook. The order of component installation is as +follows: + +* Docker +* Containerd +* etcd +* kubelet and kube-proxy +* network_plugin (such as Calico or Weave) +* kube-apiserver, kube-scheduler, and kube-controller-manager +* Add-ons (such as KubeDNS) + +### Component-based upgrades + +A deployer may want to upgrade specific components in order to minimize risk +or save time. This strategy is not covered by CI as of this writing, so it is +not guaranteed to work. + +These commands are useful only for upgrading fully-deployed, healthy, existing +hosts. This will definitely not work for undeployed or partially deployed +hosts. + +Upgrade docker: + +```ShellSession +ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=docker +``` + +Upgrade etcd: + +```ShellSession +ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=etcd +``` + +Upgrade etcd without rotating etcd certs: + +```ShellSession +ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=etcd --limit=etcd --skip-tags=etcd-secrets +``` + +Upgrade kubelet: + +```ShellSession +ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=node --skip-tags=k8s-gen-certs,k8s-gen-tokens +``` + +Upgrade Kubernetes master components: + +```ShellSession +ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=master +``` + +Upgrade network plugins: + +```ShellSession +ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=network +``` + +Upgrade all add-ons: + +```ShellSession +ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=apps +``` + +Upgrade just helm (assuming `helm_enabled` is true): + +```ShellSession +ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=helm +``` + +## Migrate from Docker to Containerd + +Please note that **migrating container engines is not officially supported by Kubespray**. While this procedure can be used to migrate your cluster, it applies to one particular scenario and will likely evolve over time. At the moment, they are intended as an additional resource to provide insight into how these steps can be officially integrated into the Kubespray playbooks. + +As of Kubespray 2.18.0, containerd is already the default container engine. If you have the chance, it is advisable and safer to reset and redeploy the entire cluster with a new container engine. + +* [Migrating from Docker do Containerd](upgrades/migrate_docker2containerd.md) diff --git a/kubespray/docs/upgrades/migrate_docker2containerd.md b/kubespray/docs/upgrades/migrate_docker2containerd.md new file mode 100644 index 0000000..df2e06c --- /dev/null +++ b/kubespray/docs/upgrades/migrate_docker2containerd.md @@ -0,0 +1,106 @@ +# Migrating from Docker to Containerd + +â—MAKE SURE YOU READ BEFORE PROCEEDINGâ— + +**Migrating container engines is not officially supported by Kubespray**. The following procedure covers one particular scenario and involves manual steps, along with multiple runs of `cluster.yml`. It provides no guarantees that it will actually work or that any further action is needed. Please, consider these instructions as experimental guidelines. While they can be used to migrate your cluster, they will likely evolve over time. At the moment, they are intended as an additional resource to provide insight into how these steps can be officially integrated into the Kubespray playbooks. 
+
+As of Kubespray 2.18.0, containerd is already the default container engine. If you have the chance, it is still advisable and safer to reset and redeploy the entire cluster with a new container engine.
+
+Input and feedback are always appreciated.
+
+## Tested environment
+
+Nodes: Ubuntu 18.04 LTS\
+Cloud Provider: None (baremetal or VMs)\
+Kubernetes version: 1.21.5\
+Kubespray version: 2.18.0
+
+## Important considerations
+
+If you require minimum downtime, nodes need to be cordoned and drained before being processed, one by one. If you wish to run `cluster.yml` only once and get it all done in one swoop, downtime will be significantly higher. Docker will need to be manually removed from all nodes before the playbook runs (see [#8431](https://github.com/kubernetes-sigs/kubespray/issues/8431)). For minimum downtime, the following steps will be executed multiple times, once for each node.
+
+Processing nodes one by one also means you will not be able to update any other cluster configuration using Kubespray until this procedure is finished and the cluster is fully migrated.
+
+Everything done here requires full root access to every node.
+
+## Migration steps
+
+Before you begin, adjust your inventory:
+
+```yaml
+# Filename: k8s_cluster/k8s-cluster.yml
+resolvconf_mode: host_resolvconf
+container_manager: containerd
+
+# Filename: etcd.yml
+etcd_deployment_type: host
+```
+
+### 1) Pick one or more nodes for processing
+
+It is still unclear how the order might affect this procedure. So, to be sure, it might be best to start with the control plane and etcd nodes all together, followed by each worker node individually.
+
+### 2) Cordon and drain the node
+
+... because, downtime.
+
+### 3) Stop docker and kubelet daemons
+
+```commandline
+service kubelet stop
+service docker stop
+```
+
+### 4) Uninstall docker + dependencies
+
+```commandline
+apt-get remove -y --allow-change-held-packages containerd.io docker-ce docker-ce-cli docker-ce-rootless-extras
+```
+
+In some cases, the `pigz` dependency might be missing. Some image layers need it to be extracted.
+
+```shell
+apt-get install pigz
+```
+
+### 5) Run `cluster.yml` playbook with `--limit`
+
+```commandline
+ansible-playbook -i inventory/sample/hosts.ini cluster.yml --limit=NODENAME
+```
+
+This effectively reinstalls containerd and seems to put all config files in the right place. When this completes, kubelet will immediately pick up the new container engine and start spinning up DaemonSets and kube-system Pods.
+
+Optionally, if you feel confident, you can remove `/var/lib/docker` anytime after this step.
+
+```commandline
+rm -fr /var/lib/docker
+```
+
+You can watch the new containers being created using `crictl`.
+
+```commandline
+crictl ps -a
+```
+
+### 6) Replace the cri-socket node annotation
+
+Node annotations need to be adjusted. Kubespray will not do this, but a simple `kubectl` command is enough.
+
+```commandline
+kubectl annotate node NODENAME --overwrite kubeadm.alpha.kubernetes.io/cri-socket=/var/run/containerd/containerd.sock
+```
+
+The annotation is required by kubeadm to carry out future cluster upgrades.
+
+### 7) Reboot the node
+
+Reboot, just to make sure everything restarts fresh before the node is uncordoned.
+
+## After thoughts
+
+If your cluster runs a log aggregator, like fluentd+Graylog, you will likely need to adjust collection filters and parsers. While docker generates JSON logs, containerd has its own space-delimited format.
Example:
+
+```text
+2020-01-10T18:10:40.01576219Z stdout F application log message...
+```
diff --git a/kubespray/docs/vagrant.md b/kubespray/docs/vagrant.md
new file mode 100644
index 0000000..b7f702c
--- /dev/null
+++ b/kubespray/docs/vagrant.md
@@ -0,0 +1,164 @@
+# Vagrant
+
+Assuming you have Vagrant 2.0+ installed with virtualbox, libvirt/qemu or
+vmware (the vmware provider is untested), you should be able to launch a
+3-node Kubernetes cluster by simply running `vagrant up`.
+
+This will spin up 3 VMs and install kubernetes on them.
+Once they are provisioned you can connect to any of them by running `vagrant ssh k8s-[1..3]`.
+
+To give an estimate of the expected duration of a provisioning run:
+on a dual-core i5-6300u laptop with an SSD, provisioning takes around 13
+to 15 minutes, once the container images and other files are cached.
+Note that libvirt/qemu is recommended over virtualbox as it is quite a bit
+faster, especially during boot-up time.
+
+For proper performance a minimum of 12GB RAM is recommended.
+It is possible to run a 3-node cluster on a laptop with 8GB of RAM using
+the default Vagrantfile, provided you have 8GB zram swap configured and
+not much more than a browser and a mail client running.
+If you decide to run on such a machine, make sure that any mounted tmpfs
+devices are mostly empty, and disable any swap files on HDD/SSD, or you
+will be in for some serious swap madness.
+Things can get a bit sluggish during provisioning, but when that's done,
+the system will actually be able to perform quite well.
+
+## Customize Vagrant
+
+You can override the default settings in the `Vagrantfile` either by
+directly modifying the `Vagrantfile` or through an override file.
+In the same directory as the `Vagrantfile`, create a folder called
+`vagrant` and create a `config.rb` file in it.
+An example of how to configure this file is given below.
+
+## Use alternative OS for Vagrant
+
+By default, Vagrant uses the Ubuntu 18.04 box to provision a local cluster.
+You may use an alternative supported operating system for your local cluster.
+
+Customize the `$os` variable in the `Vagrantfile` or as an override, e.g.:
+
+```ShellSession
+echo '$os = "flatcar-stable"' >> vagrant/config.rb
+```
+
+The supported operating systems for vagrant are defined in the `SUPPORTED_OS`
+constant in the `Vagrantfile`.
+
+## File and image caching
+
+Kubespray can take quite a while to start on a laptop. To improve provisioning
+speed, the variable 'download_run_once' is set. This will make Kubespray
+download all files and containers just once and then redistribute them to
+the other nodes; as a bonus, it also caches all downloads locally and re-uses
+them on the next provisioning run. For more information on download settings
+see [download documentation](/docs/downloads.md).
+
+## Example use of Vagrant
+
+The following is an example of setting up and running kubespray using `vagrant`.
+For repeated runs, you could save the script to a file in the root of the
+kubespray repository and run it by executing `source `.
+ +```ShellSession +# use virtualenv to install all python requirements +VENVDIR=venv +virtualenv --python=/usr/bin/python3.7 $VENVDIR +source $VENVDIR/bin/activate +pip install -r requirements.txt + +# prepare an inventory to test with +INV=inventory/my_lab +rm -rf ${INV}.bak &> /dev/null +mv ${INV} ${INV}.bak &> /dev/null +cp -a inventory/sample ${INV} +rm -f ${INV}/hosts.ini + +# customize the vagrant environment +mkdir vagrant +cat << EOF > vagrant/config.rb +\$instance_name_prefix = "kub" +\$vm_cpus = 1 +\$num_instances = 3 +\$os = "centos-bento" +\$subnet = "10.0.20" +\$network_plugin = "flannel" +\$inventory = "$INV" +\$shared_folders = { 'temp/docker_rpms' => "/var/cache/yum/x86_64/7/docker-ce/packages" } +EOF + +# make the rpm cache +mkdir -p temp/docker_rpms + +vagrant up + +# make a copy of the downloaded docker rpm, to speed up the next provisioning run +scp kub-1:/var/cache/yum/x86_64/7/docker-ce/packages/* temp/docker_rpms/ + +# copy kubectl access configuration in place +mkdir $HOME/.kube/ &> /dev/null +ln -s $PWD/$INV/artifacts/admin.conf $HOME/.kube/config +# make the kubectl binary available +sudo ln -s $PWD/$INV/artifacts/kubectl /usr/local/bin/kubectl +#or +export PATH=$PATH:$PWD/$INV/artifacts +``` + +If a vagrant run failed and you've made some changes to fix the issue causing +the fail, here is how you would re-run ansible: + +```ShellSession +ansible-playbook -vvv -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory cluster.yml +``` + +If all went well, you check if it's all working as expected: + +```ShellSession +kubectl get nodes +``` + +The output should look like this: + +```ShellSession +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +kub-1 Ready control-plane,master 4m37s v1.22.5 +kub-2 Ready control-plane,master 4m7s v1.22.5 +kub-3 Ready 3m7s v1.22.5 +``` + +Another nice test is the following: + +```ShellSession +kubectl get pods --all-namespaces -o wide +``` + +Which should yield something like the following: + +```ShellSession +$ kubectl get pods --all-namespaces -o wide +NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +kube-system coredns-8474476ff8-m2469 1/1 Running 0 2m45s 10.233.65.2 kub-2 +kube-system coredns-8474476ff8-v5wzj 1/1 Running 0 2m41s 10.233.64.3 kub-1 +kube-system dns-autoscaler-5ffdc7f89d-76tnv 1/1 Running 0 2m43s 10.233.64.2 kub-1 +kube-system kube-apiserver-kub-1 1/1 Running 1 4m54s 10.0.20.101 kub-1 +kube-system kube-apiserver-kub-2 1/1 Running 1 4m33s 10.0.20.102 kub-2 +kube-system kube-controller-manager-kub-1 1/1 Running 1 5m1s 10.0.20.101 kub-1 +kube-system kube-controller-manager-kub-2 1/1 Running 1 4m33s 10.0.20.102 kub-2 +kube-system kube-flannel-9xgf5 1/1 Running 0 3m10s 10.0.20.102 kub-2 +kube-system kube-flannel-l8jbl 1/1 Running 0 3m10s 10.0.20.101 kub-1 +kube-system kube-flannel-zss4t 1/1 Running 0 3m10s 10.0.20.103 kub-3 +kube-system kube-multus-ds-amd64-bhpc9 1/1 Running 0 3m2s 10.0.20.103 kub-3 +kube-system kube-multus-ds-amd64-n6vl8 1/1 Running 0 3m2s 10.0.20.102 kub-2 +kube-system kube-multus-ds-amd64-qttgs 1/1 Running 0 3m2s 10.0.20.101 kub-1 +kube-system kube-proxy-2x4jl 1/1 Running 0 3m33s 10.0.20.101 kub-1 +kube-system kube-proxy-d48r7 1/1 Running 0 3m33s 10.0.20.103 kub-3 +kube-system kube-proxy-f45lp 1/1 Running 0 3m33s 10.0.20.102 kub-2 +kube-system kube-scheduler-kub-1 1/1 Running 1 4m54s 10.0.20.101 kub-1 +kube-system kube-scheduler-kub-2 1/1 Running 1 4m33s 10.0.20.102 kub-2 +kube-system nginx-proxy-kub-3 1/1 Running 0 3m33s 10.0.20.103 kub-3 +kube-system 
nodelocaldns-cg9tz 1/1 Running 0 2m41s 10.0.20.102 kub-2 +kube-system nodelocaldns-htswt 1/1 Running 0 2m41s 10.0.20.103 kub-3 +kube-system nodelocaldns-nsp7s 1/1 Running 0 2m41s 10.0.20.101 kub-1 +local-path-storage local-path-provisioner-66df45bfdd-km4zg 1/1 Running 0 2m54s 10.233.66.2 kub-3 +``` diff --git a/kubespray/docs/vars.md b/kubespray/docs/vars.md new file mode 100644 index 0000000..7680ab2 --- /dev/null +++ b/kubespray/docs/vars.md @@ -0,0 +1,307 @@ +# Configurable Parameters in Kubespray + +## Generic Ansible variables + +You can view facts gathered by Ansible automatically +[here](https://docs.ansible.com/ansible/latest/user_guide/playbooks_vars_facts.html#ansible-facts). + +Some variables of note include: + +* *ansible_user*: user to connect to via SSH +* *ansible_default_ipv4.address*: IP address Ansible automatically chooses. + Generated based on the output from the command ``ip -4 route get 8.8.8.8`` + +## Common vars that are used in Kubespray + +* *calico_version* - Specify version of Calico to use +* *calico_cni_version* - Specify version of Calico CNI plugin to use +* *docker_version* - Specify version of Docker to use (should be quoted + string). Must match one of the keys defined for *docker_versioned_pkg* + in `roles/container-engine/docker/vars/*.yml`. +* *containerd_version* - Specify version of containerd to use when setting `container_manager` to `containerd` +* *docker_containerd_version* - Specify which version of containerd to use when setting `container_manager` to `docker` +* *etcd_version* - Specify version of ETCD to use +* *calico_ipip_mode* - Configures Calico ipip encapsulation - valid values are 'Never', 'Always' and 'CrossSubnet' (default 'Never') +* *calico_vxlan_mode* - Configures Calico vxlan encapsulation - valid values are 'Never', 'Always' and 'CrossSubnet' (default 'Always') +* *calico_network_backend* - Configures Calico network backend - valid values are 'none', 'bird' and 'vxlan' (default 'vxlan') +* *kube_network_plugin* - Sets k8s network plugin (default Calico) +* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode +* *kube_version* - Specify a given Kubernetes version +* *searchdomains* - Array of DNS domains to search when looking up hostnames +* *remove_default_searchdomains* - Boolean that removes the default searchdomain +* *nameservers* - Array of nameservers to use for DNS lookup +* *preinstall_selinux_state* - Set selinux state, permitted values are permissive, enforcing and disabled. + +## Addressing variables + +* *ip* - IP to use for binding services (host var) +* *access_ip* - IP for other hosts to use to connect to. Often required when + deploying from a cloud, such as OpenStack or GCE and you have separate + public/floating and private IPs. +* *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip + and access_ip are undefined +* *ip6* - IPv6 address to use for binding services. (host var) + If *enable_dual_stack_networks* is set to ``true`` and *ip6* is defined, + kubelet's ``--node-ip`` and node's ``InternalIP`` will be the combination of *ip* and *ip6*. +* *loadbalancer_apiserver* - If defined, all hosts will connect to this + address instead of localhost for kube_control_planes and kube_control_plane[0] for + kube_nodes. See more details in the + [HA guide](/docs/ha-mode.md). +* *loadbalancer_apiserver_localhost* - makes all hosts to connect to + the apiserver internally load balanced endpoint. Mutual exclusive to the + `loadbalancer_apiserver`. 
See more details in the + [HA guide](/docs/ha-mode.md). + +## Cluster variables + +Kubernetes needs some parameters in order to get deployed. These are the +following default cluster parameters: + +* *cluster_name* - Name of cluster (default is cluster.local) + +* *container_manager* - Container Runtime to install in the nodes (default is containerd) + +* *image_command_tool* - Tool used to pull images (default depends on `container_manager` + and is `nerdctl` for `containerd`, `crictl` for `crio`, `docker` for `docker`) + +* *image_command_tool_on_localhost* - Tool used to pull images on localhost + (default is equal to `image_command_tool`) + +* *dns_domain* - Name of cluster DNS domain (default is cluster.local) + +* *kube_network_plugin* - Plugin to use for container networking + +* *kube_service_addresses* - Subnet for cluster IPs (default is + 10.233.0.0/18). Must not overlap with kube_pods_subnet + +* *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not + overlap with kube_service_addresses. + +* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining + bits in kube_pods_subnet dictates how many kube_nodes can be in cluster. Setting this > 25 will + raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly + (assertion not applicable to calico which doesn't use this as a hard limit, see + [Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes). + +* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services. + +* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``. + +* *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``. + +* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. Remaining bits in ``kube_pods_subnet_ipv6`` dictates how many kube_nodes can be in cluster. + +* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3) + +* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4) + +* *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/) + on the CoreDNS service. + +* *coredns_k8s_external_zone* - Zone that will be used when CoreDNS k8s_external plugin is enabled + (default is k8s_external.local) + +* *enable_coredns_k8s_endpoint_pod_names* - If enabled, it configures endpoint_pod_names option for kubernetes plugin. + on the CoreDNS service. + +* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or + OpenStack (default is unset) + +* *kube_feature_gates* - A list of key=value pairs that describe feature gates for + alpha/experimental Kubernetes features. (defaults is `[]`). + Additionally, you can use also the following variables to individually customize your kubernetes components installation (they works exactly like `kube_feature_gates`): + * *kube_apiserver_feature_gates* + * *kube_controller_feature_gates* + * *kube_scheduler_feature_gates* + * *kube_proxy_feature_gates* + * *kubelet_feature_gates* + +* *kubeadm_feature_gates* - A list of key=value pairs that describe feature gates for + alpha/experimental Kubeadm features. 
(defaults is `[]`) + +* *authorization_modes* - A list of [authorization mode]( + https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module) + that the cluster should be configured for. Defaults to `['Node', 'RBAC']` + (Node and RBAC authorizers). + Note: `Node` and `RBAC` are enabled by default. Previously deployed clusters can be + converted to RBAC mode. However, your apps which rely on Kubernetes API will + require a service account and cluster role bindings. You can override this + setting by setting authorization_modes to `[]`. + +* *kube_apiserver_admission_control_config_file* - Enable configuration for `kube-apiserver` admission plugins. + Currently this variable allow you to configure the `EventRateLimit` admission plugin. + + To configure the **EventRateLimit** plugin you have to define a data structure like this: + +```yml +kube_apiserver_admission_event_rate_limits: + limit_1: + type: Namespace + qps: 50 + burst: 100 + cache_size: 2000 + limit_2: + type: User + qps: 50 + burst: 100 + ... +``` + +* *kube_apiserver_service_account_lookup* - Enable validation service account before validating token. Default `true`. + +Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances' +private addresses, make sure to pick another values for ``kube_service_addresses`` +and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``. + +## Enabling Dual Stack (IPV4 + IPV6) networking + +If *enable_dual_stack_networks* is set to ``true``, Dual Stack networking will be enabled in the cluster. This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden of course. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services. + +## DNS variables + +By default, hosts are set up with 8.8.8.8 as an upstream DNS server and all +other settings from your existing /etc/resolv.conf are lost. Set the following +variables to match your requirements. + +* *upstream_dns_servers* - Array of upstream DNS servers configured on host in + addition to Kubespray deployed DNS +* *nameservers* - Array of DNS servers configured for use by hosts +* *searchdomains* - Array of up to 4 search domains +* *remove_default_searchdomains* - Boolean. If enabled, `searchdomains` variable can hold 6 search domains. +* *dns_etchosts* - Content of hosts file for coredns and nodelocaldns +* *dns_upstream_forward_extra_opts* - Options to add in the forward section of coredns/nodelocaldns related to upstream DNS servers + +For more information, see [DNS +Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md). + +## Other service variables + +* *docker_options* - Commonly used to set + ``--insecure-registry=myregistry.mydomain:5000`` + +* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install. + +* *containerd_default_runtime* - If defined, changes the default Containerd runtime used by the Kubernetes CRI plugin. + +* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin. + [Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars. + +* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a + proxy. 
Note that no_proxy defaults to all internal cluster IPs and hostnames + that correspond to each node. + +* *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet. + By default autodetection is used to match container manager configuration. + `systemd` is the preferred driver for `containerd` though it can have issues with `cgroups v1` and `kata-containers` in which case you may want to change to `cgroupfs`. + +* *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates + from the kube-apiserver when the certificate expiration approaches. + +* *kubelet_rotate_server_certificates* - Auto rotate the kubelet server certificates by requesting new certificates + from the kube-apiserver when the certificate expiration approaches. + **Note** that server certificates are **not** approved automatically. Approve them manually + (`kubectl get csr`, `kubectl certificate approve`) or implement custom approving controller like + [kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp). + +* *kubelet_streaming_connection_idle_timeout* - Set the maximum time a streaming connection can be idle before the connection is automatically closed. + +* *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host. + +* *kubelet_systemd_hardening* - If `true`, provides kubelet systemd service with security features for isolation. + + **N.B.** To enable this feature, ensure you are using the **`cgroup v2`** on your system. Check it out with command: `sudo ls -l /sys/fs/cgroup/*.slice`. If directory does not exists, enable this with the following guide: [enable cgroup v2](https://rootlesscontaine.rs/getting-started/common/cgroup2/#enabling-cgroup-v2). + + * *kubelet_secure_addresses* - By default *kubelet_systemd_hardening* set the **control plane** `ansible_host` IPs as the `kubelet_secure_addresses`. In case you have multiple interfaces in your control plane nodes and the `kube-apiserver` is not bound to the default interface, you can override them with this variable. + Example: + + The **control plane** node may have 2 interfaces with the following IP addresses: `eth0:10.0.0.110`, `eth1:192.168.1.110`. + + By default the `kubelet_secure_addresses` is set with the `10.0.0.110` the ansible control host uses `eth0` to connect to the machine. In case you want to use `eth1` as the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable in this way: `kubelet_secure_addresses: "192.168.1.110"`. + +* *node_labels* - Labels applied to nodes via `kubectl label node`. + For example, labels can be set in the inventory as variables or more widely in group_vars. + *node_labels* can only be defined as a dict: + +```yml +node_labels: + label1_name: label1_value + label2_name: label2_value +``` + +* *node_taints* - Taints applied to nodes via kubelet --register-with-taints parameter. + For example, taints can be set in the inventory as variables or more widely in group_vars. + *node_taints* has to be defined as a list of strings in format `key=value:effect`, e.g.: + +```yml +node_taints: + - "node.example.com/external=true:NoSchedule" +``` + +* *podsecuritypolicy_enabled* - When set to `true`, enables the PodSecurityPolicy admission controller and defines two policies `privileged` (applying to all resources in `kube-system` namespace and kubelet) and `restricted` (applying all other namespaces). 
+ Addons deployed in kube-system namespaces are handled. +* *kubernetes_audit* - When set to `true`, enables Auditing. + The auditing parameters can be tuned via the following variables (which default values are shown below): + * `audit_log_path`: /var/log/audit/kube-apiserver-audit.log + * `audit_log_maxage`: 30 + * `audit_log_maxbackups`: 1 + * `audit_log_maxsize`: 100 + * `audit_policy_file`: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml" + + By default, the `audit_policy_file` contains [default rules](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/kubernetes/control-plane/templates/apiserver-audit-policy.yaml.j2) that can be overridden with the `audit_policy_custom_rules` variable. +* *kubernetes_audit_webhook* - When set to `true`, enables the webhook audit backend. + The webhook parameters can be tuned via the following variables (which default values are shown below): + * `audit_webhook_config_file`: "{{ kube_config_dir }}/audit-policy/apiserver-audit-webhook-config.yaml" + * `audit_webhook_server_url`: `"https://audit.app"` + * `audit_webhook_server_extra_args`: {} + * `audit_webhook_mode`: batch + * `audit_webhook_batch_max_size`: 100 + * `audit_webhook_batch_max_wait`: 1s + +### Custom flags for Kube Components + +For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. + +Extra flags for the kubelet can be specified using these variables, +in the form of dicts of key-value pairs of configuration parameters that will be inserted into the kubelet YAML config file. The `kubelet_node_config_extra_args` apply kubelet settings only to nodes and not control planes. Example: + +```yml +kubelet_config_extra_args: + evictionHard: + memory.available: "100Mi" + evictionSoftGracePeriod: + memory.available: "30s" + evictionSoft: + memory.available: "300Mi" +``` + +The possible vars are: + +* *kubelet_config_extra_args* +* *kubelet_node_config_extra_args* + +Previously, the same parameters could be passed as flags to kubelet binary with the following vars: + +* *kubelet_custom_flags* +* *kubelet_node_custom_flags* + +The `kubelet_node_custom_flags` apply kubelet settings only to nodes and not control planes. Example: + +```yml +kubelet_custom_flags: + - "--eviction-hard=memory.available<100Mi" + - "--eviction-soft-grace-period=memory.available=30s" + - "--eviction-soft=memory.available<300Mi" +``` + +This alternative is deprecated and will remain until the flags are completely removed from kubelet + +Extra flags for the API server, controller, and scheduler components can be specified using these variables, +in the form of dicts of key-value pairs of configuration parameters that will be inserted into the kubeadm YAML config file: + +* *kube_kubeadm_apiserver_extra_args* +* *kube_kubeadm_controller_extra_args* +* *kube_kubeadm_scheduler_extra_args* + +## App variables + +* *helm_version* - Only supports v3.x. Existing v2 installs (with Tiller) will not be modified and need to be removed manually. diff --git a/kubespray/docs/vsphere-csi.md b/kubespray/docs/vsphere-csi.md new file mode 100644 index 0000000..a399d4b --- /dev/null +++ b/kubespray/docs/vsphere-csi.md @@ -0,0 +1,102 @@ +# vSphere CSI Driver + +vSphere CSI driver allows you to provision volumes over a vSphere deployment. The Kubernetes historic in-tree cloud provider is deprecated and will be removed in future versions. 
+
+## Prerequisites
+
+The vSphere user for the CSI driver requires a set of privileges to perform Cloud Native Storage operations. Follow the [official guide](https://vsphere-csi-driver.sigs.k8s.io/driver-deployment/prerequisites.html#roles_and_privileges) to configure them.
+
+## Kubespray configuration
+
+To enable the vSphere CSI driver, uncomment the `vsphere_csi_enabled` option in `group_vars/all/vsphere.yml` and set it to `true`.
+
+To set the number of replicas for the vSphere CSI controller, change the `vsphere_csi_controller_replicas` option in `group_vars/all/vsphere.yml`.
+
+You need to source the vSphere credentials you use to deploy the machines that will host Kubernetes.
+
+| Variable | Required | Type | Choices | Default | Comment |
+|----------|----------|------|---------|---------|---------|
+| external_vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter |
+| external_vsphere_vcenter_port | TRUE | string | | "443" | Port of the vCenter API |
+| external_vsphere_insecure | TRUE | string | "true", "false" | "true" | set to "true" if the host above uses a self-signed cert |
+| external_vsphere_user | TRUE | string | | | User name for vCenter with required privileges (Can also be specified with the `VSPHERE_USER` environment variable) |
+| external_vsphere_password | TRUE | string | | | Password for vCenter (Can also be specified with the `VSPHERE_PASSWORD` environment variable) |
+| external_vsphere_datacenter | TRUE | string | | | Datacenter name to use |
+| external_vsphere_kubernetes_cluster_id | TRUE | string | | "kubernetes-cluster-id" | Kubernetes cluster ID to use |
+| external_vsphere_version | TRUE | string | | "6.7u3" | vSphere version where all the VMs are located |
+| external_vsphere_cloud_controller_image_tag | TRUE | string | | "latest" | vSphere cloud controller image tag to use |
+| vsphere_syncer_image_tag | TRUE | string | | "v2.2.1" | Syncer image tag to use |
+| vsphere_csi_attacher_image_tag | TRUE | string | | "v3.1.0" | CSI attacher image tag to use |
+| vsphere_csi_controller | TRUE | string | | "v2.2.1" | CSI controller image tag to use |
+| vsphere_csi_controller_replicas | TRUE | integer | | 1 | Number of pods Kubernetes should deploy for the CSI controller |
+| vsphere_csi_liveness_probe_image_tag | TRUE | string | | "v2.2.0" | CSI liveness probe image tag to use |
+| vsphere_csi_provisioner_image_tag | TRUE | string | | "v2.1.0" | CSI provisioner image tag to use |
+| vsphere_csi_node_driver_registrar_image_tag | TRUE | string | | "v1.1.0" | CSI node driver registrar image tag to use |
+| vsphere_csi_driver_image_tag | TRUE | string | | "v1.0.2" | CSI driver image tag to use |
+| vsphere_csi_resizer_tag | TRUE | string | | "v1.1.0" | CSI resizer image tag to use |
+| vsphere_csi_aggressive_node_drain | FALSE | boolean | | false | Enable aggressive node drain strategy |
+| vsphere_csi_aggressive_node_unreachable_timeout | FALSE | int | | 300 | Timeout until a node in an unreachable state is drained |
+| vsphere_csi_aggressive_node_not_ready_timeout | FALSE | int | | 300 | Timeout until a node in a not-ready state is drained |
+| vsphere_csi_namespace | TRUE | string | | "kube-system" | vSphere CSI namespace to use; kube-system for backward compatibility, should be changed to vmware-system-csi in the long run |
+
+## Usage example
+
+To test the dynamic
provisioning using the vSphere CSI driver, make sure to create a [storage policy](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#create-a-storage-policy) and [storage class](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#create-a-storageclass), then apply the following manifest:
+
+```yml
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: csi-pvc-vsphere
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: Space-Efficient
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx
+spec:
+  containers:
+  - image: nginx
+    imagePullPolicy: IfNotPresent
+    name: nginx
+    ports:
+    - containerPort: 80
+      protocol: TCP
+    volumeMounts:
+    - mountPath: /usr/share/nginx/html
+      name: csi-data-vsphere
+  volumes:
+  - name: csi-data-vsphere
+    persistentVolumeClaim:
+      claimName: csi-pvc-vsphere
+      readOnly: false
+```
+
+Apply this manifest to your cluster: ```kubectl apply -f nginx.yml```
+
+You should see the PVC provisioned and bound:
+
+```ShellSession
+$ kubectl get pvc
+NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      AGE
+csi-pvc-vsphere   Bound    pvc-dc7b1d21-ee41-45e1-98d9-e877cc1533ac   1Gi        RWO            Space-Efficient   10s
+```
+
+And the volume should be mounted in the Nginx Pod (wait until the Pod is Running):
+
+```ShellSession
+kubectl exec -it nginx -- df -h | grep /usr/share/nginx/html
+/dev/sdb        976M  2.6M  907M   1% /usr/share/nginx/html
+```
+
+## More info
+
+For further information about the vSphere CSI Driver, you can refer to the official [vSphere Cloud Provider documentation](https://cloud-provider-vsphere.sigs.k8s.io/container_storage_interface.html).
diff --git a/kubespray/docs/vsphere.md b/kubespray/docs/vsphere.md
new file mode 100644
index 0000000..6216df4
--- /dev/null
+++ b/kubespray/docs/vsphere.md
@@ -0,0 +1,134 @@
+# vSphere
+
+Kubespray can be deployed with vSphere as the cloud provider. This feature supports:
+
+- Volumes
+- Persistent Volumes
+- Storage Classes and provisioning of volumes
+- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes
+
+## Out-of-tree vSphere cloud provider
+
+### Prerequisites
+
+First, you need to configure your vSphere environment by following the [official documentation](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#prerequisites).
+
+After this step you should have:
+
+- vSphere upgraded to 6.7 U3 or later
+- VM hardware upgraded to version 15 or higher
+- UUID activated for each VM where Kubernetes will be deployed
+
+### Kubespray configuration
+
+First, in `inventory/sample/group_vars/all/all.yml`, set `cloud_provider` to `external` and `external_cloud_provider` to `vsphere`.
+
+```yml
+cloud_provider: "external"
+external_cloud_provider: "vsphere"
+```
+
+Then, in `inventory/sample/group_vars/all/vsphere.yml`, declare your vCenter credentials and enable the vSphere CSI driver following the description below.
+ +| Variable | Required | Type | Choices | Default | Comment | +|----------------------------------------|----------|---------|----------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------| +| external_vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter | +| external_vsphere_vcenter_port | TRUE | string | | "443" | Port of the vCenter API | +| external_vsphere_insecure | TRUE | string | "true", "false" | "true" | set to "true" if the host above uses a self-signed cert | +| external_vsphere_user | TRUE | string | | | User name for vCenter with required privileges (Can also be specified with the `VSPHERE_USER` environment variable) | +| external_vsphere_password | TRUE | string | | | Password for vCenter (Can also be specified with the `VSPHERE_PASSWORD` environment variable) | +| external_vsphere_datacenter | TRUE | string | | | Datacenter name to use | +| external_vsphere_kubernetes_cluster_id | TRUE | string | | "kubernetes-cluster-id" | Kubernetes cluster ID to use | +| vsphere_csi_enabled | TRUE | boolean | | false | Enable vSphere CSI | + +Example configuration: + +```yml +external_vsphere_vcenter_ip: "myvcenter.domain.com" +external_vsphere_vcenter_port: "443" +external_vsphere_insecure: "true" +external_vsphere_user: "administrator@vsphere.local" +external_vsphere_password: "K8s_admin" +external_vsphere_datacenter: "DATACENTER_name" +external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" +vsphere_csi_enabled: true +``` + +For a more fine-grained CSI setup, refer to the [vsphere-csi](/docs/vsphere-csi.md) documentation. + +### Deployment + +Once the configuration is set, you can execute the playbook again to apply the new configuration: + +```ShellSession +cd kubespray +ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml +``` + +You'll find some useful examples [here](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#sample-manifests-to-test-csi-driver-functionality) to test your configuration. + +## In-tree vSphere cloud provider ([deprecated](https://cloud-provider-vsphere.sigs.k8s.io/concepts/in_tree_vs_out_of_tree.html)) + +### Prerequisites (deprecated) + +You need at first to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider). + +After this step you should have: + +- UUID activated for each VM where Kubernetes will be deployed +- A vSphere account with required privileges + +If you intend to leverage the [zone and region node labeling](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domain-beta-kubernetes-io-region), create a tag category for both the zone and region in vCenter. The tags can then be applied at the host, cluster, datacenter, or folder level, and the cloud provider will walk the hierarchy to extract and apply the labels to the Kubernetes nodes. + +### Kubespray configuration (deprecated) + +First you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`. + +```yml +cloud_provider: vsphere +``` + +Then, in the same file, you need to declare your vCenter credentials following the description below. 
+ +| Variable | Required | Type | Choices | Default | Comment | +|------------------------------|----------|---------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter | +| vsphere_vcenter_port | TRUE | integer | | | Port of the vCenter API. Commonly 443 | +| vsphere_insecure | TRUE | integer | 1, 0 | | set to 1 if the host above uses a self-signed cert | +| vsphere_user | TRUE | string | | | User name for vCenter with required privileges | +| vsphere_password | TRUE | string | | | Password for vCenter | +| vsphere_datacenter | TRUE | string | | | Datacenter name to use | +| vsphere_datastore | TRUE | string | | | Datastore name to use | +| vsphere_working_dir | TRUE | string | | | Working directory from the view "VMs and template" in the vCenter where VM are placed | +| vsphere_scsi_controller_type | TRUE | string | buslogic, pvscsi, parallel | pvscsi | SCSI controller name. Commonly "pvscsi". | +| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of virtual machine that host K8s master. Can be retrieved from instanceUuid property in VmConfigInfo, or as vc.uuid in VMX file or in `/sys/class/dmi/id/product_serial` (Optional, only used for Kubernetes <= 1.9.2) | +| vsphere_public_network | FALSE | string | | Blank | Name of the network the VMs are joined to | +| vsphere_resource_pool | FALSE | string | | Blank | Name of the Resource pool where the VMs are located (Optional, only used for Kubernetes >= 1.9.2) | +| vsphere_zone_category | FALSE | string | | | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/zone` label on nodes (Optional, only used for Kubernetes >= 1.12.0) | +| vsphere_region_category | FALSE | string | | | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/region` label on nodes (Optional, only used for Kubernetes >= 1.12.0) | + +Example configuration: + +```yml +vsphere_vcenter_ip: "myvcenter.domain.com" +vsphere_vcenter_port: 443 +vsphere_insecure: 1 +vsphere_user: "k8s@vsphere.local" +vsphere_password: "K8s_admin" +vsphere_datacenter: "DATACENTER_name" +vsphere_datastore: "DATASTORE_name" +vsphere_working_dir: "Docker_hosts" +vsphere_scsi_controller_type: "pvscsi" +vsphere_resource_pool: "K8s-Pool" +``` + +### Deployment (deprecated) + +Once the configuration is set, you can execute the playbook again to apply the new configuration: + +```ShellSession +cd kubespray +ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml +``` + +You'll find some useful examples [here](https://github.com/kubernetes/examples/tree/master/staging/volumes/vsphere) to test your configuration. diff --git a/kubespray/docs/weave.md b/kubespray/docs/weave.md new file mode 100644 index 0000000..30fa494 --- /dev/null +++ b/kubespray/docs/weave.md @@ -0,0 +1,79 @@ +# Weave + +Weave 2.0.1 is supported by kubespray + +Weave uses [**consensus**](https://www.weave.works/docs/net/latest/ipam/##consensus) mode (default mode) and [**seed**](https://www.weave.works/docs/net/latest/ipam/#seed) mode. 
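+
+As a reminder, Weave is selected as the cluster network plugin through the `kube_network_plugin` variable. A minimal sketch, assuming the sample inventory layout:
+
+```yml
+# inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml (assumed path)
+kube_network_plugin: weave
+```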
+ +`Consensus` mode is best to use on static size cluster and `seed` mode is best to use on dynamic size cluster + +Weave encryption is supported for all communication + +* To use Weave encryption, specify a strong password (if no password, no encryption) + +```ShellSession +# In file ./inventory/sample/group_vars/k8s_cluster.yml +weave_password: EnterPasswordHere +``` + +This password is used to set an environment variable inside weave container. + +Weave is deployed by kubespray using a daemonSet + +* Check the status of Weave containers + +```ShellSession +# From client +kubectl -n kube-system get pods | grep weave +# output +weave-net-50wd2 2/2 Running 0 2m +weave-net-js9rb 2/2 Running 0 2m +``` + +There must be as many pods as nodes (here kubernetes have 2 nodes so there are 2 weave pods). + +* Check status of weave (connection,encryption ...) for each node + +```ShellSession +# On nodes +curl http://127.0.0.1:6784/status +# output on node1 +Version: 2.0.1 (up to date; next check at 2017/08/01 13:51:34) + + Service: router + Protocol: weave 1..2 + Name: fa:16:3e:b3:d6:b2(node1) + Encryption: enabled + PeerDiscovery: enabled + Targets: 2 + Connections: 2 (1 established, 1 failed) + Peers: 2 (with 2 established connections) + TrustedSubnets: none + + Service: ipam + Status: ready + Range: 10.233.64.0/18 + DefaultSubnet: 10.233.64.0/18 +``` + +* Check parameters of weave for each node + +```ShellSession +# On nodes +ps -aux | grep weaver +# output on node1 (here its use seed mode) +root 8559 0.2 3.0 365280 62700 ? Sl 08:25 0:00 /home/weave/weaver --name=fa:16:3e:b3:d6:b2 --port=6783 --datapath=datapath --host-root=/host --http-addr=127.0.0.1:6784 --status-addr=0.0.0.0:6782 --docker-api= --no-dns --db-prefix=/weavedb/weave-net --ipalloc-range=10.233.64.0/18 --nickname=node1 --ipalloc-init seed=fa:16:3e:b3:d6:b2,fa:16:3e:f0:50:53 --conn-limit=30 --expect-npc 192.168.208.28 192.168.208.19 +``` + +## Consensus mode (default mode) + +This mode is best to use on static size cluster + +### Seed mode + +This mode is best to use on dynamic size cluster + +The seed mode also allows multi-clouds and hybrid on-premise/cloud clusters deployment. + +* Switch from consensus mode to seed/Observation mode + +See [weave ipam documentation](https://www.weave.works/docs/net/latest/tasks/ipam/ipam/) and use `weave_extra_args` to enable. diff --git a/kubespray/extra_playbooks/files/get_cinder_pvs.sh b/kubespray/extra_playbooks/files/get_cinder_pvs.sh new file mode 100644 index 0000000..73a088e --- /dev/null +++ b/kubespray/extra_playbooks/files/get_cinder_pvs.sh @@ -0,0 +1,2 @@ +#!/bin/sh +kubectl get pv -o go-template --template='{{ range .items }}{{ $metadata := .metadata }}{{ with $value := index .metadata.annotations "pv.kubernetes.io/provisioned-by" }}{{ if eq $value "kubernetes.io/cinder" }}{{printf "%s\n" $metadata.name}}{{ end }}{{ end }}{{ end }}' diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/all.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/all.yml new file mode 100644 index 0000000..818b295 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/all.yml @@ -0,0 +1,143 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. 
The access_ip is really useful AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If the cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## And must be set port 6443 +loadbalancer_apiserver_port: 6443 + +## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. 
+# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. 
+ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false + +ansible_user: root +ansible_ssh_private_key_file: ~/.ssh/id_rsa diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/aws.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/azure.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/containerd.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + 
+# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/coreos.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/cri-o.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/docker.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. 
+## example define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registry,example China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value. +## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/etcd.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/gcp.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/hcloud.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git 
a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/oci.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. +# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/offline.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ 
image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Falnnel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repo must be available, for EL8, baseos and appstream +### By default we enable those repo automatically +# rhel_enable_repos: false 
+### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/openstack.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. 
+# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/upcloud.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/vsphere.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ 
b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/etcd.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. 
+# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/addons.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# 
rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: 
"layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..9c09f33 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.2 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... 
+# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. 
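+# Hedged example (the subnet below is an assumption, not a shipped default): setting +# kube_proxy_nodeport_addresses_cidr: 10.10.43.0/24 would make NodePort services listen +# only on addresses in that block rather than on all local addresses.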
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period has to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical pods to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). +# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolution as an HTTP service +deploy_netchecker: false +# IP address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd.
+## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. +# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to set up a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array.
+# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). 
+## The subnets of each node will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults to kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the global ASN does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. +# - If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enables BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN are mutually exclusive modes.
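+# Illustrative pairing (not a default of this inventory): VXLAN-only encapsulation +# would combine calico_vxlan_mode: 'Always' with calico_ipip_mode: 'Never'; swapping +# the two values gives IP-in-IP-only encapsulation.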
+# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# In certain situations liveness and readiness probes may need tuning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. +# If left blank, then the interface is chosen using the node's +# default route. +# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore.
+# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags, upon their +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all connected clusters and +# in the range of 1 to 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This makes it possible to deploy cilium alongside another CNI to replace kube-proxy. +# cilium_deploy_additionally: false + +# Auto direct node routes can be used to advertise pod routes in your cluster +# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`). +# This works only if you have L2 connectivity between all your nodes. +# You will also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setup. +# cilium_auto_direct_node_routes: false + +# Allows explicitly specifying the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag.
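+# Sketch of a native-routing setup (values are illustrative, assuming L2 adjacency between nodes): +# cilium_tunnel_mode: disabled, cilium_auto_direct_node_routes: true and +# cilium_native_routing_cidr: 10.233.64.0/18 (this cluster's pod subnet) would route pod traffic +# without encapsulation and without SNAT.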
+# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. 
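+# Illustrative only (both values are placeholders): a mesh member could set +# cilium_cluster_name: dsk-dev and a cilium_cluster_id between 1 and 255 that no other +# member cluster uses.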
+# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. 
+# cilium_disable_cnp_status_updates: true diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt needs a custom-compiled ovs kernel module +kube_ovn_tunnel_type: geneve + +## The NIC to support the container network can be a NIC name or a group of regexes separated by commas, e.g.: 'enp6s0f0,eth.*'; if empty, the NIC used by the default route will be used. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port.
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works +# only with Weave IPAM (default). 
+# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/inventory.ini b/kubespray/extra_playbooks/inventory/dsk-dev/inventory.ini new file mode 100644 index 0000000..1376f12 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/inventory.ini @@ -0,0 +1,84 @@ +# ## Configure 'ip' variable to bind kubernetes services on a +# ## different ip than the default iface +# ## We should set etcd_member_name for etcd cluster. The node that is not a etcd member do not need to set the value, or can set the empty string value. +[all] +dsk-dev-master-a1 ansible_host=10.10.43.111 etcd_member_name=etcd1 +dsk-dev-master-b1 ansible_host=10.10.43.112 etcd_member_name=etcd2 +dsk-dev-master-c1 ansible_host=10.10.43.113 etcd_member_name=etcd3 +dsk-dev-data-druid-a1 ansible_host=10.10.43.114 +dsk-dev-data-druid-b1 ansible_host=10.10.43.115 +dsk-dev-data-druid-c1 ansible_host=10.10.43.116 +dsk-dev-data-es-a1 ansible_host=10.10.43.117 +dsk-dev-data-es-b1 ansible_host=10.10.43.118 +dsk-dev-data-es-c1 ansible_host=10.10.43.119 +dsk-dev-data-kafka-a1 ansible_host=10.10.43.120 +dsk-dev-data-kafka-b1 ansible_host=10.10.43.121 +dsk-dev-data-kafka-c1 ansible_host=10.10.43.122 +dsk-dev-process-a1 ansible_host=10.10.43.123 +dsk-dev-process-b1 ansible_host=10.10.43.124 +dsk-dev-process-c1 ansible_host=10.10.43.125 +dsk-dev-process-a2 ansible_host=10.10.43.126 +dsk-dev-process-b2 ansible_host=10.10.43.127 +dsk-dev-process-c2 ansible_host=10.10.43.128 +dsk-dev-process-a3 ansible_host=10.10.43.129 +dsk-dev-process-b3 ansible_host=10.10.43.130 +dsk-dev-process-c3 ansible_host=10.10.43.131 +dsk-dev-temp-a1 ansible_host=10.10.43.132 +dsk-dev-data-common-a1 ansible_host=10.10.43.133 +dsk-dev-data-common-b1 ansible_host=10.10.43.134 +dsk-dev-data-common-c1 ansible_host=10.10.43.135 +#dsk-dev-data-druid-a2 ansible_host=10.10.43.136 +dsk-dev-data-druid-b2 ansible_host=10.10.43.137 +dsk-dev-data-druid-c2 ansible_host=10.10.43.138 +#dsk-dev-data-druid-a3 ansible_host=10.10.43.139 +dsk-dev-temp-b1 ansible_host=10.10.43.140 +dsk-dev-temp-c1 ansible_host=10.10.43.141 +dsk-dev-prometheus ansible_host=10.10.43.142 + +[kube_control_plane] +dsk-dev-master-a1 +dsk-dev-master-b1 +dsk-dev-master-c1 + +[etcd] +dsk-dev-master-a1 +dsk-dev-master-b1 +dsk-dev-master-c1 + +[kube_node] +dsk-dev-data-druid-a1 +dsk-dev-data-druid-b1 +dsk-dev-data-druid-c1 +#dsk-dev-data-druid-a2 +dsk-dev-data-druid-b2 +dsk-dev-data-druid-c2 +#dsk-dev-data-druid-a3 +dsk-dev-data-es-a1 +dsk-dev-data-es-b1 +dsk-dev-data-es-c1 +dsk-dev-data-kafka-a1 +dsk-dev-data-kafka-b1 +dsk-dev-data-kafka-c1 +dsk-dev-data-common-a1 +dsk-dev-data-common-b1 +dsk-dev-data-common-c1 +dsk-dev-process-a1 +dsk-dev-process-b1 +dsk-dev-process-c1 +dsk-dev-process-a2 +dsk-dev-process-b2 +dsk-dev-process-c2 +dsk-dev-process-a3 +dsk-dev-process-b3 +dsk-dev-process-c3 +dsk-dev-temp-a1 +dsk-dev-temp-b1 +dsk-dev-temp-c1 +dsk-dev-prometheus + +[calico_rr] + +[k8s_cluster:children] +kube_control_plane +kube_node +calico_rr diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/patches/kube-controller-manager+merge.yaml 
b/kubespray/extra_playbooks/inventory/dsk-dev/patches/kube-controller-manager+merge.yaml new file mode 100644 index 0000000..a8aa5a7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/patches/kube-controller-manager+merge.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-controller-manager + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '10257' \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/dsk-dev/patches/kube-scheduler+merge.yaml b/kubespray/extra_playbooks/inventory/dsk-dev/patches/kube-scheduler+merge.yaml new file mode 100644 index 0000000..0bb3950 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-dev/patches/kube-scheduler+merge.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-scheduler + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '10259' \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/all.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/all.yml new file mode 100644 index 0000000..70e391b --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/all.yml @@ -0,0 +1,143 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful in AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## and it must be set to port 6443 +loadbalancer_apiserver_port: 6443 + +## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it uses the nameservers to make sure the cluster installs safely in the dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail.
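+## Hedged example (resolver addresses are placeholders): if you set disable_host_nameservers: true, +## also uncomment upstream_dns_servers below (e.g. 8.8.8.8 and 8.8.4.4) so external +## lookups keep working during deployment.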
+# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers; +## for instance, we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need to exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will install from your desired source +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Choose 'none' if you provide your own certificates. +## Options are "script" or "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container images +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
+# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. +ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false + +ansible_user: root +ansible_ssh_private_key_file: /root/.ssh/id_rsa diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/aws.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/azure.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. 
+## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/containerd.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. 
+# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/coreos.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Whether coreos needs auto upgrade; default is true +# coreos_auto_upgrade: true diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/cri-o.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/docker.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## A disk path must be defined for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example: define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registries, for example a China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value.
+## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/etcd.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/gcp.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/hcloud.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/oci.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables 
+# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. +# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/offline.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo 
}}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Falnnel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repo must be available, for EL8, baseos and appstream +### By default we enable those repo automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ 
yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/openstack.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# 
external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/upcloud.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/vsphere.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/etcd.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ 
b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. +# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/addons.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. 
+# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment 
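As an aside, a minimal sketch of how this controller might be switched on in an inventory like this one; the values below are illustrative assumptions, not the defaults that follow.

# Illustrative sketch only: ingress-nginx on the host network, pinned to Linux nodes
ingress_nginx_enabled: true
ingress_nginx_host_network: true
ingress_nginx_nodeselector:
  kubernetes.io/os: "linux"
ingress_nginx_class: nginx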
+ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: "layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API 
server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..85d4029 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,382 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. 
When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. 
+# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). +# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential +# Apply extra options to coredns kubernetes plugin +# coredns_kubernetes_extra_opts: +# - 'fallthrough example.local' +# Forward extra domains to the coredns kubernetes plugin +# coredns_kubernetes_extra_domains: '' + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. 
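A hedged sketch of the kubeconfig-retrieval options described above, combining them so the admin kubeconfig and a matching kubectl end up on the Ansible control host; whether this is wanted depends on where Ansible runs.

# Illustrative sketch only: copy cluster credentials back to the Ansible host
kubeconfig_localhost: true
kubeconfig_localhost_ansible_host: true   # use ansible_host as the API address in the copied kubeconfig
kubectl_localhost: true                   # also download kubectl into {{ bin_dir }} on the Ansible host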
+# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Set runtime and kubelet cgroups when using systemd as cgroup driver (default) +# kubelet_runtime_cgroups: "{{ kube_reserved_cgroups }}/{{ container_manager }}.service" +# kubelet_kubelet_cgroups: "{{ kube_reserved_cgroups }}/kubelet.service" + +## Set runtime and kubelet cgroups when using cgroupfs as cgroup driver +# kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service" +# kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service" + +# Optionally reserve this space for kube daemons. +# kube_reserved: true +## Uncomment to override default values +## The following two items need to be set when kube_reserved is true +# kube_reserved_cgroups_for_service_slice: kube.slice +# kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}" +# kube_memory_reserved: 256Mi +# kube_cpu_reserved: 100m +# kube_ephemeral_storage_reserved: 2Gi +# kube_pid_reserved: "1000" +# Reservation for master hosts +# kube_master_memory_reserved: 512Mi +# kube_master_cpu_reserved: 200m +# kube_master_ephemeral_storage_reserved: 2Gi +# kube_master_pid_reserved: "1000" + +## Optionally reserve resources for OS system daemons. +# system_reserved: true +## Uncomment to override default values +## The following two items need to be set when system_reserved is true +# system_reserved_cgroups_for_service_slice: system.slice +# system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}" +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. 
+## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. +# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). 
+## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. +# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enable BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN is mutualy exclusive modes. 
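Tying together the MTU guidance and the encapsulation settings above, a hedged example for a plain 1500-byte underlay using VXLAN; the numbers are illustrative (1500 - 50 = 1450 for workload interfaces).

# Illustrative sketch only: VXLAN-backed Calico on a standard 1500 MTU network
calico_network_backend: vxlan
calico_vxlan_mode: 'Always'
calico_ipip_mode: 'Never'
calico_veth_mtu: 1450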
+# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want use the default route interface when you use multiple interface with dynamique route (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tunning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. +# If left blank, then the interface is choosing using the node's +# default route. +# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. 
+# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all conneted clusters and +# in the range of 1 and 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +# cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). +# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. 
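A hedged sketch of the direct-routing combination just described, assuming L2 connectivity between all nodes; the CIDR is an assumption chosen to match kube_pods_subnet from this inventory.

# Illustrative sketch only: no tunnelling, pod routes advertised directly between nodes
cilium_tunnel_mode: disabled
cilium_auto_direct_node_routes: true
cilium_native_routing_cidr: "10.233.64.0/18"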
+# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. 
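For a cluster-mesh member, a hedged sketch combining the two mesh-related settings (the unique ID explained earlier and the cluster name below); both values are placeholders.

# Illustrative sketch only: identify this cluster within a Cilium cluster mesh
cilium_cluster_id: 1
cilium_cluster_name: dsk-minio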
+# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. 
+# cilium_disable_cnp_status_updates: true diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic to support container network can be a nic name or a group of regex separated by comma e.g: 'enp6s0f0,eth.*', if empty will use the nic that the default route use. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bind to the physical port. 
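Following the hw-offload note above, a hedged sketch of what enabling it might look like; the interface name is an assumption taken from the example regex earlier in this file.

# Illustrative sketch only: hardware offload on a physical port (traffic mirror kept off)
kube_ovn_hw_offload: true
kube_ovn_traffic_mirror: false
kube_ovn_iface: enp6s0f0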
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
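+# ClusterFirstWithHostNet is usually the right choice here: kube-router pods run with host
+# networking, and with plain ClusterFirst they would use the node's resolver instead of
+# cluster DNS.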
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works +# only with Weave IPAM (default). 
+# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/inventory.ini b/kubespray/extra_playbooks/inventory/dsk-minio/inventory.ini new file mode 100644 index 0000000..95a6c39 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/inventory.ini @@ -0,0 +1,34 @@ +# ## Configure 'ip' variable to bind kubernetes services on a +# ## different ip than the default iface +# ## We should set etcd_member_name for etcd cluster. The node that is not a etcd member do not need to set the value, or can set the empty string value. +[all] +dsk-minio-master1 ansible_host=10.10.43.235 etcd_member_name=etcd1 +dsk-minio-worker1 ansible_host=10.10.43.236 + + +# node1 ansible_host=95.54.0.12 # ip=10.3.0.1 etcd_member_name=etcd1 +# node2 ansible_host=95.54.0.13 # ip=10.3.0.2 etcd_member_name=etcd2 +# node3 ansible_host=95.54.0.14 # ip=10.3.0.3 etcd_member_name=etcd3 +# node4 ansible_host=95.54.0.15 # ip=10.3.0.4 etcd_member_name=etcd4 +# node5 ansible_host=95.54.0.16 # ip=10.3.0.5 etcd_member_name=etcd5 +# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6 + +# ## configure a bastion host if your nodes are not directly reachable +# [bastion] +# bastion ansible_host=x.x.x.x ansible_user=some_user + +[kube_control_plane] +dsk-minio-master1 + +[etcd] +dsk-minio-master1 + +[kube_node] +dsk-minio-worker1 + +[calico_rr] + +[k8s_cluster:children] +kube_control_plane +kube_node +calico_rr diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/patches/kube-controller-manager+merge.yaml b/kubespray/extra_playbooks/inventory/dsk-minio/patches/kube-controller-manager+merge.yaml new file mode 100644 index 0000000..a8aa5a7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/patches/kube-controller-manager+merge.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-controller-manager + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '10257' \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/dsk-minio/patches/kube-scheduler+merge.yaml b/kubespray/extra_playbooks/inventory/dsk-minio/patches/kube-scheduler+merge.yaml new file mode 100644 index 0000000..0bb3950 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/dsk-minio/patches/kube-scheduler+merge.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-scheduler + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '10259' \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/all.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/all.yml new file mode 100644 index 0000000..b9639a8 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/all.yml @@ -0,0 +1,140 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. 
The access_ip is really useful AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If the cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## And must be set port 6443 +loadbalancer_apiserver_port: 6443 + +## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. 
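+## For example (illustrative values only): additional_no_proxy: ".internal.example.com,10.0.0.0/8"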
+# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. 
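+# In air-gapped environments, replace the public pool entries below with internal time
+# sources (hypothetical example: "ntp1.internal.example.com iburst").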
+ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/aws.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/azure.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/containerd.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# 
containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/coreos.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/cri-o.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/docker.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. 
+## example define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registry,example China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value. +## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/etcd.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/gcp.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/hcloud.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git 
a/kubespray/extra_playbooks/inventory/local/group_vars/all/oci.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. +# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/offline.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch 
}}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Falnnel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repo must be available, for EL8, baseos and appstream +### By default we enable those repo automatically +# rhel_enable_repos: false +### Docker / 
Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/openstack.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. 
+# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/upcloud.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/all/vsphere.yml b/kubespray/extra_playbooks/inventory/local/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ 
b/kubespray/extra_playbooks/inventory/local/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/etcd.yml b/kubespray/extra_playbooks/inventory/local/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. 
+# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/addons.yml b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: 
ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: "layer2" +# auto_assign: false 
+# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... 
+# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. 
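+# For example (illustrative subnet), setting kube_proxy_nodeport_addresses_cidr: 192.168.10.0/24
+# would restrict NodePort services to addresses in that range.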
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). +# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. 
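+## Switching the container manager on an existing cluster is disruptive (nodes typically need
+## to be drained and re-provisioned), so it is best decided before the first deployment.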
+## Default: containerd
+container_manager: containerd

+# Additional container runtimes
+kata_containers_enabled: false
+
+kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# audit log for kubernetes
+kubernetes_audit: false
+
+# define kubelet config dir for dynamic kubelet
+# kubelet_config_dir:
+default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
+
+# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
+podsecuritypolicy_enabled: false
+
+# Custom PodSecurityPolicySpec for restricted policy
+# podsecuritypolicy_restricted_spec: {}
+
+# Custom PodSecurityPolicySpec for privileged policy
+# podsecuritypolicy_privileged_spec: {}
+
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
+# kubeconfig_localhost: false
+# Use ansible_host as external API IP when copying over kubeconfig.
+# kubeconfig_localhost_ansible_host: false
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
+# kubectl_localhost: false
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Optionally reserve resources for OS system daemons.
+# system_reserved: true
+## Uncomment to override default values
+# system_memory_reserved: 512Mi
+# system_cpu_reserved: 500m
+# system_ephemeral_storage_reserved: 2Gi
+## Reservation for master hosts
+# system_master_memory_reserved: 256Mi
+# system_master_cpu_reserved: 250m
+# system_master_ephemeral_storage_reserved: 2Gi
+
+## Eviction Thresholds to avoid system OOMs
+# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds
+# eviction_hard: {}
+# eviction_hard_control_plane: {}
+
+# An alternative flexvolume plugin directory
+# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful, for example, to set up a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
+
+## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
+## See https://github.com/kubernetes-sigs/kubespray/issues/2141
+## Set this variable to true to get rid of this issue
+volume_cross_zone_attachment: false
+## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
+## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
+persistent_volumes_enabled: false
+
+## Container Engine Acceleration
+## Enable container acceleration feature, for example use gpu acceleration in containers
+# nvidia_accelerator_enabled: true
+## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset.
+## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
+## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers.
+## Labels and taints won't be set to nodes if they are not in the array.
+# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). 
+## The subnets of each node will be distributed by the datacenter router
+# peer_with_router: false
+
+# Enables Internet connectivity from containers
+# nat_outgoing: true
+
+# Enables Calico CNI "host-local" IPAM plugin
+# calico_ipam_host_local: true
+
+# add default ippool name
+# calico_pool_name: "default-pool"
+
+# add default ippool blockSize (defaults to kube_network_node_prefix)
+calico_pool_blocksize: 26
+
+# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
+# calico_pool_cidr: 1.2.3.4/5
+
+# add default ippool CIDR to CNI config
+# calico_cni_pool: true
+
+# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
+# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+
+# Add default IPV6 IPPool CIDR to CNI config
+# calico_cni_pool_ipv6: true
+
+# Global as_num (/calico/bgp/v1/global/as_num)
+# global_as_num: "64512"
+
+# If doing peering with node-assigned ASNs where the global AS number does not match your nodes, you want this
+# to be true. In all other cases, false.
+# calico_no_global_as_num: false
+
+# You can set MTU value here. If left undefined or empty, it will
+# not be specified in calico CNI config, so Calico will use built-in
+# defaults. The value should be a number, not a string.
+# calico_mtu: 1500
+
+# Configure the MTU to use for workload interfaces and tunnels.
+# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440)
+# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
+# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
+# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
+# calico_veth_mtu: 1440
+
+# Advertise Cluster IPs
+# calico_advertise_cluster_ips: true
+
+# Advertise Service External IPs
+# calico_advertise_service_external_ips:
+#   - x.x.x.x/24
+#   - y.y.y.y/32
+
+# Advertise Service LoadBalancer IPs
+# calico_advertise_service_loadbalancer_ips:
+#   - x.x.x.x/24
+#   - y.y.y.y/16
+
+# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
+# calico_datastore: "kdd"
+
+# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
+# calico_iptables_backend: "Auto"
+
+# Use typha (only with kdd)
+# typha_enabled: false
+
+# Generate TLS certs for secure typha<->calico-node communication
+# typha_secure: false
+
+# Scaling typha: 1 replica per 100 nodes is adequate
+# Number of typha replicas
+# typha_replicas: 1
+
+# Set max typha connections
+# typha_max_connections_lower_limit: 300
+
+# Set calico network backend: "bird", "vxlan" or "none"
+# bird enables BGP routing, required for ipip and no-encapsulation modes
+# calico_network_backend: vxlan
+
+# IP in IP and VXLAN are mutually exclusive modes.
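+# Example (illustrative): a VXLAN-only setup consistent with the two settings
+# documented below would combine:
+# calico_network_backend: vxlan
+# calico_vxlan_mode: 'Always'
+# calico_ipip_mode: 'Never'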
+# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want use the default route interface when you use multiple interface with dynamique route (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tunning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. +# If left blank, then the interface is choosing using the node's +# default route. +# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. 
+# - In order to support External Workloads, "crd" is required
+#   - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
+# - KVStore operations are only required when cilium-operator is running with any of the below options:
+#   - --synchronize-k8s-services
+#   - --synchronize-k8s-nodes
+#   - --identity-allocation-mode=kvstore
+#   - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
+# cilium_identity_allocation_mode: kvstore
+
+# Etcd SSL dirs
+# cilium_cert_dir: /etc/cilium/certs
+# kube_etcd_cacert_file: ca.pem
+# kube_etcd_cert_file: cert.pem
+# kube_etcd_key_file: cert-key.pem
+
+# Limits for apps
+# cilium_memory_limit: 500M
+# cilium_cpu_limit: 500m
+# cilium_memory_requests: 64M
+# cilium_cpu_requests: 100m
+
+# Overlay Network Mode
+# cilium_tunnel_mode: vxlan
+# Optional features
+# cilium_enable_prometheus: false
+# Enable if you want to make use of hostPort mappings
+# cilium_enable_portmap: false
+# Monitor aggregation level (none/low/medium/maximum)
+# cilium_monitor_aggregation: medium
+# The monitor aggregation flags determine which TCP flags, upon the
+# first observation, cause monitor notifications to be generated.
+#
+# Only effective when monitor aggregation is set to "medium" or higher.
+# cilium_monitor_aggregation_flags: "all"
+# Kube Proxy Replacement mode (strict/probe/partial)
+# cilium_kube_proxy_replacement: probe
+
+# If upgrading from Cilium < 1.5, you may want to override some of these options
+# to prevent service disruptions. See also:
+# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
+# cilium_preallocate_bpf_maps: false
+
+# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
+# cilium_tofqdns_enable_poller: false
+
+# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
+# cilium_enable_legacy_services: false
+
+# Unique ID of the cluster. Must be unique across all connected clusters and
+# in the range of 1 to 255. Only relevant when building a mesh of clusters.
+# This value is not defined by default
+# cilium_cluster_id:
+
+# Deploy cilium even if kube_network_plugin is not cilium.
+# This makes it possible to deploy cilium alongside another CNI to replace kube-proxy.
+# cilium_deploy_additionally: false
+
+# Auto direct node routes can be used to advertise pod routes in your cluster
+# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`).
+# This works only if you have L2 connectivity between all your nodes.
+# You will also have to specify the variable `cilium_native_routing_cidr` to
+# make this work. Please refer to the cilium documentation for more
+# information about this kind of setup.
+# cilium_auto_direct_node_routes: false
+
+# Allows one to explicitly specify the IPv4 CIDR for native routing.
+# When specified, Cilium assumes networking for this CIDR is preconfigured and
+# hands traffic destined for that range to the Linux network stack without
+# applying any SNAT.
+# Generally speaking, specifying a native routing CIDR implies that Cilium can
+# depend on the underlying networking stack to route packets to their
+# destination. To offer a concrete example, if Cilium is configured to use
+# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+# the user must configure the routes to reach pods, either manually or by
+# setting the auto-direct-node-routes flag.
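+# Example (illustrative, assuming L2 connectivity between all nodes and the
+# pod subnet 10.233.64.0/18 used elsewhere in this inventory): direct routing
+# without tunnelling could be sketched as:
+# cilium_tunnel_mode: disabled
+# cilium_auto_direct_node_routes: true
+# cilium_native_routing_cidr: 10.233.64.0/18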
+# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. 
+# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. 
+# cilium_disable_cnp_status_updates: true
diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-flannel.yml
new file mode 100644
index 0000000..64d20a8
--- /dev/null
+++ b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-flannel.yml
@@ -0,0 +1,18 @@
+# see roles/network_plugin/flannel/defaults/main.yml
+
+## interface that should be used for flannel operations
+## This is actually an inventory cluster-level item
+# flannel_interface:
+
+## Select interface that should be used for flannel operations by regexp on Name or IP
+## This is actually an inventory cluster-level item
+## example: select interface with ip from net 10.0.0.0/23
+## single quote and escape backslashes
+# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
+
+# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard'
+# please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md
+# flannel_backend_type: "vxlan"
+# flannel_vxlan_vni: 1
+# flannel_vxlan_port: 8472
+# flannel_vxlan_direct_routing: false
diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-kube-ovn.yml
new file mode 100644
index 0000000..d580e15
--- /dev/null
+++ b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-kube-ovn.yml
@@ -0,0 +1,57 @@
+---
+
+# geneve or vlan
+kube_ovn_network_type: geneve
+
+# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt needs a custom-compiled OVS kernel module
+kube_ovn_tunnel_type: geneve
+
+## The NIC supporting the container network can be a NIC name or a group of regexes separated by commas, e.g. 'enp6s0f0,eth.*'; if empty, the NIC used by the default route is chosen.
+# kube_ovn_iface: eth1
+## The MTU used by pod iface in overlay networks (default iface MTU - 100)
+# kube_ovn_mtu: 1333
+
+## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port.
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/extra_playbooks/inventory/local/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works +# only with Weave IPAM (default). 
+# weave_no_masq_local: true
+
+# set to nft to use nftables backend for iptables (default is iptables)
+# weave_iptables_backend: iptables
+
+# Extra variables that are passed to launch.sh, useful for enabling seed mode, see
+# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
+# weave_extra_args: ~
+
+# Extra variables for weave_npc that are passed to launch.sh, useful for changing the log level, e.g. --log-level=error
+# weave_npc_extra_args: ~
diff --git a/kubespray/extra_playbooks/inventory/local/hosts.ini b/kubespray/extra_playbooks/inventory/local/hosts.ini
new file mode 100644
index 0000000..4a6197e
--- /dev/null
+++ b/kubespray/extra_playbooks/inventory/local/hosts.ini
@@ -0,0 +1,14 @@
+node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases
+
+[kube_control_plane]
+node1
+
+[etcd]
+node1
+
+[kube_node]
+node1
+
+[k8s_cluster:children]
+kube_node
+kube_control_plane
diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/all.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/all.yml
new file mode 100644
index 0000000..b9639a8
--- /dev/null
+++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/all.yml
@@ -0,0 +1,140 @@
+---
+## Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+# access_ip: 1.1.1.1
+
+
+## External LB example config
+## apiserver_loadbalancer_domain_name: "elb.some.domain"
+# loadbalancer_apiserver:
+#   address: 1.2.3.4
+#   port: 1234
+
+## Internal loadbalancers for apiservers
+# loadbalancer_apiserver_localhost: true
+# valid options are "nginx" or "haproxy"
+# loadbalancer_apiserver_type: nginx  # valid values "nginx" or "haproxy"
+
+## If Cilium is going to be used in strict mode, we can use the
+## localhost connection and not use the external LB. If this parameter is
+## not specified, the first node to connect to kubeapi will be used.
+# use_localhost_as_kubeapi_loadbalancer: true
+
+## The local loadbalancer should use this port,
+## and it must be set to port 6443
+loadbalancer_apiserver_port: 6443
+
+## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
+loadbalancer_apiserver_healthcheck_port: 8081
+
+### OTHER OPTIONAL VARIABLES
+
+## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers to nameserverentries.
+## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it still uses the host nameservers in the dns_early stage to make sure the cluster installs safely.
+## Use this option with caution; you may need to define your dns servers. Otherwise, outbound queries such as www.google.com may fail.
+# disable_host_nameservers: false
+
+## Upstream dns servers
+# upstream_dns_servers:
+#   - 8.8.8.8
+#   - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set, the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
+## When openstack is used, make sure to source in the openstack credentials
+## like you would do when using openstack-client before starting the playbook.
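+## Example (illustrative): on OpenStack, for instance, one would source the
+## credentials in the shell that runs Ansible (e.g. `source openrc.sh`) and then set:
+# cloud_provider: openstack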
+# cloud_provider:
+
+## When cloud_provider is set to 'external', you can set the cloud controller to deploy
+## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud'
+## When openstack or vsphere are used, make sure to source in the required fields
+# external_cloud_provider:
+
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+# http_proxy: ""
+# https_proxy: ""
+
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+# no_proxy: ""
+
+## Some problems may occur when downloading files over an https proxy due to an ansible bug
+## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
+## SSL validation of the get_url module. Note that kubespray will still be performing checksum validation.
+# download_validate_certs: False
+
+## If you need to exclude all cluster nodes from proxy and other resources, add other resources here.
+# additional_no_proxy: ""
+
+## If you need to disable proxying of os package repositories but are still behind an http_proxy set
+## skip_http_proxy_on_os_packages to true
+## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
+## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will install from the source you wish
+# skip_http_proxy_on_os_packages: false
+
+## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
+## pods will restart) when adding or removing workers. To override this behaviour and only include master nodes in the
+## no_proxy variable, set the variable below to true:
+no_proxy_exclude_workers: false
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts.
+## Choose 'none' if you provide your own certificates.
+## Options are "script" and "none"
+# cert_management: script
+
+## Set to true to allow pre-checks to fail and continue deployment
+# ignore_assert_errors: false
+
+## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
+# kube_read_only_port: 10255
+
+## Set to true to download and cache container images
+# download_container: true
+
+## Deploy container engine
+# Set to false if you want to deploy the container engine manually.
+# deploy_container_engine: true
+
+## Red Hat Enterprise Linux subscription registration
+## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
+## Update RHEL subscription purpose usage, role and SLA if necessary
+# rh_subscription_username: ""
+# rh_subscription_password: ""
+# rh_subscription_org_id: ""
+# rh_subscription_activation_key: ""
+# rh_subscription_usage: "Development"
+# rh_subscription_role: "Red Hat Enterprise Server"
+# rh_subscription_sla: "Self-Support"
+
+## Check if access_ip responds to ping. Set to false if your firewall blocks ICMP.
+# ping_access_ip: true
+
+# sysctl_file_path to add sysctl conf to
+# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
+
+## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+kube_webhook_token_auth: false
+kube_webhook_token_auth_url_skip_tls_verify: false
+# kube_webhook_token_auth_url: https://...
+## base64-encoded string of the webhook's CA certificate
+# kube_webhook_token_auth_ca_data: "LS0t..."
+
+## NTP Settings
+# Start the ntpd or chrony service and enable it at system boot.
+ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/aws.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/azure.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/containerd.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# 
containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/coreos.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/cri-o.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/docker.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. 
+## example define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registry,example China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value. +## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/etcd.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/gcp.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/hcloud.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git 
a/kubespray/extra_playbooks/inventory/sample/group_vars/all/oci.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. +# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/offline.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ 
image_arch }}.tar.gz"
+
+## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
+# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
+
+# [Optional] Calico: If using Calico network plugin
+# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
+# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
+# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
+# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
+
+# [Optional] Cilium: If using Cilium network plugin
+# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
+
+# [Optional] Flannel: If using Flannel network plugin
+# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}"
+
+# [Optional] helm: only if you set helm_enabled: true
+# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
+
+# [Optional] crun: only if you set crun_enabled: true
+# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
+
+# [Optional] kata: only if you set kata_containers_enabled: true
+# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"
+
+# [Optional] cri-dockerd: only if you set container_manager: docker
+# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"
+
+# [Optional] cri-o: only if you set container_manager: crio
+# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
+# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
+
+# [Optional] runc,containerd: only if you set container_runtime: containerd
+# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
+# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
+# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
+
+# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true
+# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc"
+# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
+
+## CentOS/Redhat/AlmaLinux
+### For EL7, base and extras repos must be available, for EL8, baseos and appstream
+### By default we enable those repos automatically
+# rhel_enable_repos: false
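+## Example (illustrative): with yum_repo set to the sample value above
+## ("http://myinternalyumrepo"), the docker_rh_repo_base_url override in the
+## Docker / Containerd section below would resolve to:
+# docker_rh_repo_base_url: "http://myinternalyumrepo/docker-ce/$releasever/$basearch"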
+### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/openstack.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. 
+# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+## Override default LBaaS behavior
+# openstack_lbaas_use_octavia: False
+# openstack_lbaas_method: "ROUND_ROBIN"
+# openstack_lbaas_provider: "haproxy"
+# openstack_lbaas_create_monitor: "yes"
+# openstack_lbaas_monitor_delay: "1m"
+# openstack_lbaas_monitor_timeout: "30s"
+# openstack_lbaas_monitor_max_retries: "3"
+
+## Values for the external OpenStack Cloud Controller
+# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
+# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
+# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
+# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
+# external_openstack_lbaas_method: "ROUND_ROBIN"
+# external_openstack_lbaas_provider: "octavia"
+# external_openstack_lbaas_create_monitor: false
+# external_openstack_lbaas_monitor_delay: "1m"
+# external_openstack_lbaas_monitor_timeout: "30s"
+# external_openstack_lbaas_monitor_max_retries: "3"
+# external_openstack_lbaas_manage_security_groups: false
+# external_openstack_lbaas_internal_lb: false
+# external_openstack_network_ipv6_disabled: false
+# external_openstack_network_internal_networks: []
+# external_openstack_network_public_networks: []
+# external_openstack_metadata_search_order: "configDrive,metadataService"
+
+## Application credentials to authenticate against Keystone API
+## Those settings will take precedence over username and password that might be set in your environment
+## All of them are required
+# external_openstack_application_credential_name:
+# external_openstack_application_credential_id:
+# external_openstack_application_credential_secret:
+
+## The tag of the external OpenStack Cloud Controller image
+# external_openstack_cloud_controller_image_tag: "latest"
+
+## To use Cinder CSI plugin to provision volumes set this value to true
+## Make sure to source in the openstack credentials
+# cinder_csi_enabled: true
+# cinder_csi_controller_replicas: 1
diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/upcloud.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@
+## Repo for UpCloud's csi-driver: https://github.com/UpCloudLtd/upcloud-csi
+## To use UpCloud's CSI plugin to provision volumes set this value to true
+## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD
+# upcloud_csi_enabled: true
+# upcloud_csi_controller_replicas: 1
+## Override used image tags
+# upcloud_csi_provisioner_image_tag: "v3.1.0"
+# upcloud_csi_attacher_image_tag: "v3.4.0"
+# upcloud_csi_resizer_image_tag: "v1.4.0"
+# upcloud_csi_plugin_image_tag: "v0.3.3"
+# upcloud_csi_node_image_tag: "v2.5.0"
+# upcloud_tolerations: []
+## Storage class options
+# storage_classes:
+# - name: standard
+# is_default: true
+# expand_persistent_volumes: true
+# parameters:
+# tier: maxiops
+# - name: hdd
+# is_default: false
+# expand_persistent_volumes: true
+# parameters:
+# tier: hdd \ No newline at end of file
diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/all/vsphere.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ 
b/kubespray/extra_playbooks/inventory/sample/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/etcd.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. 
+# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/addons.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# 
rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: 
"layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... 
+# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. 
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). +# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. 
+## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. +# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. 
+# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). 
+## The subnets of each node will be distributed by the datacenter router
+# peer_with_router: false
+
+# Enables Internet connectivity from containers
+# nat_outgoing: true
+
+# Enables Calico CNI "host-local" IPAM plugin
+# calico_ipam_host_local: true
+
+# add default ippool name
+# calico_pool_name: "default-pool"
+
+# add default ippool blockSize (defaults kube_network_node_prefix)
+calico_pool_blocksize: 26
+
+# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
+# calico_pool_cidr: 1.2.3.4/5
+
+# add default ippool CIDR to CNI config
+# calico_cni_pool: true
+
+# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
+# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+
+# Add default IPV6 IPPool CIDR to CNI config
+# calico_cni_pool_ipv6: true
+
+# Global as_num (/calico/bgp/v1/global/as_num)
+# global_as_num: "64512"
+
+# If doing peering with node-assigned asn where the global ASN does not match your nodes, you want this
+# to be true. All other cases, false.
+# calico_no_global_as_num: false
+
+# You can set MTU value here. If left undefined or empty, it will
+# not be specified in calico CNI config, so Calico will use built-in
+# defaults. The value should be a number, not a string.
+# calico_mtu: 1500
+
+# Configure the MTU to use for workload interfaces and tunnels.
+# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440)
+# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
+# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
+# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
+# calico_veth_mtu: 1440
+
+# Advertise Cluster IPs
+# calico_advertise_cluster_ips: true
+
+# Advertise Service External IPs
+# calico_advertise_service_external_ips:
+# - x.x.x.x/24
+# - y.y.y.y/32
+
+# Advertise Service LoadBalancer IPs
+# calico_advertise_service_loadbalancer_ips:
+# - x.x.x.x/24
+# - y.y.y.y/16
+
+# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
+# calico_datastore: "kdd"
+
+# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
+# calico_iptables_backend: "Auto"
+
+# Use typha (only with kdd)
+# typha_enabled: false
+
+# Generate TLS certs for secure typha<->calico-node communication
+# typha_secure: false
+
+# Scaling typha: 1 replica per 100 nodes is adequate
+# Number of typha replicas
+# typha_replicas: 1
+
+# Set max typha connections
+# typha_max_connections_lower_limit: 300
+
+# Set calico network backend: "bird", "vxlan" or "none"
+# bird enables BGP routing, required for ipip and no encapsulation modes
+# calico_network_backend: vxlan
+
+# IP in IP and VXLAN are mutually exclusive modes. 
+# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
+# calico_ipip_mode: 'Never'
+
+# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
+# calico_vxlan_mode: 'Always'
+
+# set VXLAN port and VNI
+# calico_vxlan_vni: 4096
+# calico_vxlan_port: 4789
+
+# Enable eBPF mode
+# calico_bpf_enabled: false
+
+# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of:
+# * can-reach=DESTINATION
+# * interface=INTERFACE-REGEX
+# see https://docs.projectcalico.org/reference/node/configuration
+# calico_ip_auto_method: "interface=eth.*"
+# calico_ip6_auto_method: "interface=eth.*"
+
+# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection.
+# see https://projectcalico.docs.tigera.io/reference/felix/configuration
+# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
+
+# Choose the iptables insert mode for Calico: "Insert" or "Append".
+# calico_felix_chaininsertmode: Insert
+
+# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2)
+# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
+# calico_use_default_route_src_ipaddr: false
+
+# Enable calico traffic encryption with wireguard
+# calico_wireguard_enabled: false
+
+# Under certain situations liveness and readiness probes may need tuning
+# calico_node_livenessprobe_timeout: 10
+# calico_node_readinessprobe_timeout: 10
+
+# Calico apiserver (only with kdd)
+# calico_apiserver_enabled: false
diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@
+# see roles/network_plugin/canal/defaults/main.yml
+
+# The interface used by canal for host <-> host communication.
+# If left blank, then the interface is chosen using the node's
+# default route.
+# canal_iface: ""
+
+# Whether or not to masquerade traffic to destinations not within
+# the pod network.
+# canal_masquerade: "true"
diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@
+---
+# cilium_version: "v1.12.1"
+
+# Log-level
+# cilium_debug: false
+
+# cilium_mtu: ""
+# cilium_enable_ipv4: true
+# cilium_enable_ipv6: false
+
+# Cilium agent health port
+# cilium_agent_health_port: "9879"
+
+# Identity allocation mode selects how identities are shared between cilium
+# nodes by setting how they are stored. The options are "crd" or "kvstore".
+# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
+# These can be queried with:
+# `kubectl get ciliumid`
+# - "kvstore" stores identities in an etcd kvstore. 
+# - In order to support External Workloads, "crd" is required
+# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
+# - KVStore operations are only required when cilium-operator is running with any of the below options:
+# - --synchronize-k8s-services
+# - --synchronize-k8s-nodes
+# - --identity-allocation-mode=kvstore
+# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
+# cilium_identity_allocation_mode: kvstore
+
+# Etcd SSL dirs
+# cilium_cert_dir: /etc/cilium/certs
+# kube_etcd_cacert_file: ca.pem
+# kube_etcd_cert_file: cert.pem
+# kube_etcd_key_file: cert-key.pem
+
+# Limits for apps
+# cilium_memory_limit: 500M
+# cilium_cpu_limit: 500m
+# cilium_memory_requests: 64M
+# cilium_cpu_requests: 100m
+
+# Overlay Network Mode
+# cilium_tunnel_mode: vxlan
+# Optional features
+# cilium_enable_prometheus: false
+# Enable if you want to make use of hostPort mappings
+# cilium_enable_portmap: false
+# Monitor aggregation level (none/low/medium/maximum)
+# cilium_monitor_aggregation: medium
+# The monitor aggregation flags determine which TCP flags, upon the
+# first observation, cause monitor notifications to be generated.
+#
+# Only effective when monitor aggregation is set to "medium" or higher.
+# cilium_monitor_aggregation_flags: "all"
+# Kube Proxy Replacement mode (strict/probe/partial)
+# cilium_kube_proxy_replacement: probe
+
+# If upgrading from Cilium < 1.5, you may want to override some of these options
+# to prevent service disruptions. See also:
+# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
+# cilium_preallocate_bpf_maps: false
+
+# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
+# cilium_tofqdns_enable_poller: false
+
+# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
+# cilium_enable_legacy_services: false
+
+# Unique ID of the cluster. Must be unique across all connected clusters and
+# in the range of 1 and 255. Only relevant when building a mesh of clusters.
+# This value is not defined by default
+# cilium_cluster_id:
+
+# Deploy cilium even if kube_network_plugin is not cilium.
+# This allows deploying cilium alongside another CNI to replace kube-proxy.
+# cilium_deploy_additionally: false
+
+# Auto direct nodes routes can be used to advertise pods routes in your cluster
+# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`).
+# This works only if you have L2 connectivity between all your nodes.
+# You will also have to specify the variable `cilium_native_routing_cidr` to
+# make this work. Please refer to the cilium documentation for more
+# information about this kind of setup.
+# cilium_auto_direct_node_routes: false
+
+# Allows to explicitly specify the IPv4 CIDR for native routing.
+# When specified, Cilium assumes networking for this CIDR is preconfigured and
+# hands traffic destined for that range to the Linux network stack without
+# applying any SNAT.
+# Generally speaking, specifying a native routing CIDR implies that Cilium can
+# depend on the underlying networking stack to route packets to their
+# destination. To offer a concrete example, if Cilium is configured to use
+# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+# the user must configure the routes to reach pods, either manually or by
+# setting the auto-direct-node-routes flag. 
+# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. 
+# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. 
+# cilium_disable_cnp_status_updates: true diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic to support container network can be a nic name or a group of regex separated by comma e.g: 'enp6s0f0,eth.*', if empty will use the nic that the default route use. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bind to the physical port. 
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works +# only with Weave IPAM (default). 
+# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/extra_playbooks/inventory/sample/inventory.ini b/kubespray/extra_playbooks/inventory/sample/inventory.ini new file mode 100644 index 0000000..99a6309 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/inventory.ini @@ -0,0 +1,38 @@ +# ## Configure 'ip' variable to bind kubernetes services on a +# ## different ip than the default iface +# ## We should set etcd_member_name for etcd cluster. The node that is not a etcd member do not need to set the value, or can set the empty string value. +[all] +# node1 ansible_host=95.54.0.12 # ip=10.3.0.1 etcd_member_name=etcd1 +# node2 ansible_host=95.54.0.13 # ip=10.3.0.2 etcd_member_name=etcd2 +# node3 ansible_host=95.54.0.14 # ip=10.3.0.3 etcd_member_name=etcd3 +# node4 ansible_host=95.54.0.15 # ip=10.3.0.4 etcd_member_name=etcd4 +# node5 ansible_host=95.54.0.16 # ip=10.3.0.5 etcd_member_name=etcd5 +# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6 + +# ## configure a bastion host if your nodes are not directly reachable +# [bastion] +# bastion ansible_host=x.x.x.x ansible_user=some_user + +[kube_control_plane] +# node1 +# node2 +# node3 + +[etcd] +# node1 +# node2 +# node3 + +[kube_node] +# node2 +# node3 +# node4 +# node5 +# node6 + +[calico_rr] + +[k8s_cluster:children] +kube_control_plane +kube_node +calico_rr diff --git a/kubespray/extra_playbooks/inventory/sample/patches/kube-controller-manager+merge.yaml b/kubespray/extra_playbooks/inventory/sample/patches/kube-controller-manager+merge.yaml new file mode 100644 index 0000000..a8aa5a7 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/patches/kube-controller-manager+merge.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-controller-manager + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '10257' \ No newline at end of file diff --git a/kubespray/extra_playbooks/inventory/sample/patches/kube-scheduler+merge.yaml b/kubespray/extra_playbooks/inventory/sample/patches/kube-scheduler+merge.yaml new file mode 100644 index 0000000..0bb3950 --- /dev/null +++ b/kubespray/extra_playbooks/inventory/sample/patches/kube-scheduler+merge.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-scheduler + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '10259' \ No newline at end of file diff --git a/kubespray/extra_playbooks/migrate_openstack_provider.yml b/kubespray/extra_playbooks/migrate_openstack_provider.yml new file mode 100644 index 0000000..2ce86d5 --- /dev/null +++ b/kubespray/extra_playbooks/migrate_openstack_provider.yml @@ -0,0 +1,28 @@ +--- +- hosts: kube_node:kube_control_plane + tasks: + - name: Remove old cloud provider config + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/kubernetes/cloud_config +- hosts: kube_control_plane[0] + tasks: + - name: Include kubespray-default variables + include_vars: ../roles/kubespray-defaults/defaults/main.yaml + - name: Copy get_cinder_pvs.sh to master + copy: + src: get_cinder_pvs.sh + dest: /tmp + mode: u+rwx + - name: Get PVs provisioned by in-tree cloud 
provider # noqa 301 + command: /tmp/get_cinder_pvs.sh + register: pvs + - name: Remove get_cinder_pvs.sh + file: + path: /tmp/get_cinder_pvs.sh + state: absent + - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301 + command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org" + loop: "{{ pvs.stdout_lines | list }}" diff --git a/kubespray/extra_playbooks/roles/adduser/defaults/main.yml b/kubespray/extra_playbooks/roles/adduser/defaults/main.yml new file mode 100644 index 0000000..faf258d --- /dev/null +++ b/kubespray/extra_playbooks/roles/adduser/defaults/main.yml @@ -0,0 +1,27 @@ +--- +kube_owner: kube +kube_cert_group: kube-cert +etcd_data_dir: "/var/lib/etcd" + +addusers: + etcd: + name: etcd + comment: "Etcd user" + create_home: no + system: yes + shell: /sbin/nologin + kube: + name: kube + comment: "Kubernetes user" + create_home: no + system: yes + shell: /sbin/nologin + group: "{{ kube_cert_group }}" + +adduser: + name: "{{ user.name }}" + group: "{{ user.name|default(None) }}" + comment: "{{ user.comment|default(None) }}" + shell: "{{ user.shell|default(None) }}" + system: "{{ user.system|default(None) }}" + create_home: "{{ user.create_home|default(None) }}" diff --git a/kubespray/extra_playbooks/roles/adduser/molecule/default/converge.yml b/kubespray/extra_playbooks/roles/adduser/molecule/default/converge.yml new file mode 100644 index 0000000..47ff6c7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/adduser/molecule/default/converge.yml @@ -0,0 +1,10 @@ +--- +- name: Converge + hosts: all + become: true + gather_facts: false + roles: + - role: adduser + vars: + user: + name: foo diff --git a/kubespray/extra_playbooks/roles/adduser/molecule/default/molecule.yml b/kubespray/extra_playbooks/roles/adduser/molecule/default/molecule.yml new file mode 100644 index 0000000..617677e --- /dev/null +++ b/kubespray/extra_playbooks/roles/adduser/molecule/default/molecule.yml @@ -0,0 +1,27 @@ +--- +dependency: + name: galaxy +lint: | + set -e + yamllint -c ../../.yamllint . 
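+# Test scenario: Molecule boots the adduser-01 box below via Vagrant/libvirt, runs converge.yml (adduser role with user "foo"), and the testinfra check then asserts that the user and its group exist.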
+driver: + name: vagrant + provider: + name: libvirt +platforms: + - name: adduser-01 + box: generic/ubuntu2004 + cpus: 1 + memory: 512 +provisioner: + name: ansible + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/extra_playbooks/roles/adduser/molecule/default/tests/test_default.py b/kubespray/extra_playbooks/roles/adduser/molecule/default/tests/test_default.py new file mode 100644 index 0000000..4c81047 --- /dev/null +++ b/kubespray/extra_playbooks/roles/adduser/molecule/default/tests/test_default.py @@ -0,0 +1,37 @@ +import os +import yaml +import glob +import testinfra.utils.ansible_runner +from ansible.playbook import Playbook +from ansible.cli.playbook import PlaybookCLI + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + +def read_playbook(playbook): + cli_args = [os.path.realpath(playbook), testinfra_hosts] + cli = PlaybookCLI(cli_args) + cli.parse() + loader, inventory, variable_manager = cli._play_prereqs() + + pb = Playbook.load(cli.args[0], variable_manager, loader) + + for play in pb.get_plays(): + yield variable_manager.get_vars(play) + +def get_playbook(): + with open(os.path.realpath(' '.join(map(str,glob.glob('molecule.*')))), 'r') as yamlfile: + data = yaml.load(yamlfile, Loader=yaml.FullLoader) + if 'playbooks' in data['provisioner'].keys(): + if 'converge' in data['provisioner']['playbooks'].keys(): + return data['provisioner']['playbooks']['converge'] + else: + return ' '.join(map(str,glob.glob('converge.*'))) + +def test_user(host): + for vars in read_playbook(get_playbook()): + assert host.user(vars['user']['name']).exists + if 'group' in vars['user'].keys(): + assert host.group(vars['user']['group']).exists + else: + assert host.group(vars['user']['name']).exists diff --git a/kubespray/extra_playbooks/roles/adduser/tasks/main.yml b/kubespray/extra_playbooks/roles/adduser/tasks/main.yml new file mode 100644 index 0000000..51dd5bb --- /dev/null +++ b/kubespray/extra_playbooks/roles/adduser/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: User | Create User Group + group: + name: "{{ user.group|default(user.name) }}" + system: "{{ user.system|default(omit) }}" + +- name: User | Create User + user: + comment: "{{ user.comment|default(omit) }}" + create_home: "{{ user.create_home|default(omit) }}" + group: "{{ user.group|default(user.name) }}" + home: "{{ user.home|default(omit) }}" + shell: "{{ user.shell|default(omit) }}" + name: "{{ user.name }}" + system: "{{ user.system|default(omit) }}" + when: user.name != "root" diff --git a/kubespray/extra_playbooks/roles/adduser/vars/coreos.yml b/kubespray/extra_playbooks/roles/adduser/vars/coreos.yml new file mode 100644 index 0000000..5c258df --- /dev/null +++ b/kubespray/extra_playbooks/roles/adduser/vars/coreos.yml @@ -0,0 +1,8 @@ +--- +addusers: + - name: kube + comment: "Kubernetes user" + shell: /sbin/nologin + system: yes + group: "{{ kube_cert_group }}" + create_home: no diff --git a/kubespray/extra_playbooks/roles/adduser/vars/debian.yml b/kubespray/extra_playbooks/roles/adduser/vars/debian.yml new file mode 100644 index 0000000..99e5b38 --- /dev/null +++ b/kubespray/extra_playbooks/roles/adduser/vars/debian.yml @@ -0,0 +1,15 @@ +--- +addusers: + - name: etcd + comment: "Etcd user" + create_home: yes + home: "{{ etcd_data_dir }}" + system: yes + shell: /sbin/nologin + + - name: kube + comment: "Kubernetes user" + 
create_home: no + system: yes + shell: /sbin/nologin + group: "{{ kube_cert_group }}" diff --git a/kubespray/extra_playbooks/roles/adduser/vars/redhat.yml b/kubespray/extra_playbooks/roles/adduser/vars/redhat.yml new file mode 100644 index 0000000..99e5b38 --- /dev/null +++ b/kubespray/extra_playbooks/roles/adduser/vars/redhat.yml @@ -0,0 +1,15 @@ +--- +addusers: + - name: etcd + comment: "Etcd user" + create_home: yes + home: "{{ etcd_data_dir }}" + system: yes + shell: /sbin/nologin + + - name: kube + comment: "Kubernetes user" + create_home: no + system: yes + shell: /sbin/nologin + group: "{{ kube_cert_group }}" diff --git a/kubespray/extra_playbooks/roles/bastion-ssh-config/defaults/main.yml b/kubespray/extra_playbooks/roles/bastion-ssh-config/defaults/main.yml new file mode 100644 index 0000000..d322814 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bastion-ssh-config/defaults/main.yml @@ -0,0 +1,2 @@ +--- +ssh_bastion_confing__name: ssh-bastion.conf \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/bastion-ssh-config/molecule/default/converge.yml b/kubespray/extra_playbooks/roles/bastion-ssh-config/molecule/default/converge.yml new file mode 100644 index 0000000..54a6247 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bastion-ssh-config/molecule/default/converge.yml @@ -0,0 +1,15 @@ +--- +- name: Converge + hosts: all + become: true + gather_facts: false + roles: + - role: bastion-ssh-config + tasks: + - name: Copy config to remote host + copy: + src: "{{ playbook_dir }}/{{ ssh_bastion_confing__name }}" + dest: "{{ ssh_bastion_confing__name }}" + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: 0644 diff --git a/kubespray/extra_playbooks/roles/bastion-ssh-config/molecule/default/molecule.yml b/kubespray/extra_playbooks/roles/bastion-ssh-config/molecule/default/molecule.yml new file mode 100644 index 0000000..5cadd6e --- /dev/null +++ b/kubespray/extra_playbooks/roles/bastion-ssh-config/molecule/default/molecule.yml @@ -0,0 +1,35 @@ +--- +dependency: + name: galaxy +lint: | + set -e + yamllint -c ../../.yamllint . 
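+# Test scenario: the inventory below pins bastion-01 into the [bastion] group so the role can render ssh-bastion.conf; testinfra then checks that the generated file exists on the host.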
+driver: + name: vagrant + provider: + name: libvirt +platforms: + - name: bastion-01 + box: generic/ubuntu2004 + cpus: 1 + memory: 512 +provisioner: + name: ansible + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + inventory: + hosts: + all: + hosts: + children: + bastion: + hosts: + bastion-01: +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/extra_playbooks/roles/bastion-ssh-config/molecule/default/tests/test_default.py b/kubespray/extra_playbooks/roles/bastion-ssh-config/molecule/default/tests/test_default.py new file mode 100644 index 0000000..f98faa4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bastion-ssh-config/molecule/default/tests/test_default.py @@ -0,0 +1,34 @@ +import os +import yaml +import glob +import testinfra.utils.ansible_runner +from ansible.playbook import Playbook +from ansible.cli.playbook import PlaybookCLI + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + +def read_playbook(playbook): + cli_args = [os.path.realpath(playbook), testinfra_hosts] + cli = PlaybookCLI(cli_args) + cli.parse() + loader, inventory, variable_manager = cli._play_prereqs() + + pb = Playbook.load(cli.args[0], variable_manager, loader) + + for play in pb.get_plays(): + yield variable_manager.get_vars(play) + +def get_playbook(): + with open(os.path.realpath(' '.join(map(str,glob.glob('molecule.*')))), 'r') as yamlfile: + data = yaml.load(yamlfile, Loader=yaml.FullLoader) + if 'playbooks' in data['provisioner'].keys(): + if 'converge' in data['provisioner']['playbooks'].keys(): + return data['provisioner']['playbooks']['converge'] + else: + return ' '.join(map(str,glob.glob('converge.*'))) + +def test_ssh_config(host): + for vars in read_playbook(get_playbook()): + assert host.file(vars['ssh_bastion_confing__name']).exists + assert host.file(vars['ssh_bastion_confing__name']).is_file diff --git a/kubespray/extra_playbooks/roles/bastion-ssh-config/tasks/main.yml b/kubespray/extra_playbooks/roles/bastion-ssh-config/tasks/main.yml new file mode 100644 index 0000000..a18291b --- /dev/null +++ b/kubespray/extra_playbooks/roles/bastion-ssh-config/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: set bastion host IP and port + set_fact: + bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}" + bastion_port: "{{ hostvars[groups['bastion'][0]]['ansible_port'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_port']) | d(22) }}" + delegate_to: localhost + connection: local + +# As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly +# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_user in real_user +- name: Store the current ansible_user in the real_user fact + set_fact: + real_user: "{{ ansible_user }}" + +- name: create ssh bastion conf + become: false + delegate_to: localhost + connection: local + template: + src: "{{ ssh_bastion_confing__name }}.j2" + dest: "{{ playbook_dir }}/{{ ssh_bastion_confing__name }}" + mode: 0640 diff --git a/kubespray/extra_playbooks/roles/bastion-ssh-config/templates/ssh-bastion.conf.j2 b/kubespray/extra_playbooks/roles/bastion-ssh-config/templates/ssh-bastion.conf.j2 new file mode 100644 index 0000000..bd5f49c --- /dev/null +++ b/kubespray/extra_playbooks/roles/bastion-ssh-config/templates/ssh-bastion.conf.j2 @@ -0,0 +1,18 @@ +{% set 
vars={'hosts': ''} %} +{% set user='' %} + +{% for h in groups['all'] %} +{% if h not in groups['bastion'] %} +{% if vars.update({'hosts': vars['hosts'] + ' ' + (hostvars[h].get('ansible_ssh_host') or hostvars[h]['ansible_host'])}) %}{% endif %} +{% endif %} +{% endfor %} + +Host {{ bastion_ip }} + Hostname {{ bastion_ip }} + StrictHostKeyChecking no + ControlMaster auto + ControlPath ~/.ssh/ansible-%r@%h:%p + ControlPersist 5m + +Host {{ vars['hosts'] }} + ProxyCommand ssh -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -p {{ bastion_port }} {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/defaults/main.yml b/kubespray/extra_playbooks/roles/bootstrap-os/defaults/main.yml new file mode 100644 index 0000000..9b31456 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/defaults/main.yml @@ -0,0 +1,32 @@ +--- +## CentOS/RHEL/AlmaLinux specific variables +# Use the fastestmirror yum plugin +centos_fastestmirror_enabled: false + +## Flatcar Container Linux specific variables +# Disable locksmithd or leave it in its current state +coreos_locksmithd_disable: false + +## Oracle Linux specific variables +# Install public repo on Oracle Linux +use_oracle_public_repo: true + +fedora_coreos_packages: + - python + - python3-libselinux + - ethtool # required in kubeadm preflight phase for verifying the environment + - ipset # required in kubeadm preflight phase for verifying the environment + - conntrack-tools # required by kube-proxy + +## General +# Set the hostname to inventory_hostname +override_system_hostname: true + +is_fedora_coreos: false + +skip_http_proxy_on_os_packages: false + +# If this is true, debug information will be displayed but +# may contain some private data, so it is recommended to set it to false +# in the production environment. 
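+# For example, with unsafe_show_logs: true the RHEL subscription username/password registration task in bootstrap-redhat.yml is no longer masked by no_log.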
+unsafe_show_logs: false diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/files/bootstrap.sh b/kubespray/extra_playbooks/roles/bootstrap-os/files/bootstrap.sh new file mode 100755 index 0000000..69b7b75 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/files/bootstrap.sh @@ -0,0 +1,42 @@ +#!/bin/bash +set -e + +BINDIR="/opt/bin" +if [[ -e $BINDIR/.bootstrapped ]]; then + exit 0 +fi + +ARCH=$(uname -m) +case $ARCH in + "x86_64") + PYPY_ARCH=linux64 + PYPI_HASH=46818cb3d74b96b34787548343d266e2562b531ddbaf330383ba930ff1930ed5 + ;; + "aarch64") + PYPY_ARCH=aarch64 + PYPI_HASH=2e1ae193d98bc51439642a7618d521ea019f45b8fb226940f7e334c548d2b4b9 + ;; + *) + echo "Unsupported Architecture: ${ARCH}" + exit 1 +esac + +PYTHON_VERSION=3.9 +PYPY_VERSION=7.3.9 +PYPY_FILENAME="pypy${PYTHON_VERSION}-v${PYPY_VERSION}-${PYPY_ARCH}" +PYPI_URL="https://downloads.python.org/pypy/${PYPY_FILENAME}.tar.bz2" + +mkdir -p $BINDIR + +cd $BINDIR + +TAR_FILE=pyp.tar.bz2 +wget -O "${TAR_FILE}" "${PYPI_URL}" +echo "${PYPI_HASH} ${TAR_FILE}" | sha256sum -c - +tar -xjf "${TAR_FILE}" && rm "${TAR_FILE}" +mv -n "${PYPY_FILENAME}" pypy3 + +ln -s ./pypy3/bin/pypy3 python +$BINDIR/python --version + +touch $BINDIR/.bootstrapped diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/handlers/main.yml b/kubespray/extra_playbooks/roles/bootstrap-os/handlers/main.yml new file mode 100644 index 0000000..7c8c4fe --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: RHEL auto-attach subscription + command: /sbin/subscription-manager attach --auto + become: true diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/molecule/default/converge.yml b/kubespray/extra_playbooks/roles/bootstrap-os/molecule/default/converge.yml new file mode 100644 index 0000000..1f44ec9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/molecule/default/converge.yml @@ -0,0 +1,6 @@ +--- +- name: Converge + hosts: all + gather_facts: no + roles: + - role: bootstrap-os diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/molecule/default/molecule.yml b/kubespray/extra_playbooks/roles/bootstrap-os/molecule/default/molecule.yml new file mode 100644 index 0000000..8413baa --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/molecule/default/molecule.yml @@ -0,0 +1,57 @@ +--- +dependency: + name: galaxy +lint: | + set -e + yamllint -c ../../.yamllint . 
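+# Platforms below cover the supported distro families (Ubuntu 16.04/18.04/20.04, CentOS 7, AlmaLinux 8, Debian 9/10); the testinfra check only asserts that python or python3 is present after bootstrap.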
+driver: + name: vagrant + provider: + name: libvirt +platforms: + - name: ubuntu16 + box: generic/ubuntu1604 + cpus: 1 + memory: 512 + - name: ubuntu18 + box: generic/ubuntu1804 + cpus: 1 + memory: 512 + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 512 + - name: centos7 + box: centos/7 + cpus: 1 + memory: 512 + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 512 + - name: debian9 + box: generic/debian9 + cpus: 1 + memory: 512 + - name: debian10 + box: generic/debian10 + cpus: 1 + memory: 512 +provisioner: + name: ansible + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + inventory: + group_vars: + all: + user: + name: foo + comment: My test comment +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/molecule/default/tests/test_default.py b/kubespray/extra_playbooks/roles/bootstrap-os/molecule/default/tests/test_default.py new file mode 100644 index 0000000..64c59dd --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/molecule/default/tests/test_default.py @@ -0,0 +1,11 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE'] +).get_hosts('all') + + +def test_python(host): + assert host.exists('python3') or host.exists('python') diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-amazon.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-amazon.yml new file mode 100644 index 0000000..2b4d665 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-amazon.yml @@ -0,0 +1,13 @@ +--- +- name: Enable EPEL repo for Amazon Linux + yum_repository: + name: epel + file: epel + description: Extra Packages for Enterprise Linux 7 - $basearch + baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch + gpgcheck: yes + gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 + skip_if_unavailable: yes + enabled: yes + repo_gpgcheck: no + when: epel_enabled diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-centos.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-centos.yml new file mode 100644 index 0000000..007fdce --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-centos.yml @@ -0,0 +1,117 @@ +--- +- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version + setup: + gather_subset: '!all' + filter: ansible_distribution_*version + +- name: Add proxy to yum.conf or dnf.conf if http_proxy is defined + ini_file: + path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}" + section: main + option: proxy + value: "{{ http_proxy | default(omit) }}" + state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" + no_extra_spaces: true + mode: 0644 + become: true + when: not skip_http_proxy_on_os_packages + +# For Oracle Linux install public repo +- name: Download Oracle Linux public yum repo + get_url: + url: https://yum.oracle.com/public-yum-ol7.repo + dest: /etc/yum.repos.d/public-yum-ol7.repo + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) < 7.6 + environment: "{{ proxy_env }}" + +- name: Enable Oracle Linux repo + ini_file: + dest: /etc/yum.repos.d/public-yum-ol7.repo + section: "{{ item }}" + option: enabled + 
value: "1" + mode: 0644 + with_items: + - ol7_latest + - ol7_addons + - ol7_developer_EPEL + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) < 7.6 + +- name: Install EPEL for Oracle Linux repo package + package: + name: "oracle-epel-release-el{{ ansible_distribution_major_version }}" + state: present + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) >= 7.6 + +- name: Enable Oracle Linux repo + ini_file: + dest: "/etc/yum.repos.d/oracle-linux-ol{{ ansible_distribution_major_version }}.repo" + section: "ol{{ ansible_distribution_major_version }}_addons" + option: "{{ item.option }}" + value: "{{ item.value }}" + mode: 0644 + with_items: + - { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" } + - { option: "enabled", value: "1" } + - { option: "baseurl", value: "http://yum.oracle.com/repo/OracleLinux/OL{{ ansible_distribution_major_version }}/addons/$basearch/" } + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) >= 7.6 + +- name: Enable Centos extra repo for Oracle Linux + ini_file: + dest: "/etc/yum.repos.d/centos-extras.repo" + section: "extras" + option: "{{ item.option }}" + value: "{{ item.value }}" + mode: 0644 + with_items: + - { option: "name", value: "CentOS-{{ ansible_distribution_major_version }} - Extras" } + - { option: "enabled", value: "1" } + - { option: "gpgcheck", value: "0" } + - { option: "baseurl", value: "http://mirror.centos.org/centos/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version|int > 7 %}os/{% endif %}" } + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) >= 7.6 + - (ansible_distribution_version | float) < 9 + +# CentOS ships with python installed + +- name: Check presence of fastestmirror.conf + stat: + path: /etc/yum/pluginconf.d/fastestmirror.conf + get_attributes: no + get_checksum: no + get_mime: no + register: fastestmirror + +# the fastestmirror plugin can actually slow down Ansible deployments +- name: Disable fastestmirror plugin if requested + lineinfile: + dest: /etc/yum/pluginconf.d/fastestmirror.conf + regexp: "^enabled=.*" + line: "enabled=0" + state: present + become: true + when: + - fastestmirror.stat.exists + - not centos_fastestmirror_enabled + +# libselinux-python is required on SELinux enabled hosts +# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements +- name: Install libselinux python package + package: + name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + state: present + become: true diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-clearlinux.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-clearlinux.yml new file mode 100644 index 0000000..de42e3c --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-clearlinux.yml @@ -0,0 +1,16 @@ +--- +# ClearLinux ships with Python installed + +- name: Install basic package to run containers + package: + name: containers-basic + state: present + +- name: Make sure docker service is enabled + systemd: + name: docker + masked: false + enabled: true + daemon_reload: true + state: started + 
become: true diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-coreos.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-coreos.yml new file mode 100644 index 0000000..737a7ec --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-coreos.yml @@ -0,0 +1,37 @@ +--- +# CoreOS ships without Python installed + +- name: Check if bootstrap is needed + raw: stat /opt/bin/.bootstrapped + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Force binaries directory for Container Linux by CoreOS and Flatcar + set_fact: + bin_dir: "/opt/bin" + tags: + - facts + +- name: Run bootstrap.sh + script: bootstrap.sh + become: true + environment: "{{ proxy_env }}" + when: + - need_bootstrap.rc != 0 + +- name: Set the ansible_python_interpreter fact + set_fact: + ansible_python_interpreter: "{{ bin_dir }}/python" + tags: + - facts + +- name: Disable auto-upgrade + systemd: + name: locksmithd.service + masked: true + state: stopped + when: + - coreos_locksmithd_disable diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-debian.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-debian.yml new file mode 100644 index 0000000..47bad20 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-debian.yml @@ -0,0 +1,76 @@ +--- +# Some Debian based distros ship without Python installed + +- name: Check if bootstrap is needed + raw: which python3 + register: need_bootstrap + failed_when: false + changed_when: false + # This command should always run, even in check mode + check_mode: false + tags: + - facts + +- name: Check http::proxy in apt configuration files + raw: apt-config dump | grep -qsi 'Acquire::http::proxy' + register: need_http_proxy + failed_when: false + changed_when: false + # This command should always run, even in check mode + check_mode: false + +- name: Add http_proxy to /etc/apt/apt.conf if http_proxy is defined + raw: echo 'Acquire::http::proxy "{{ http_proxy }}";' >> /etc/apt/apt.conf + become: true + when: + - http_proxy is defined + - need_http_proxy.rc != 0 + - not skip_http_proxy_on_os_packages + +- name: Check https::proxy in apt configuration files + raw: apt-config dump | grep -qsi 'Acquire::https::proxy' + register: need_https_proxy + failed_when: false + changed_when: false + # This command should always run, even in check mode + check_mode: false + +- name: Add https_proxy to /etc/apt/apt.conf if https_proxy is defined + raw: echo 'Acquire::https::proxy "{{ https_proxy }}";' >> /etc/apt/apt.conf + become: true + when: + - https_proxy is defined + - need_https_proxy.rc != 0 + - not skip_http_proxy_on_os_packages + +- name: Install python3 + raw: + apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y python3-minimal + become: true + when: + - need_bootstrap.rc != 0 + +- name: Update Apt cache + raw: apt-get update --allow-releaseinfo-change + become: true + when: + - '''ID=debian'' in os_release.stdout_lines' + - '''VERSION_ID="10"'' in os_release.stdout_lines or ''VERSION_ID="11"'' in os_release.stdout_lines' + register: bootstrap_update_apt_result + changed_when: + - '"changed its" in bootstrap_update_apt_result.stdout' + - '"value from" in bootstrap_update_apt_result.stdout' + ignore_errors: true + +- name: Set the ansible_python_interpreter fact + set_fact: + ansible_python_interpreter: "/usr/bin/python3" + +# Workaround for https://github.com/ansible/ansible/issues/25543 +- name: Install dbus for 
the hostname module + package: + name: dbus + state: present + use: apt + become: true diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml new file mode 100644 index 0000000..d3fd1c9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml @@ -0,0 +1,46 @@ +--- + +- name: Check if bootstrap is needed + raw: which python + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Remove podman network cni + raw: "podman network rm podman" + become: true + ignore_errors: true # noqa ignore-errors + when: need_bootstrap.rc != 0 + +- name: Clean up possible pending packages on fedora coreos + raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree cleanup -p }}" + become: true + when: need_bootstrap.rc != 0 + +- name: Install required packages on fedora coreos + raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree install --allow-inactive {{ fedora_coreos_packages|join(' ') }}" + become: true + when: need_bootstrap.rc != 0 + +- name: Reboot immediately for updated ostree + raw: "nohup bash -c 'sleep 5s && shutdown -r now'" + become: true + ignore_errors: true # noqa ignore-errors + ignore_unreachable: yes + when: need_bootstrap.rc != 0 + +- name: Wait for the reboot to complete + wait_for_connection: + timeout: 240 + connect_timeout: 20 + delay: 5 + sleep: 5 + when: need_bootstrap.rc != 0 + +- name: Store the fact if this is an fedora core os host + set_fact: + is_fedora_coreos: True + tags: + - facts diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-fedora.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-fedora.yml new file mode 100644 index 0000000..1613173 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-fedora.yml @@ -0,0 +1,36 @@ +--- +# Some Fedora based distros ship without Python installed + +- name: Check if bootstrap is needed + raw: which python + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Add proxy to dnf.conf if http_proxy is defined + ini_file: + path: "/etc/dnf/dnf.conf" + section: main + option: proxy + value: "{{ http_proxy | default(omit) }}" + state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" + no_extra_spaces: true + mode: 0644 + become: true + when: not skip_http_proxy_on_os_packages + +- name: Install python3 on fedora + raw: "dnf install --assumeyes --quiet python3" + become: true + when: + - need_bootstrap.rc != 0 + +# libselinux-python3 is required on SELinux enabled hosts +# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements +- name: Install libselinux-python3 + package: + name: libselinux-python3 + state: present + become: true diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-flatcar.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-flatcar.yml new file mode 100644 index 0000000..b0f3a9e --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-flatcar.yml @@ -0,0 +1,37 @@ +--- +# Flatcar Container Linux ships without Python installed + +- name: Check if bootstrap is needed + raw: stat /opt/bin/.bootstrapped + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Force binaries directory for Flatcar Container Linux by Kinvolk + 
set_fact: + bin_dir: "/opt/bin" + tags: + - facts + +- name: Run bootstrap.sh + script: bootstrap.sh + become: true + environment: "{{ proxy_env }}" + when: + - need_bootstrap.rc != 0 + +- name: Set the ansible_python_interpreter fact + set_fact: + ansible_python_interpreter: "{{ bin_dir }}/python" + tags: + - facts + +- name: Disable auto-upgrade + systemd: + name: locksmithd.service + masked: true + state: stopped + when: + - coreos_locksmithd_disable diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-opensuse.yml new file mode 100644 index 0000000..c833bfd --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-opensuse.yml @@ -0,0 +1,85 @@ +--- +# OpenSUSE ships with Python installed +- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version + setup: + gather_subset: '!all' + filter: ansible_distribution_*version + +- name: Check that /etc/sysconfig/proxy file exists + stat: + path: /etc/sysconfig/proxy + get_attributes: no + get_checksum: no + get_mime: no + register: stat_result + +- name: Create the /etc/sysconfig/proxy empty file + file: # noqa risky-file-permissions + path: /etc/sysconfig/proxy + state: touch + when: + - http_proxy is defined or https_proxy is defined + - not stat_result.stat.exists + +- name: Set the http_proxy in /etc/sysconfig/proxy + lineinfile: + path: /etc/sysconfig/proxy + regexp: '^HTTP_PROXY=' + line: 'HTTP_PROXY="{{ http_proxy }}"' + become: true + when: + - http_proxy is defined + +- name: Set the https_proxy in /etc/sysconfig/proxy + lineinfile: + path: /etc/sysconfig/proxy + regexp: '^HTTPS_PROXY=' + line: 'HTTPS_PROXY="{{ https_proxy }}"' + become: true + when: + - https_proxy is defined + +- name: Enable proxies + lineinfile: + path: /etc/sysconfig/proxy + regexp: '^PROXY_ENABLED=' + line: 'PROXY_ENABLED="yes"' + become: true + when: + - http_proxy is defined or https_proxy is defined + +# Required for zypper module +- name: Install python-xml + shell: zypper refresh && zypper --non-interactive install python-xml + changed_when: false + become: true + tags: + - facts + +# Without this package, the get_url module fails when trying to handle https +- name: Install python-cryptography + zypper: + name: python-cryptography + state: present + update_cache: true + become: true + when: + - ansible_distribution_version is version('15.4', '<') + +- name: Install python3-cryptography + zypper: + name: python3-cryptography + state: present + update_cache: true + become: true + when: + - ansible_distribution_version is version('15.4', '>=') + +# Nerdctl needs some basic packages to get an environment up +- name: Install basic dependencies + zypper: + name: + - iptables + - apparmor-parser + state: present + become: true diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-redhat.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-redhat.yml new file mode 100644 index 0000000..8f32388 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/bootstrap-redhat.yml @@ -0,0 +1,121 @@ +--- +- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version + setup: + gather_subset: '!all' + filter: ansible_distribution_*version + +- name: Add proxy to yum.conf or dnf.conf if http_proxy is defined + ini_file: + path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}" + section: 
main + option: proxy + value: "{{ http_proxy | default(omit) }}" + state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" + no_extra_spaces: true + mode: 0644 + become: true + when: not skip_http_proxy_on_os_packages + +- name: Add proxy to RHEL subscription-manager if http_proxy is defined + command: /sbin/subscription-manager config --server.proxy_hostname={{ http_proxy | regex_replace(':\d+$') }} --server.proxy_port={{ http_proxy | regex_replace('^.*:') }} + become: true + when: + - not skip_http_proxy_on_os_packages + - http_proxy is defined + +- name: Check RHEL subscription-manager status + command: /sbin/subscription-manager status + register: rh_subscription_status + changed_when: "rh_subscription_status != 0" + ignore_errors: true # noqa ignore-errors + become: true + +- name: RHEL subscription Organization ID/Activation Key registration + redhat_subscription: + state: present + org_id: "{{ rh_subscription_org_id }}" + activationkey: "{{ rh_subscription_activation_key }}" + auto_attach: true + force_register: true + syspurpose: + usage: "{{ rh_subscription_usage }}" + role: "{{ rh_subscription_role }}" + service_level_agreement: "{{ rh_subscription_sla }}" + sync: true + notify: RHEL auto-attach subscription + ignore_errors: true # noqa ignore-errors + become: true + when: + - rh_subscription_org_id is defined + - rh_subscription_status.changed + +# this task has no_log set to prevent logging security sensitive information such as subscription passwords +- name: RHEL subscription Username/Password registration + redhat_subscription: + state: present + username: "{{ rh_subscription_username }}" + password: "{{ rh_subscription_password }}" + auto_attach: true + force_register: true + syspurpose: + usage: "{{ rh_subscription_usage }}" + role: "{{ rh_subscription_role }}" + service_level_agreement: "{{ rh_subscription_sla }}" + sync: true + notify: RHEL auto-attach subscription + ignore_errors: true # noqa ignore-errors + become: true + no_log: "{{ not (unsafe_show_logs|bool) }}" + when: + - rh_subscription_username is defined + - rh_subscription_status.changed + +# container-selinux is in extras repo +- name: Enable RHEL 7 repos + rhsm_repository: + name: + - "rhel-7-server-rpms" + - "rhel-7-server-extras-rpms" + state: enabled + when: + - rhel_enable_repos | default(True) | bool + - ansible_distribution_major_version == "7" + +# container-selinux is in appstream repo +- name: Enable RHEL 8 repos + rhsm_repository: + name: + - "rhel-8-for-*-baseos-rpms" + - "rhel-8-for-*-appstream-rpms" + state: enabled + when: + - rhel_enable_repos | default(True) | bool + - ansible_distribution_major_version == "8" + +- name: Check presence of fastestmirror.conf + stat: + path: /etc/yum/pluginconf.d/fastestmirror.conf + get_attributes: no + get_checksum: no + get_mime: no + register: fastestmirror + +# the fastestmirror plugin can actually slow down Ansible deployments +- name: Disable fastestmirror plugin if requested + lineinfile: + dest: /etc/yum/pluginconf.d/fastestmirror.conf + regexp: "^enabled=.*" + line: "enabled=0" + state: present + become: true + when: + - fastestmirror.stat.exists + - not centos_fastestmirror_enabled + +# libselinux-python is required on SELinux enabled hosts +# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements +- name: Install libselinux python package + package: + name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + state: 
present + become: true diff --git a/kubespray/extra_playbooks/roles/bootstrap-os/tasks/main.yml b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/main.yml new file mode 100644 index 0000000..853ce09 --- /dev/null +++ b/kubespray/extra_playbooks/roles/bootstrap-os/tasks/main.yml @@ -0,0 +1,100 @@ +--- +- name: Fetch /etc/os-release + raw: cat /etc/os-release + register: os_release + changed_when: false + # This command should always run, even in check mode + check_mode: false + +- include_tasks: bootstrap-centos.yml + when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines or ''ID="uos"'' in os_release.stdout_lines or ''ID="openEuler"'' in os_release.stdout_lines' + +- include_tasks: bootstrap-amazon.yml + when: '''ID="amzn"'' in os_release.stdout_lines' + +- include_tasks: bootstrap-redhat.yml + when: '''ID="rhel"'' in os_release.stdout_lines' + +- include_tasks: bootstrap-clearlinux.yml + when: '''ID=clear-linux-os'' in os_release.stdout_lines' + +# Fedora CoreOS +- include_tasks: bootstrap-fedora-coreos.yml + when: + - '''ID=fedora'' in os_release.stdout_lines' + - '''VARIANT_ID=coreos'' in os_release.stdout_lines' + +- include_tasks: bootstrap-flatcar.yml + when: '''ID=flatcar'' in os_release.stdout_lines' + +- include_tasks: bootstrap-debian.yml + when: '''ID=debian'' in os_release.stdout_lines or ''ID=ubuntu'' in os_release.stdout_lines' + +# Fedora "classic" +- include_tasks: bootstrap-fedora.yml + when: + - '''ID=fedora'' in os_release.stdout_lines' + - '''VARIANT_ID=coreos'' not in os_release.stdout_lines' + +- include_tasks: bootstrap-opensuse.yml + when: '''ID="opensuse-leap"'' in os_release.stdout_lines or ''ID="opensuse-tumbleweed"'' in os_release.stdout_lines' + +- name: Create remote_tmp for it is used by another module + file: + path: "{{ ansible_remote_tmp | default('~/.ansible/tmp') }}" + state: directory + mode: 0700 + +# Workaround for https://github.com/ansible/ansible/issues/42726 +# (1/3) +- name: Gather host facts to get ansible_os_family + setup: + gather_subset: '!all' + filter: ansible_* + +- name: Assign inventory name to unconfigured hostnames (non-CoreOS, non-Flatcar, Suse and ClearLinux, non-Fedora) + hostname: + name: "{{ inventory_hostname }}" + when: + - override_system_hostname + - ansible_os_family not in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux'] + - not ansible_distribution == "Fedora" + - not is_fedora_coreos + +# (2/3) +- name: Assign inventory name to unconfigured hostnames (CoreOS, Flatcar, Suse, ClearLinux and Fedora only) + command: "hostnamectl set-hostname {{ inventory_hostname }}" + register: hostname_changed + become: true + changed_when: false + when: > + override_system_hostname + and (ansible_os_family in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux'] + or is_fedora_coreos + or ansible_distribution == "Fedora") + +# (3/3) +- name: Update hostname fact (CoreOS, Flatcar, Suse, ClearLinux and Fedora only) + setup: + gather_subset: '!all' + filter: ansible_hostname + when: > + override_system_hostname + and (ansible_os_family in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux'] + or is_fedora_coreos + or ansible_distribution == "Fedora") + +- name: Install ceph-commmon package + package: + name: + - ceph-common + state: present + when: rbd_provisioner_enabled|default(false) + +- name: Ensure 
bash_completion.d folder exists + file: + name: /etc/bash_completion.d/ + state: directory + owner: root + group: root + mode: 0755 diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd-common/defaults/main.yml b/kubespray/extra_playbooks/roles/container-engine/containerd-common/defaults/main.yml new file mode 100644 index 0000000..ae1c6e0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd-common/defaults/main.yml @@ -0,0 +1,17 @@ +--- +# We keep these variables around to allow migration from package +# manager controlled installs to direct download ones. +containerd_package: 'containerd.io' +yum_repo_dir: /etc/yum.repos.d + +# Keep minimal repo information around for cleanup +containerd_repo_info: + repos: + +# Ubuntu docker-ce repo +containerd_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu" +containerd_ubuntu_repo_component: "stable" + +# Debian docker-ce repo +containerd_debian_repo_base_url: "https://download.docker.com/linux/debian" +containerd_debian_repo_component: "stable" diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd-common/meta/main.yml b/kubespray/extra_playbooks/roles/container-engine/containerd-common/meta/main.yml new file mode 100644 index 0000000..a4159c5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd-common/meta/main.yml @@ -0,0 +1,2 @@ +--- +allow_duplicates: true \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd-common/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/containerd-common/tasks/main.yml new file mode 100644 index 0000000..cfd78f3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd-common/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: containerd-common | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: containerd-common | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: containerd-common | gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + skip: true + tags: + - facts diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd-common/vars/amazon.yml b/kubespray/extra_playbooks/roles/container-engine/containerd-common/vars/amazon.yml new file mode 100644 index 0000000..0568169 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd-common/vars/amazon.yml @@ -0,0 +1,2 @@ +--- +containerd_package: containerd diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd-common/vars/suse.yml b/kubespray/extra_playbooks/roles/container-engine/containerd-common/vars/suse.yml new file mode 100644 index 0000000..0568169 --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/container-engine/containerd-common/vars/suse.yml @@ -0,0 +1,2 @@ +--- +containerd_package: containerd diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/defaults/main.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/defaults/main.yml new file mode 100644 index 0000000..83115c4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/defaults/main.yml @@ -0,0 +1,75 @@ +--- +containerd_storage_dir: "/var/lib/containerd" +containerd_state_dir: "/run/containerd" +containerd_systemd_dir: "/etc/systemd/system/containerd.service.d" +# The default value is not -999 here because containerd's oom_score_adj has been +# set to the -999 even if containerd_oom_score is 0. +# Ref: https://github.com/kubernetes-sigs/kubespray/pull/9275#issuecomment-1246499242 +containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +containerd_runc_runtime: + name: runc + type: "io.containerd.runc.v2" + engine: "" + root: "" + base_runtime_spec: cri-base.json + options: + systemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}" + +containerd_additional_runtimes: [] +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +containerd_base_runtime_spec_rlimit_nofile: 65535 + +containerd_default_base_runtime_spec_patch: + process: + rlimits: + - type: RLIMIT_NOFILE + hard: "{{ containerd_base_runtime_spec_rlimit_nofile }}" + soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}" + +containerd_base_runtime_specs: + cri-base.json: "{{ containerd_default_base_runtime_spec | combine(containerd_default_base_runtime_spec_patch,recursive=1) }}" + +containerd_grpc_max_recv_message_size: 16777216 +containerd_grpc_max_send_message_size: 16777216 + +containerd_debug_level: "info" + +containerd_metrics_address: "" + +containerd_metrics_grpc_histogram: false + +containerd_registries: + "docker.io": "https://registry-1.docker.io" + +containerd_max_container_log_line_size: -1 + +# If enabled it will allow non root users to use port numbers <1024 +containerd_enable_unprivileged_ports: false +# If enabled it will allow non root users to use icmp sockets +containerd_enable_unprivileged_icmp: false + +containerd_cfg_dir: /etc/containerd + +# Extra config to be put in {{ containerd_cfg_dir }}/config.toml literally +containerd_extra_args: '' + +# Configure registry auth (if applicable to secure/insecure registries) +containerd_registry_auth: [] +# - registry: 10.0.0.2:5000 +# username: user +# password: pass + +# Configure containerd service +containerd_limit_proc_num: "infinity" +containerd_limit_core: "infinity" +containerd_limit_open_file_num: "infinity" +containerd_limit_mem_lock: "infinity" diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/handlers/main.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/handlers/main.yml new file mode 100644 index 0000000..d2f1265 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/handlers/main.yml @@ -0,0 +1,21 @@ +--- +- name: restart containerd + command: /bin/true + notify: + - Containerd | restart containerd + - Containerd | wait for containerd + +- name: Containerd | restart containerd + systemd: + name: containerd + state: restarted + enabled: yes + daemon-reload: yes + masked: no + +- name: Containerd | wait for containerd + command: "{{ containerd_bin_dir }}/ctr images ls -q" + register: 
containerd_ready + retries: 8 + delay: 4 + until: containerd_ready.rc == 0 diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/handlers/reset.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/handlers/reset.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/handlers/reset.yml @@ -0,0 +1 @@ +--- diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/meta/main.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/meta/main.yml new file mode 100644 index 0000000..41c5b6a --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/meta/main.yml @@ -0,0 +1,6 @@ +--- +dependencies: + - role: container-engine/containerd-common + - role: container-engine/runc + - role: container-engine/crictl + - role: container-engine/nerdctl diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/converge.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/converge.yml new file mode 100644 index 0000000..7847871 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/converge.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + container_manager: containerd + roles: + - role: kubespray-defaults + - role: container-engine/containerd diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/molecule.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/molecule.yml new file mode 100644 index 0000000..009b5aa --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/molecule.yml @@ -0,0 +1,49 @@ +--- +driver: + name: vagrant + provider: + name: libvirt +lint: | + set -e + yamllint -c ../../../.yamllint . 
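+# Test scenario: each box below converges the containerd role, then testinfra verifies the containerd service, `crictl version` against the containerd socket, and nerdctl pull/save/load/run of quay.io/kubespray/hello-world.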
+platforms: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster + - name: debian11 + box: generic/debian11 + cpus: 1 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/prepare.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/prepare.yml new file mode 100644 index 0000000..100673c --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/prepare.yml @@ -0,0 +1,28 @@ +--- +- name: Prepare + hosts: all + gather_facts: False + become: true + vars: + ignore_assert_errors: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: kubernetes/preinstall + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare CNI + hosts: all + gather_facts: False + become: true + vars: + ignore_assert_errors: true + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/tests/test_default.py b/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/tests/test_default.py new file mode 100644 index 0000000..e1d9151 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/molecule/default/tests/test_default.py @@ -0,0 +1,55 @@ +import os +import pytest + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_service(host): + svc = host.service("containerd") + assert svc.is_running + assert svc.is_enabled + + +def test_version(host): + crictl = "/usr/local/bin/crictl" + path = "unix:///var/run/containerd/containerd.sock" + with host.sudo(): + cmd = host.command(crictl + " --runtime-endpoint " + path + " version") + assert cmd.rc == 0 + assert "RuntimeName: containerd" in cmd.stdout + + +@pytest.mark.parametrize('image, dest', [ + ('quay.io/kubespray/hello-world:latest', '/tmp/hello-world.tar') +]) +def test_image_pull_save_load(host, image, dest): + nerdctl = "/usr/local/bin/nerdctl" + dest_file = host.file(dest) + + with host.sudo(): + pull_cmd = host.command(nerdctl + " pull " + image) + assert pull_cmd.rc ==0 + + with host.sudo(): + save_cmd = host.command(nerdctl + " save -o " + dest + " " + image) + assert save_cmd.rc == 0 + assert dest_file.exists + + with host.sudo(): + load_cmd = host.command(nerdctl + " load < " + dest) + assert load_cmd.rc == 0 + + +@pytest.mark.parametrize('image', [ + ('quay.io/kubespray/hello-world:latest') +]) +def test_run(host, image): + nerdctl = "/usr/local/bin/nerdctl" + + with host.sudo(): + cmd = host.command(nerdctl + " -n k8s.io run " + image) + assert cmd.rc == 0 + assert "Hello from Docker" in cmd.stdout diff --git 
a/kubespray/extra_playbooks/roles/container-engine/containerd/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/tasks/main.yml new file mode 100644 index 0000000..03b9668 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/tasks/main.yml @@ -0,0 +1,124 @@ +--- +- name: Fail containerd setup if distribution is not supported + fail: + msg: "{{ ansible_distribution }} is not supported by containerd." + when: + - ansible_distribution not in ["CentOS", "OracleLinux", "RedHat", "Ubuntu", "Debian", "Fedora", "AlmaLinux", "Rocky", "Amazon", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse", "openSUSE Leap", "openSUSE Tumbleweed", "Kylin Linux Advanced Server", "UnionTech", "openEuler"] + +- name: containerd | Remove any package manager controlled containerd package + package: + name: "{{ containerd_package }}" + state: absent + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + +- name: containerd | Remove containerd repository + file: + path: "{{ yum_repo_dir }}/containerd.repo" + state: absent + when: + - ansible_os_family in ['RedHat'] + +- name: containerd | Remove containerd repository + apt_repository: + repo: "{{ item }}" + state: absent + with_items: "{{ containerd_repo_info.repos }}" + when: ansible_pkg_mgr == 'apt' + +- name: containerd | Download containerd + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.containerd) }}" + +- name: containerd | Unpack containerd archive + unarchive: + src: "{{ downloads.containerd.dest }}" + dest: "{{ containerd_bin_dir }}" + mode: 0755 + remote_src: yes + extra_opts: + - --strip-components=1 + notify: restart containerd + +- name: containerd | Remove orphaned binary + file: + path: "/usr/bin/{{ item }}" + state: absent + when: + - containerd_bin_dir != "/usr/bin" + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + ignore_errors: true # noqa ignore-errors + with_items: + - containerd + - containerd-shim + - containerd-shim-runc-v1 + - containerd-shim-runc-v2 + - ctr + +- name: containerd | Generate systemd service for containerd + template: + src: containerd.service.j2 + dest: /etc/systemd/system/containerd.service + mode: 0644 + notify: restart containerd + +- name: containerd | Ensure containerd directories exist + file: + dest: "{{ item }}" + state: directory + mode: 0755 + owner: root + group: root + with_items: + - "{{ containerd_systemd_dir }}" + - "{{ containerd_cfg_dir }}" + - "{{ containerd_storage_dir }}" + - "{{ containerd_state_dir }}" + +- name: containerd | Write containerd proxy drop-in + template: + src: http-proxy.conf.j2 + dest: "{{ containerd_systemd_dir }}/http-proxy.conf" + mode: 0644 + notify: restart containerd + when: http_proxy is defined or https_proxy is defined + +- name: containerd | Generate default base_runtime_spec + register: ctr_oci_spec + command: "{{ containerd_bin_dir }}/ctr oci spec" + check_mode: false + changed_when: false + +- name: containerd | Store generated default base_runtime_spec + set_fact: + containerd_default_base_runtime_spec: "{{ ctr_oci_spec.stdout | from_json }}" + +- name: containerd | Write base_runtime_specs + copy: + content: "{{ item.value }}" + dest: "{{ containerd_cfg_dir }}/{{ item.key }}" + owner: "root" + mode: 0644 + with_dict: "{{ containerd_base_runtime_specs | default({}) }}" + notify: restart 
containerd + +- name: containerd | Copy containerd config file + template: + src: config.toml.j2 + dest: "{{ containerd_cfg_dir }}/config.toml" + owner: "root" + mode: 0640 + notify: restart containerd + +# you can sometimes end up in a state where everything is installed +# but containerd was not started / enabled +- name: containerd | Flush handlers + meta: flush_handlers + +- name: containerd | Ensure containerd is started and enabled + systemd: + name: containerd + daemon_reload: yes + enabled: yes + state: started diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/tasks/reset.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/tasks/reset.yml new file mode 100644 index 0000000..5c551b6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/tasks/reset.yml @@ -0,0 +1,41 @@ +--- +- name: containerd | Remove containerd repository for RedHat os family + file: + path: "{{ yum_repo_dir }}/containerd.repo" + state: absent + when: + - ansible_os_family in ['RedHat'] + tags: + - reset_containerd + +- name: containerd | Remove containerd repository for Debian os family + apt_repository: + repo: "{{ item }}" + state: absent + with_items: "{{ containerd_repo_info.repos }}" + when: ansible_pkg_mgr == 'apt' + tags: + - reset_containerd + +- name: containerd | Stop containerd service + service: + name: containerd + daemon_reload: true + enabled: false + masked: true + state: stopped + tags: + - reset_containerd + +- name: containerd | Remove configuration files + file: + path: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/containerd.service + - "{{ containerd_systemd_dir }}" + - "{{ containerd_cfg_dir }}" + - "{{ containerd_storage_dir }}" + - "{{ containerd_state_dir }}" + tags: + - reset_containerd diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/templates/config.toml.j2 b/kubespray/extra_playbooks/roles/container-engine/containerd/templates/config.toml.j2 new file mode 100644 index 0000000..c1bda12 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/templates/config.toml.j2 @@ -0,0 +1,79 @@ +version = 2 +root = "{{ containerd_storage_dir }}" +state = "{{ containerd_state_dir }}" +oom_score = {{ containerd_oom_score }} + +[grpc] + max_recv_message_size = {{ containerd_grpc_max_recv_message_size | default(16777216) }} + max_send_message_size = {{ containerd_grpc_max_send_message_size | default(16777216) }} + +[debug] + level = "{{ containerd_debug_level | default('info') }}" + +[metrics] + address = "{{ containerd_metrics_address | default('') }}" + grpc_histogram = {{ containerd_metrics_grpc_histogram | default(false) | lower }} + +[plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}" + max_container_log_line_size = {{ containerd_max_container_log_line_size }} + enable_unprivileged_ports = {{ containerd_enable_unprivileged_ports | default(false) | lower }} + enable_unprivileged_icmp = {{ containerd_enable_unprivileged_icmp | default(false) | lower }} + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "{{ containerd_default_runtime | default('runc') }}" + snapshotter = "{{ containerd_snapshotter | default('overlayfs') }}" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] +{% for runtime in [containerd_runc_runtime] + containerd_additional_runtimes %} + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}] + runtime_type = "{{ runtime.type }}" + 
runtime_engine = "{{ runtime.engine }}" + runtime_root = "{{ runtime.root }}" +{% if runtime.base_runtime_spec is defined %} + base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}" +{% endif %} + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}.options] +{% for key, value in runtime.options.items() %} + {{ key }} = {{ value }} +{% endfor %} +{% endfor %} +{% if kata_containers_enabled %} + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-qemu] + runtime_type = "io.containerd.kata-qemu.v2" +{% endif %} +{% if gvisor_enabled %} + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc] + runtime_type = "io.containerd.runsc.v1" +{% endif %} + [plugins."io.containerd.grpc.v1.cri".registry] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] +{% for registry, addr in containerd_registries.items() %} + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry }}"] + endpoint = ["{{ ([ addr ] | flatten ) | join('","') }}"] +{% endfor %} +{% if containerd_insecure_registries is defined and containerd_insecure_registries|length>0 %} +{% for registry, addr in containerd_insecure_registries.items() %} + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry }}"] + endpoint = ["{{ ([ addr ] | flatten ) | join('","') }}"] +{% endfor %} +{% for addr in containerd_insecure_registries.values() | flatten | unique %} + [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ addr }}".tls] + insecure_skip_verify = true +{% endfor %} +{% endif %} +{% for registry in containerd_registry_auth if registry['registry'] is defined %} +{% if (registry['username'] is defined and registry['password'] is defined) or registry['auth'] is defined %} + [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ registry['registry'] }}".auth] +{% if registry['username'] is defined and registry['password'] is defined %} + password = "{{ registry['password'] }}" + username = "{{ registry['username'] }}" +{% else %} + auth = "{{ registry['auth'] }}" +{% endif %} +{% endif %} +{% endfor %} + +{% if containerd_extra_args is defined %} +{{ containerd_extra_args }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/templates/containerd.service.j2 b/kubespray/extra_playbooks/roles/container-engine/containerd/templates/containerd.service.j2 new file mode 100644 index 0000000..adebcf2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/templates/containerd.service.j2 @@ -0,0 +1,41 @@ +# Copyright The containerd Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[Unit] +Description=containerd container runtime +Documentation=https://containerd.io +After=network.target local-fs.target + +[Service] +ExecStartPre=-/sbin/modprobe overlay +ExecStart={{ containerd_bin_dir }}/containerd + +Type=notify +Delegate=yes +KillMode=process +Restart=always +RestartSec=5 +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. 
We recommend using cgroups to do container-local accounting. +LimitNPROC={{ containerd_limit_proc_num }} +LimitCORE={{ containerd_limit_core }} +LimitNOFILE={{ containerd_limit_open_file_num }} +LimitMEMLOCK={{ containerd_limit_mem_lock }} +# Comment TasksMax if your systemd version does not supports it. +# Only systemd 226 and above support this version. +TasksMax=infinity +OOMScoreAdjust=-999 + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/templates/http-proxy.conf.j2 b/kubespray/extra_playbooks/roles/container-engine/containerd/templates/http-proxy.conf.j2 new file mode 100644 index 0000000..212f30f --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/templates/http-proxy.conf.j2 @@ -0,0 +1,2 @@ +[Service] +Environment={% if http_proxy is defined %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy is defined %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy is defined %}"NO_PROXY={{ no_proxy }}"{% endif %} diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/vars/debian.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/vars/debian.yml new file mode 100644 index 0000000..99dc4a5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/vars/debian.yml @@ -0,0 +1,7 @@ +--- +containerd_repo_info: + repos: + - > + deb {{ containerd_debian_repo_base_url }} + {{ ansible_distribution_release|lower }} + {{ containerd_debian_repo_component }} diff --git a/kubespray/extra_playbooks/roles/container-engine/containerd/vars/ubuntu.yml b/kubespray/extra_playbooks/roles/container-engine/containerd/vars/ubuntu.yml new file mode 100644 index 0000000..ccce96d --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/containerd/vars/ubuntu.yml @@ -0,0 +1,7 @@ +--- +containerd_repo_info: + repos: + - > + deb {{ containerd_ubuntu_repo_base_url }} + {{ ansible_distribution_release|lower }} + {{ containerd_ubuntu_repo_component }} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/handlers/main.yml b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/handlers/main.yml new file mode 100644 index 0000000..9d9d8c6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/handlers/main.yml @@ -0,0 +1,35 @@ +--- +- name: restart and enable cri-dockerd + command: /bin/true + notify: + - cri-dockerd | reload systemd + - cri-dockerd | restart docker.service + - cri-dockerd | reload cri-dockerd.socket + - cri-dockerd | reload cri-dockerd.service + - cri-dockerd | enable cri-dockerd service + +- name: cri-dockerd | reload systemd + systemd: + name: cri-dockerd + daemon_reload: true + masked: no + +- name: cri-dockerd | restart docker.service + service: + name: docker.service + state: restarted + +- name: cri-dockerd | reload cri-dockerd.socket + service: + name: cri-dockerd.socket + state: restarted + +- name: cri-dockerd | reload cri-dockerd.service + service: + name: cri-dockerd.service + state: restarted + +- name: cri-dockerd | enable cri-dockerd service + service: + name: cri-dockerd.service + enabled: yes diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/meta/main.yml b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/meta/main.yml new file mode 100644 index 0000000..4923f3b --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - role: container-engine/docker + - role: 
container-engine/crictl diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/converge.yml b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/converge.yml new file mode 100644 index 0000000..be6fa38 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/converge.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + container_manager: docker + roles: + - role: kubespray-defaults + - role: container-engine/cri-dockerd diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/files/10-mynet.conf b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/files/10-mynet.conf new file mode 100644 index 0000000..f10935b --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.2.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/files/container.json b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/files/container.json new file mode 100644 index 0000000..1d839e6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/files/container.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "cri-dockerd1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "cri-dockerd1.0.log", + "linux": {} +} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/files/sandbox.json b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/files/sandbox.json new file mode 100644 index 0000000..f451e9e --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "cri-dockerd1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/molecule.yml b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/molecule.yml new file mode 100644 index 0000000..c82ddba --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/molecule.yml @@ -0,0 +1,45 @@ +--- +driver: + name: vagrant + provider: + name: libvirt + options: + driver: kvm +lint: | + set -e + yamllint -c ../../../.yamllint . 
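+# Usage note (assumption: molecule and the vagrant/libvirt tooling configured above are installed): +# running "molecule converge -s default" followed by "molecule verify -s default" from the +# cri-dockerd role directory should bring up the platforms below and run the testinfra checks.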
+platforms: + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint + inventory: + group_vars: + all: + become: true +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/prepare.yml b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/prepare.yml new file mode 100644 index 0000000..c54feac --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/prepare.yml @@ -0,0 +1,47 @@ +--- +- name: Prepare + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: containerd + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: 0644 + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: "{{ kube_owner }}" + mode: 0755 + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: 0644 + with_items: + - 10-mynet.conf diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py new file mode 100644 index 0000000..dc99b34 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py @@ -0,0 +1,19 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run_pod(host): + run_command = "/usr/local/bin/crictl run --with-pull /tmp/container.json /tmp/sandbox.json" + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/cri-dockerd1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/tasks/main.yml new file mode 100644 index 0000000..9ce3ec6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- name: runc | Download cri-dockerd binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cri_dockerd) }}" + +- name: Copy cri-dockerd binary from download dir + copy: + src: "{{ local_release_dir }}/cri-dockerd" + dest: "{{ bin_dir }}/cri-dockerd" + mode: 0755 + remote_src: true + notify: + - restart and enable cri-dockerd + +- name: Generate 
cri-dockerd systemd unit files + template: + src: "{{ item }}.j2" + dest: "/etc/systemd/system/{{ item }}" + mode: 0644 + with_items: + - cri-dockerd.service + - cri-dockerd.socket + notify: + - restart and enable cri-dockerd + +- name: Flush handlers + meta: flush_handlers diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 new file mode 100644 index 0000000..078f666 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 @@ -0,0 +1,40 @@ +[Unit] +Description=CRI Interface for Docker Application Container Engine +Documentation=https://docs.mirantis.com +After=network-online.target firewalld.service docker.service +Wants=network-online.target docker.service +Requires=cri-dockerd.socket + +[Service] +Type=notify +ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnet }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} {% if enable_dual_stack_networks %}--ipv6-dual-stack=True{% endif %} + +ExecReload=/bin/kill -s HUP $MAINPID +TimeoutSec=0 +RestartSec=2 +Restart=always + +# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. +# Both the old, and new location are accepted by systemd 229 and up, so using the old location +# to make them work for either version of systemd. +StartLimitBurst=3 + +# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. +# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make +# this option work for either version of systemd. +StartLimitInterval=60s + +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity + +# Comment TasksMax if your systemd version does not support it. +# Only systemd 226 and above support this option. +TasksMax=infinity +Delegate=yes +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/templates/cri-dockerd.socket.j2 b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/templates/cri-dockerd.socket.j2 new file mode 100644 index 0000000..8dfa27d --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-dockerd/templates/cri-dockerd.socket.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=CRI Docker Socket for the API +PartOf=cri-dockerd.service + +[Socket] +ListenStream=%t/cri-dockerd.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/defaults/main.yml b/kubespray/extra_playbooks/roles/container-engine/cri-o/defaults/main.yml new file mode 100644 index 0000000..6b757fe --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/defaults/main.yml @@ -0,0 +1,103 @@ +--- + +crio_cgroup_manager: "{{ kubelet_cgroup_driver | default('systemd') }}" +crio_conmon: "{{ bin_dir }}/conmon" +crio_enable_metrics: false +crio_log_level: "info" +crio_metrics_port: "9090" +crio_pause_image: "{{ pod_infra_image_repo }}:{{ pod_infra_version }}" + +# Registries defined within cri-o. 
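+# Each entry is rendered by templates/registry.conf.j2 into +# /etc/containers/registries.conf.d/10-<prefix or location>.conf, and entries flagged as +# unqualified are also collected into 01-unqualified.conf (templates/unqualified.conf.j2).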
+# By default unqualified images are not allowed for security reasons +crio_registries: [] +# - prefix: docker.io +# insecure: false +# blocked: false +# location: registry-1.docker.io ## REQUIRED +# unqualified: false +# mirrors: +# - location: 172.20.100.52:5000 +# insecure: true +# - location: mirror.gcr.io +# insecure: false + +crio_registry_auth: [] +# - registry: 10.0.0.2:5000 +# username: user +# password: pass + +crio_seccomp_profile: "" +crio_selinux: "{{ (preinstall_selinux_state == 'enforcing')|lower }}" +crio_signature_policy: "{% if ansible_os_family == 'ClearLinux' %}/usr/share/defaults/crio/policy.json{% endif %}" + +# Override system default for storage driver +# crio_storage_driver: "overlay" + +crio_stream_port: "10010" + +crio_required_version: "{{ kube_version | regex_replace('^v(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}" + +# The crio_runtimes variable defines a list of OCI compatible runtimes. +crio_runtimes: + - name: runc + path: "{{ bin_dir }}/runc" + type: oci + root: /run/runc + +# Kata Containers is an OCI runtime, where containers are run inside lightweight +# VMs. Kata provides additional isolation towards the host, minimizing the host attack +# surface and mitigating the consequences of containers breakout. +kata_runtimes: + # Kata Containers with the default configured VMM + - name: kata-qemu + path: /usr/local/bin/containerd-shim-kata-qemu-v2 + type: vm + root: /run/kata-containers + privileged_without_host_devices: true + +# crun is a fast and low-memory footprint OCI Container Runtime fully written in C. +crun_runtime: + name: crun + path: "{{ bin_dir }}/crun" + type: oci + root: /run/crun + +# youki is an implementation of the OCI runtime-spec in Rust, similar to runc. +youki_runtime: + name: youki + path: "{{ youki_bin_dir }}/youki" + type: oci + root: /run/youki + +# TODO(cristicalin): remove this after 2.21 +crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# Configure the cri-o pids limit, increase this for heavily multi-threaded workloads +# see https://github.com/cri-o/cri-o/issues/1921 +crio_pids_limit: 1024 + +# Reserve 16M uids and gids for user namespaces (256 pods * 65536 uids/gids) +# at the end of the uid/gid space +crio_remap_enable: false +crio_remap_user: containers +crio_subuid_start: 2130706432 +crio_subuid_length: 16777216 +crio_subgid_start: 2130706432 +crio_subgid_length: 16777216 + +# cri-o binary files +crio_bin_files: + - conmon + - crio + - crio-status + - pinns + +# cri-o manual files +crio_man_files: + 5: + - crio.conf + - crio.conf.d + 8: + - crio + - crio-status diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/files/mounts.conf b/kubespray/extra_playbooks/roles/container-engine/cri-o/files/mounts.conf new file mode 100644 index 0000000..b7cde9d --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/files/mounts.conf @@ -0,0 +1 @@ +/usr/share/rhel/secrets:/run/secrets diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/handlers/main.yml b/kubespray/extra_playbooks/roles/container-engine/cri-o/handlers/main.yml new file mode 100644 index 0000000..8bc936b --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/handlers/main.yml @@ -0,0 +1,16 @@ +--- +- name: restart crio + command: /bin/true + notify: + - CRI-O | reload systemd + - CRI-O | reload crio + +- name: CRI-O | reload systemd + systemd: + daemon_reload: true + +- name: CRI-O | reload crio + 
service: + name: crio + state: restarted + enabled: yes diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/meta/main.yml b/kubespray/extra_playbooks/roles/container-engine/cri-o/meta/main.yml new file mode 100644 index 0000000..3304f70 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - role: container-engine/crictl + - role: container-engine/runc + - role: container-engine/skopeo diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/converge.yml b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/converge.yml new file mode 100644 index 0000000..376f07c --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/converge.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + container_manager: crio + roles: + - role: kubespray-defaults + - role: container-engine/cri-o diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf new file mode 100644 index 0000000..f10935b --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.2.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/files/container.json b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/files/container.json new file mode 100644 index 0000000..bcd71e7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/files/container.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "runc1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "runc1.0.log", + "linux": {} +} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/files/sandbox.json b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/files/sandbox.json new file mode 100644 index 0000000..eb9dcb9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "runc1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/molecule.yml b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/molecule.yml new file mode 100644 index 0000000..163eb8e --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/molecule.yml @@ -0,0 +1,57 @@ +--- +driver: + name: vagrant + provider: + name: libvirt +lint: | + set -e + yamllint -c ../../../.yamllint . 
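+# This scenario assumes a host with vagrant and the libvirt provider named above; the +# platforms below exercise the CRI-O install on Ubuntu, AlmaLinux, Fedora and Debian boxes.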
+platforms: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 2 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster + - name: almalinux8 + box: almalinux/8 + cpus: 2 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster + - name: fedora + box: fedora/36-cloud-base + cpus: 2 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster + - name: debian10 + box: generic/debian10 + cpus: 2 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/prepare.yml b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/prepare.yml new file mode 100644 index 0000000..ec47a1e --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/prepare.yml @@ -0,0 +1,52 @@ +--- +- name: Prepare + hosts: all + gather_facts: False + become: true + vars: + ignore_assert_errors: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: kubernetes/preinstall + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare CNI + hosts: all + gather_facts: False + become: true + vars: + ignore_assert_errors: true + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: 0644 + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: "{{ kube_owner }}" + mode: 0755 + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: 0644 + with_items: + - 10-mynet.conf diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/tests/test_default.py b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/tests/test_default.py new file mode 100644 index 0000000..358a1b7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/molecule/default/tests/test_default.py @@ -0,0 +1,35 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_service(host): + svc = host.service("crio") + assert svc.is_running + assert svc.is_enabled + + +def test_run(host): + crictl = "/usr/local/bin/crictl" + path = "unix:///var/run/crio/crio.sock" + with host.sudo(): + cmd = host.command(crictl + " --runtime-endpoint " + path + " version") + assert cmd.rc == 0 + assert "RuntimeName: cri-o" in cmd.stdout + +def test_run_pod(host): + runtime = "runc" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/runc1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git 
a/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/cleanup.yaml b/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/cleanup.yaml new file mode 100644 index 0000000..28c0c3a --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/cleanup.yaml @@ -0,0 +1,119 @@ +--- +# TODO(cristicalin): drop this file after 2.21 +- name: CRI-O kubic repo name for debian os family + set_fact: + crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}" + when: ansible_os_family == "Debian" + +- name: Remove legacy CRI-O kubic apt repo key + apt_key: + url: "https://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/Release.key" + state: absent + when: crio_kubic_debian_repo_name is defined + +- name: Remove legacy CRI-O kubic apt repo + apt_repository: + repo: "deb http://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/ /" + state: absent + filename: devel-kubic-libcontainers-stable + when: crio_kubic_debian_repo_name is defined + +- name: Remove legacy CRI-O kubic cri-o apt repo + apt_repository: + repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /" + state: absent + filename: devel-kubic-libcontainers-stable-cri-o + when: crio_kubic_debian_repo_name is defined + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: devel_kubic_libcontainers_stable + description: Stable Releases of Upstream github.com/containers packages (CentOS_$releasever) + baseurl: http://{{ crio_download_base }}/CentOS_{{ ansible_distribution_major_version }}/ + state: absent + when: + - ansible_os_family == "RedHat" + - ansible_distribution not in ["Amazon", "Fedora"] + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}" + description: "CRI-O {{ crio_version }} (CentOS_$releasever)" + baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_{{ ansible_distribution_major_version }}/" + state: absent + when: + - ansible_os_family == "RedHat" + - ansible_distribution not in ["Amazon", "Fedora"] + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: devel_kubic_libcontainers_stable + description: Stable Releases of Upstream github.com/containers packages + baseurl: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/ + state: absent + when: + - ansible_distribution in ["Fedora"] + - not is_ostree + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}" + description: "CRI-O {{ crio_version }}" + baseurl: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/" + state: absent + when: + - ansible_distribution in ["Fedora"] + - not is_ostree + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: devel_kubic_libcontainers_stable + description: Stable Releases of Upstream github.com/containers packages + baseurl: http://{{ crio_download_base }}/CentOS_7/ + state: absent + when: ansible_distribution in ["Amazon"] + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}" + description: "CRI-O {{ crio_version }}" + baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_7/" + state: absent + when: ansible_distribution in ["Amazon"] + +- name: Disable modular repos for CRI-O + ini_file: + path: 
"/etc/yum.repos.d/{{ item.repo }}.repo" + section: "{{ item.section }}" + option: enabled + value: 0 + mode: 0644 + become: true + when: is_ostree + loop: + - repo: "fedora-updates-modular" + section: "updates-modular" + - repo: "fedora-modular" + section: "fedora-modular" + +# Disable any older module version if we enabled them before +- name: Disable CRI-O ex module + command: "rpm-ostree ex module disable cri-o:{{ item }}" + become: true + when: + - is_ostree + - ostree_version is defined and ostree_version.stdout is version('2021.9', '>=') + with_items: + - 1.22 + - 1.23 + - 1.24 + +- name: cri-o | remove installed packages + package: + name: "{{ item }}" + state: absent + when: not is_ostree + with_items: + - cri-o + - cri-o-runc + - oci-systemd-hook diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/main.yaml b/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/main.yaml new file mode 100644 index 0000000..89aab56 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/main.yaml @@ -0,0 +1,206 @@ +--- +- name: cri-o | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: cri-o | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: cri-o | get ostree version + shell: "set -o pipefail && rpm-ostree --version | awk -F\\' '/Version/{print $2}'" + args: + executable: /bin/bash + register: ostree_version + when: is_ostree + +- name: cri-o | Download cri-o + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.crio) }}" + +- name: cri-o | special handling for amazon linux + import_tasks: "setup-amazon.yaml" + when: ansible_distribution in ["Amazon"] + +- name: cri-o | clean up reglacy repos + import_tasks: "cleanup.yaml" + +- name: cri-o | build a list of crio runtimes with Katacontainers runtimes + set_fact: + crio_runtimes: "{{ crio_runtimes + kata_runtimes }}" + when: + - kata_containers_enabled + +- name: cri-o | build a list of crio runtimes with crun runtime + set_fact: + crio_runtimes: "{{ crio_runtimes + [crun_runtime] }}" + when: + - crun_enabled + +- name: cri-o | build a list of crio runtimes with youki runtime + set_fact: + crio_runtimes: "{{ crio_runtimes + [youki_runtime] }}" + when: + - youki_enabled + +- name: cri-o | make sure needed folders exist in the system + with_items: + - /etc/crio + - /etc/containers + - /etc/systemd/system/crio.service.d + file: + path: "{{ item }}" + state: directory + mode: 0755 + +- name: cri-o | install cri-o config + template: + src: crio.conf.j2 + dest: /etc/crio/crio.conf + mode: 0644 + register: config_install + +- name: cri-o | install config.json + template: + src: config.json.j2 + dest: /etc/crio/config.json + mode: 0644 + register: reg_auth_install + +- name: cri-o | copy binaries + copy: + src: "{{ local_release_dir }}/cri-o/bin/{{ item }}" + dest: "{{ bin_dir }}/{{ item }}" + mode: 0755 + remote_src: true + with_items: + - "{{ crio_bin_files }}" + notify: restart crio + +- name: cri-o | copy service file + copy: + src: "{{ local_release_dir }}/cri-o/contrib/crio.service" + dest: /etc/systemd/system/crio.service + mode: 0755 + remote_src: true + notify: restart crio + +- name: cri-o | copy default policy + copy: + src: "{{ local_release_dir }}/cri-o/contrib/policy.json" + dest: /etc/containers/policy.json + mode: 0755 + remote_src: true + notify: restart crio + +- name: cri-o | copy mounts.conf + 
copy: + src: mounts.conf + dest: /etc/containers/mounts.conf + mode: 0644 + when: + - ansible_os_family == 'RedHat' + notify: restart crio + +- name: cri-o | create directory for oci hooks + file: + path: /etc/containers/oci/hooks.d + state: directory + owner: root + mode: 0755 + +- name: cri-o | set overlay driver + ini_file: + dest: /etc/containers/storage.conf + section: storage + option: "{{ item.option }}" + value: "{{ item.value }}" + mode: 0644 + with_items: + - option: driver + value: '"overlay"' + - option: graphroot + value: '"/var/lib/containers/storage"' + +# metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel +- name: cri-o | set metacopy mount options correctly + ini_file: + dest: /etc/containers/storage.conf + section: storage.options.overlay + option: mountopt + value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}' + mode: 0644 + +- name: cri-o | create directory registries configs + file: + path: /etc/containers/registries.conf.d + state: directory + owner: root + mode: 0755 + +- name: cri-o | write registries configs + template: + src: registry.conf.j2 + dest: "/etc/containers/registries.conf.d/10-{{ item.prefix | default(item.location) | regex_replace(':', '_') }}.conf" + mode: 0644 + loop: "{{ crio_registries }}" + notify: restart crio + +- name: cri-o | configure unqualified registry settings + template: + src: unqualified.conf.j2 + dest: "/etc/containers/registries.conf.d/01-unqualified.conf" + mode: 0644 + notify: restart crio + +- name: cri-o | write cri-o proxy drop-in + template: + src: http-proxy.conf.j2 + dest: /etc/systemd/system/crio.service.d/http-proxy.conf + mode: 0644 + notify: restart crio + when: http_proxy is defined or https_proxy is defined + +- name: cri-o | configure the uid/gid space for user namespaces + lineinfile: + path: '{{ item.path }}' + line: '{{ item.entry }}' + regex: '^\s*{{ crio_remap_user }}:' + state: '{{ "present" if crio_remap_enable | bool else "absent" }}' + loop: + - path: /etc/subuid + entry: '{{ crio_remap_user }}:{{ crio_subuid_start }}:{{ crio_subuid_length }}' + - path: /etc/subgid + entry: '{{ crio_remap_user }}:{{ crio_subgid_start }}:{{ crio_subgid_length }}' + loop_control: + label: '{{ item.path }}' + +- name: cri-o | ensure crio service is started and enabled + service: + name: crio + daemon_reload: true + enabled: true + state: started + register: service_start + +- name: cri-o | trigger service restart only when needed + service: # noqa 503 + name: crio + state: restarted + when: + - config_install.changed + - reg_auth_install.changed + - not service_start.changed + +- name: cri-o | verify that crio is running + command: "{{ bin_dir }}/crio-status info" + register: get_crio_info + until: get_crio_info is succeeded + changed_when: false + retries: 5 + delay: "{{ retry_stagger | random + 3 }}" diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/reset.yml b/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/reset.yml new file mode 100644 index 0000000..f5e0e54 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/reset.yml @@ -0,0 +1,101 @@ +--- +- name: CRI-O | Kubic repo name for debian os family + set_fact: + crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}" + when: ansible_os_family == "Debian" + tags: + - reset_crio + +- name: CRI-O | 
Remove kubic apt repo + apt_repository: + repo: "deb http://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/ /" + state: absent + when: crio_kubic_debian_repo_name is defined + tags: + - reset_crio + +- name: CRI-O | Remove cri-o apt repo + apt_repository: + repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /" + state: present + filename: devel-kubic-libcontainers-stable-cri-o + when: crio_kubic_debian_repo_name is defined + tags: + - reset_crio + +- name: CRI-O | Remove CRI-O kubic yum repo + yum_repository: + name: devel_kubic_libcontainers_stable + state: absent + when: ansible_distribution in ["Amazon"] + tags: + - reset_crio + +- name: CRI-O | Remove CRI-O kubic yum repo + yum_repository: + name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}" + state: absent + when: + - ansible_os_family == "RedHat" + - ansible_distribution not in ["Amazon", "Fedora"] + tags: + - reset_crio + +- name: CRI-O | Run yum-clean-metadata + command: yum clean metadata + args: + warn: no + when: + - ansible_os_family == "RedHat" + tags: + - reset_crio + +- name: CRI-O | Remove crictl + file: + name: "{{ item }}" + state: absent + loop: + - /etc/crictl.yaml + - "{{ bin_dir }}/crictl" + tags: + - reset_crio + +- name: CRI-O | Stop crio service + service: + name: crio + daemon_reload: true + enabled: false + masked: true + state: stopped + tags: + - reset_crio + +- name: CRI-O | Remove CRI-O configuration files + file: + name: "{{ item }}" + state: absent + loop: + - /etc/crio + - /etc/containers + - /etc/systemd/system/crio.service.d + tags: + - reset_crio + +- name: CRI-O | Remove dpkg hold + dpkg_selections: + name: "{{ item }}" + selection: install + when: ansible_pkg_mgr == 'apt' + changed_when: false + with_items: "{{ crio_packages }}" + tags: + - reset_crio + +- name: CRI-O | Uninstall CRI-O package + package: + name: "{{ item }}" + state: absent + when: not is_ostree + with_items: "{{ crio_packages }}" + tags: + - reset_crio diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/setup-amazon.yaml b/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/setup-amazon.yaml new file mode 100644 index 0000000..3690367 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/tasks/setup-amazon.yaml @@ -0,0 +1,38 @@ +--- +- name: Check that amzn2-extras.repo exists + stat: + path: /etc/yum.repos.d/amzn2-extras.repo + register: amzn2_extras_file_stat + +- name: Find docker repo in amzn2-extras.repo file + lineinfile: + dest: /etc/yum.repos.d/amzn2-extras.repo + line: "[amzn2extra-docker]" + check_mode: yes + register: amzn2_extras_docker_repo + when: + - amzn2_extras_file_stat.stat.exists + +- name: Remove docker repository + ini_file: + dest: /etc/yum.repos.d/amzn2-extras.repo + section: amzn2extra-docker + option: enabled + value: "0" + backup: yes + mode: 0644 + when: + - amzn2_extras_file_stat.stat.exists + - not amzn2_extras_docker_repo.changed + +- name: Add container-selinux yum repo + yum_repository: + name: copr:copr.fedorainfracloud.org:lsm5:container-selinux + file: _copr_lsm5-container-selinux.repo + description: Copr repo for container-selinux owned by lsm5 + baseurl: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/epel-7-$basearch/ + gpgcheck: yes + gpgkey: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/pubkey.gpg + skip_if_unavailable: yes + enabled: yes + repo_gpgcheck: no diff --git 
a/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/config.json.j2 b/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/config.json.j2 new file mode 100644 index 0000000..522ade7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/config.json.j2 @@ -0,0 +1,17 @@ +{% if crio_registry_auth is defined and crio_registry_auth|length %} +{ +{% for reg in crio_registry_auth %} + "auths": { + "{{ reg.registry }}": { + "auth": "{{ (reg.username + ':' + reg.password) | string | b64encode }}" + } +{% if not loop.last %} + }, +{% else %} + } +{% endif %} +{% endfor %} +} +{% else %} +{} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/crio.conf.j2 b/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/crio.conf.j2 new file mode 100644 index 0000000..1a25e09 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/crio.conf.j2 @@ -0,0 +1,369 @@ +# The CRI-O configuration file specifies all of the available configuration +# options and command-line flags for the crio(8) OCI Kubernetes Container Runtime +# daemon, but in a TOML format that can be more easily modified and versioned. +# +# Please refer to crio.conf(5) for details of all configuration options. + +# CRI-O supports partial configuration reload during runtime, which can be +# done by sending SIGHUP to the running process. Currently supported options +# are explicitly mentioned with: 'This option supports live configuration +# reload'. + +# CRI-O reads its storage defaults from the containers-storage.conf(5) file +# located at /etc/containers/storage.conf. Modify this storage configuration if +# you want to change the system's defaults. If you want to modify storage just +# for CRI-O, you can change the storage configuration options here. +[crio] + +# Path to the "root directory". CRI-O stores all of its data, including +# containers images, in this directory. +root = "/var/lib/containers/storage" + +# Path to the "run directory". CRI-O stores all of its state in this directory. +runroot = "/var/run/containers/storage" + +# Storage driver used to manage the storage of images and containers. Please +# refer to containers-storage.conf(5) to see all available storage drivers. +{% if crio_storage_driver is defined %} +storage_driver = "{{ crio_storage_driver }}" +{% endif %} + +# List to pass options to the storage driver. Please refer to +# containers-storage.conf(5) to see all available storage options. +#storage_option = [ +#] + +# The default log directory where all logs will go unless directly specified by +# the kubelet. The log directory specified must be an absolute directory. +log_dir = "/var/log/crio/pods" + +# Location for CRI-O to lay down the temporary version file. +# It is used to check if crio wipe should wipe containers, which should +# always happen on a node reboot +version_file = "/var/run/crio/version" + +# Location for CRI-O to lay down the persistent version file. +# It is used to check if crio wipe should wipe images, which should +# only happen when CRI-O has been upgraded +version_file_persist = "/var/lib/crio/version" + +# The crio.api table contains settings for the kubelet/gRPC interface. +[crio.api] + +# Path to AF_LOCAL socket on which CRI-O will listen. +listen = "/var/run/crio/crio.sock" + +# IP address on which the stream server will listen. +stream_address = "127.0.0.1" + +# The port on which the stream server will listen. 
If the port is set to "0", then +# CRI-O will allocate a random free port number. +stream_port = "{{ crio_stream_port }}" + +# Enable encrypted TLS transport of the stream server. +stream_enable_tls = false + +# Path to the x509 certificate file used to serve the encrypted stream. This +# file can change, and CRI-O will automatically pick up the changes within 5 +# minutes. +stream_tls_cert = "" + +# Path to the key file used to serve the encrypted stream. This file can +# change and CRI-O will automatically pick up the changes within 5 minutes. +stream_tls_key = "" + +# Path to the x509 CA(s) file used to verify and authenticate client +# communication with the encrypted stream. This file can change and CRI-O will +# automatically pick up the changes within 5 minutes. +stream_tls_ca = "" + +# Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 16 * 1024 * 1024. +grpc_max_send_msg_size = 16777216 + +# Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 16 * 1024 * 1024. +grpc_max_recv_msg_size = 16777216 + +# The crio.runtime table contains settings pertaining to the OCI runtime used +# and options for how to set up and manage the OCI runtime. +[crio.runtime] + +# A list of ulimits to be set in containers by default, specified as +# "=:", for example: +# "nofile=1024:2048" +# If nothing is set here, settings will be inherited from the CRI-O daemon +#default_ulimits = [ +#] + +# default_runtime is the _name_ of the OCI runtime to be used as the default. +# The name is matched against the runtimes map below. +default_runtime = "runc" + +# If true, the runtime will not use pivot_root, but instead use MS_MOVE. +no_pivot = false + +# decryption_keys_path is the path where the keys required for +# image decryption are stored. This option supports live configuration reload. +decryption_keys_path = "/etc/crio/keys/" + +# Path to the conmon binary, used for monitoring the OCI runtime. +# Will be searched for using $PATH if empty. +conmon = "{{ crio_conmon }}" + +# Cgroup setting for conmon +{% if crio_cgroup_manager == "cgroupfs" %} +conmon_cgroup = "pod" +{% else %} +conmon_cgroup = "system.slice" +{% endif %} + +# Environment variable list for the conmon process, used for passing necessary +# environment variables to conmon or the runtime. +conmon_env = [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", +] + +# Additional environment variables to set for all the +# containers. These are overridden if set in the +# container image spec or in the container runtime configuration. +default_env = [ +] + +# If true, SELinux will be used for pod separation on the host. +selinux = {{ crio_selinux }} + +# Path to the seccomp.json profile which is used as the default seccomp profile +# for the runtime. If not specified, then the internal default seccomp profile +# will be used. This option supports live configuration reload. +seccomp_profile = "{{ crio_seccomp_profile }}" + +# Used to change the name of the default AppArmor profile of CRI-O. The default +# profile name is "crio-default". This profile only takes effect if the user +# does not specify a profile via the Kubernetes Pod's metadata annotation. If +# the profile is set to "unconfined", then this equals to disabling AppArmor. +# This option supports live configuration reload. +# apparmor_profile = "crio-default" + +# Cgroup management implementation used for the runtime. 
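+# ("systemd" and "cgroupfs" are the managers CRI-O understands; kubespray fills this in from +# crio_cgroup_manager, which the role defaults derive from the kubelet cgroup driver.)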
+cgroup_manager = "{{ crio_cgroup_manager }}" + +# List of default capabilities for containers. If it is empty or commented out, +# only the capabilities defined in the containers json file by the user/kube +# will be added. +default_capabilities = [ + "CHOWN", + "DAC_OVERRIDE", + "FSETID", + "FOWNER", + "NET_RAW", + "SETGID", + "SETUID", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL", +] + +# List of default sysctls. If it is empty or commented out, only the sysctls +# defined in the container json file by the user/kube will be added. +default_sysctls = [ +] + +# List of additional devices. specified as +# "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". +#If it is empty or commented out, only the devices +# defined in the container json file by the user/kube will be added. +additional_devices = [ +] + +# Path to OCI hooks directories for automatically executed hooks. If one of the +# directories does not exist, then CRI-O will automatically skip them. +hooks_dir = [ + "/usr/share/containers/oci/hooks.d", +] + +# List of default mounts for each container. **Deprecated:** this option will +# be removed in future versions in favor of default_mounts_file. +default_mounts = [ +] + +# Path to the file specifying the defaults mounts for each container. The +# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads +# its default mounts from the following two files: +# +# 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the +# override file, where users can either add in their own default mounts, or +# override the default mounts shipped with the package. +# +# 2) /usr/share/containers/mounts.conf: This is the default file read for +# mounts. If you want CRI-O to read from a different, specific mounts file, +# you can change the default_mounts_file. Note, if this is done, CRI-O will +# only add mounts it finds in this file. +# +#default_mounts_file = "" + +# Maximum number of processes allowed in a container. +pids_limit = {{ crio_pids_limit }} + +# Maximum sized allowed for the container log file. Negative numbers indicate +# that no size limit is imposed. If it is positive, it must be >= 8192 to +# match/exceed conmon's read buffer. The file is truncated and re-opened so the +# limit is never exceeded. +log_size_max = -1 + +# Whether container output should be logged to journald in addition to the kuberentes log file +log_to_journald = false + +# Path to directory in which container exit files are written to by conmon. +container_exits_dir = "/var/run/crio/exits" + +# Path to directory for container attach sockets. +container_attach_socket_dir = "/var/run/crio" + +# The prefix to use for the source of the bind mounts. +bind_mount_prefix = "" + +# If set to true, all containers will run in read-only mode. +read_only = false + +# Changes the verbosity of the logs based on the level it is set to. Options +# are fatal, panic, error, warn, info, debug and trace. This option supports +# live configuration reload. +log_level = "{{ crio_log_level }}" + +# Filter the log messages by the provided regular expression. +# This option supports live configuration reload. +log_filter = "" + +# The UID mappings for the user namespace of each container. A range is +# specified in the form containerUID:HostUID:Size. Multiple ranges must be +# separated by comma. +uid_mappings = "" + +# The GID mappings for the user namespace of each container. A range is +# specified in the form containerGID:HostGID:Size. Multiple ranges must be +# separated by comma. 
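+# (Both mappings are left empty here; kubespray handles user-namespace remapping through +# crio_remap_enable and the /etc/subuid and /etc/subgid entries written in tasks/main.yaml.)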
+gid_mappings = "" + +# The minimal amount of time in seconds to wait before issuing a timeout +# regarding the proper termination of the container. The lowest possible +# value is 30s, whereas lower values are not considered by CRI-O. +ctr_stop_timeout = 30 + +# **DEPRECATED** this option is being replaced by manage_ns_lifecycle, which is described below. +# manage_network_ns_lifecycle = false + +# manage_ns_lifecycle determines whether we pin and remove namespaces +# and manage their lifecycle +{% if kata_containers_enabled %} +manage_ns_lifecycle = true +{% else %} +manage_ns_lifecycle = false +{% endif %} + +# The directory where the state of the managed namespaces gets tracked. +# Only used when manage_ns_lifecycle is true. +namespaces_dir = "/var/run" + +# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle +pinns_path = "" + +# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. +# The runtime to use is picked based on the runtime_handler provided by the CRI. +# If no runtime_handler is provided, the runtime will be picked based on the level +# of trust of the workload. Each entry in the table should follow the format: +# +#[crio.runtime.runtimes.runtime-handler] +# runtime_path = "/path/to/the/executable" +# runtime_type = "oci" +# runtime_root = "/path/to/the/root" +# +# Where: +# - runtime-handler: name used to identify the runtime +# - runtime_path (optional, string): absolute path to the runtime executable in +# the host filesystem. If omitted, the runtime-handler identifier should match +# the runtime executable name, and the runtime executable should be placed +# in $PATH. +# - runtime_type (optional, string): type of runtime, one of: "oci", "vm". If +# omitted, an "oci" runtime is assumed. +# - runtime_root (optional, string): root directory for storage of containers +# state. + +{% for runtime in crio_runtimes %} +[crio.runtime.runtimes.{{ runtime.name }}] +runtime_path = "{{ runtime.path }}" +runtime_type = "{{ runtime.type }}" +runtime_root = "{{ runtime.root }}" +privileged_without_host_devices = {{ runtime.privileged_without_host_devices|default(false)|lower }} +allowed_annotations = {{ runtime.allowed_annotations|default([])|to_json }} +{% endfor %} + +# Kata Containers with the Firecracker VMM +#[crio.runtime.runtimes.kata-fc] + +# The crio.image table contains settings pertaining to the management of OCI images. +# +# CRI-O reads its configured registries defaults from the system wide +# containers-registries.conf(5) located in /etc/containers/registries.conf. If +# you want to modify just CRI-O, you can change the registries configuration in +# this file. Otherwise, leave insecure_registries and registries commented out to +# use the system's defaults from /etc/containers/registries.conf. +[crio.image] + +# Default transport for pulling images from a remote container storage. +default_transport = "docker://" + +# The path to a file containing credentials necessary for pulling images from +# secure registries. The file is similar to that of /var/lib/kubelet/config.json +global_auth_file = "/etc/crio/config.json" + +# The image used to instantiate infra containers. +# This option supports live configuration reload. +pause_image = "{{ crio_pause_image }}" + +# The path to a file containing credentials specific for pulling the pause_image from +# above. The file is similar to that of /var/lib/kubelet/config.json +# This option supports live configuration reload. 
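+# (Registry credentials, including any needed for the pause image, come from crio_registry_auth +# and are written to the global_auth_file above by templates/config.json.j2.)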
+pause_image_auth_file = "" + +# The command to run to have a container stay in the paused state. +# When explicitly set to "", it will fallback to the entrypoint and command +# specified in the pause image. When commented out, it will fallback to the +# default: "/pause". This option supports live configuration reload. +pause_command = "/pause" + +# Path to the file which decides what sort of policy we use when deciding +# whether or not to trust an image that we've pulled. It is not recommended that +# this option be used, as the default behavior of using the system-wide default +# policy (i.e., /etc/containers/policy.json) is most often preferred. Please +# refer to containers-policy.json(5) for more details. +signature_policy = "{{ crio_signature_policy }}" + +# Controls how image volumes are handled. The valid values are mkdir, bind and +# ignore; the latter will ignore volumes entirely. +image_volumes = "mkdir" + +# The crio.network table containers settings pertaining to the management of +# CNI plugins. +[crio.network] + +# The default CNI network name to be selected. If not set or "", then +# CRI-O will pick-up the first one found in network_dir. +# cni_default_network = "" + +# Path to the directory where CNI configuration files are located. +network_dir = "/etc/cni/net.d/" + +# Paths to directories where CNI plugin binaries are located. +plugin_dirs = [ + "/opt/cni/bin", + "/usr/libexec/cni", +] + +# A necessary configuration for Prometheus based metrics retrieval +[crio.metrics] + +# Globally enable or disable metrics support. +enable_metrics = {{ crio_enable_metrics | bool | lower }} + +# The port on which the metrics server will listen. +metrics_port = {{ crio_metrics_port }} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/http-proxy.conf.j2 b/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/http-proxy.conf.j2 new file mode 100644 index 0000000..212f30f --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/http-proxy.conf.j2 @@ -0,0 +1,2 @@ +[Service] +Environment={% if http_proxy is defined %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy is defined %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy is defined %}"NO_PROXY={{ no_proxy }}"{% endif %} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/registry.conf.j2 b/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/registry.conf.j2 new file mode 100644 index 0000000..38368f9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/registry.conf.j2 @@ -0,0 +1,13 @@ +[[registry]] +prefix = "{{ item.prefix | default(item.location) }}" +insecure = {{ item.insecure | default('false') | string | lower }} +blocked = {{ item.blocked | default('false') | string | lower }} +location = "{{ item.location }}" +{% if item.mirrors is defined %} +{% for mirror in item.mirrors %} + +[[registry.mirror]] +location = "{{ mirror.location }}" +insecure = {{ mirror.insecure | default('false') | string | lower }} +{% endfor %} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/unqualified.conf.j2 b/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/unqualified.conf.j2 new file mode 100644 index 0000000..fc91f8b --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/cri-o/templates/unqualified.conf.j2 @@ -0,0 +1,10 @@ +{%- set _unqualified_registries = [] -%} +{% for _registry in crio_registries if 
_registry.unqualified -%} +{% if _registry.prefix is defined -%} +{{ _unqualified_registries.append(_registry.prefix) }} +{% else %} +{{ _unqualified_registries.append(_registry.location) }} +{%- endif %} +{%- endfor %} + +unqualified-search-registries = {{ _unqualified_registries | string }} diff --git a/kubespray/extra_playbooks/roles/container-engine/crictl/handlers/main.yml b/kubespray/extra_playbooks/roles/container-engine/crictl/handlers/main.yml new file mode 100644 index 0000000..5319586 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/crictl/handlers/main.yml @@ -0,0 +1,12 @@ +--- +- name: Get crictl completion + command: "{{ bin_dir }}/crictl completion" + changed_when: False + register: cri_completion + check_mode: false + +- name: Install crictl completion + copy: + dest: /etc/bash_completion.d/crictl + content: "{{ cri_completion.stdout }}" + mode: 0644 diff --git a/kubespray/extra_playbooks/roles/container-engine/crictl/tasks/crictl.yml b/kubespray/extra_playbooks/roles/container-engine/crictl/tasks/crictl.yml new file mode 100644 index 0000000..36e09e4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/crictl/tasks/crictl.yml @@ -0,0 +1,22 @@ +--- +- name: crictl | Download crictl + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.crictl) }}" + +- name: Install crictl config + template: + src: crictl.yaml.j2 + dest: /etc/crictl.yaml + owner: root + mode: 0644 + +- name: Copy crictl binary from download dir + copy: + src: "{{ local_release_dir }}/crictl" + dest: "{{ bin_dir }}/crictl" + mode: 0755 + remote_src: true + notify: + - Get crictl completion + - Install crictl completion diff --git a/kubespray/extra_playbooks/roles/container-engine/crictl/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/crictl/tasks/main.yml new file mode 100644 index 0000000..99ed216 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/crictl/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: install crictl + include_tasks: crictl.yml diff --git a/kubespray/extra_playbooks/roles/container-engine/crictl/templates/crictl.yaml.j2 b/kubespray/extra_playbooks/roles/container-engine/crictl/templates/crictl.yaml.j2 new file mode 100644 index 0000000..b97dbef --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/crictl/templates/crictl.yaml.j2 @@ -0,0 +1,4 @@ +runtime-endpoint: {{ cri_socket }} +image-endpoint: {{ cri_socket }} +timeout: 30 +debug: false diff --git a/kubespray/extra_playbooks/roles/container-engine/crun/defaults/main.yml b/kubespray/extra_playbooks/roles/container-engine/crun/defaults/main.yml new file mode 100644 index 0000000..65e08d7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/crun/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +crun_bin_dir: /usr/bin/ diff --git a/kubespray/extra_playbooks/roles/container-engine/crun/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/crun/tasks/main.yml new file mode 100644 index 0000000..d541a49 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/crun/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: crun | Download crun binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.crun) }}" + +- name: Copy crun binary from download dir + copy: + src: "{{ local_release_dir }}/crun" + dest: "{{ crun_bin_dir }}/crun" + mode: 0755 + remote_src: true diff --git
a/kubespray/extra_playbooks/roles/container-engine/docker-storage/defaults/main.yml b/kubespray/extra_playbooks/roles/container-engine/docker-storage/defaults/main.yml new file mode 100644 index 0000000..6a69556 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker-storage/defaults/main.yml @@ -0,0 +1,19 @@ +--- +docker_container_storage_setup_repository: https://github.com/projectatomic/container-storage-setup.git +docker_container_storage_setup_version: v0.6.0 +docker_container_storage_setup_profile_name: kubespray +docker_container_storage_setup_storage_driver: devicemapper +docker_container_storage_setup_container_thinpool: docker-pool +# A disk path must be defined for docker_container_storage_setup_devs. +# Otherwise docker-storage-setup will not run correctly. +# docker_container_storage_setup_devs: /dev/vdb +docker_container_storage_setup_data_size: 40%FREE +docker_container_storage_setup_min_data_size: 2G +docker_container_storage_setup_chunk_size: 512K +docker_container_storage_setup_growpart: "false" +docker_container_storage_setup_auto_extend_pool: "yes" +docker_container_storage_setup_pool_autoextend_threshold: 60 +docker_container_storage_setup_pool_autoextend_percent: 20 +docker_container_storage_setup_device_wait_timeout: 60 +docker_container_storage_setup_wipe_signatures: "false" +docker_container_storage_setup_container_root_lv_size: 40%FREE diff --git a/kubespray/extra_playbooks/roles/container-engine/docker-storage/files/install_container_storage_setup.sh b/kubespray/extra_playbooks/roles/container-engine/docker-storage/files/install_container_storage_setup.sh new file mode 100644 index 0000000..604c843 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker-storage/files/install_container_storage_setup.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +set -e + +repository=${1:-https://github.com/projectatomic/container-storage-setup.git} +version=${2:-master} +profile_name=${3:-kubespray} +dir=`mktemp -d` +export GIT_DIR=$dir/.git +export GIT_WORK_TREE=$dir + +git init +git fetch --depth 1 $repository $version +git merge FETCH_HEAD +make -C $dir install +rm -rf /var/lib/container-storage-setup/$profile_name $dir + +set +e + +/usr/bin/container-storage-setup create $profile_name /etc/sysconfig/docker-storage-setup && /usr/bin/container-storage-setup activate $profile_name +# FIXME: exit status can be 1 for both fatal and non-fatal errors in current release, +# could be improved by matching error strings +exit 0 diff --git a/kubespray/extra_playbooks/roles/container-engine/docker-storage/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/docker-storage/tasks/main.yml new file mode 100644 index 0000000..4629381 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker-storage/tasks/main.yml @@ -0,0 +1,48 @@ +--- + +- name: docker-storage-setup | install git and make + with_items: [git, make] + package: + pkg: "{{ item }}" + state: present + +- name: docker-storage-setup | docker-storage-setup sysconfig template + template: + src: docker-storage-setup.j2 + dest: /etc/sysconfig/docker-storage-setup + mode: 0644 + +- name: docker-storage-override-directory | docker service storage-setup override dir + file: + dest: /etc/systemd/system/docker.service.d + mode: 0755 + owner: root + group: root + state: directory + +- name: docker-storage-override | docker service storage-setup override file + copy: + dest: /etc/systemd/system/docker.service.d/override.conf + content: |- + ### This file is managed by Ansible +
[Service] + EnvironmentFile=-/etc/sysconfig/docker-storage + + owner: root + group: root + mode: 0644 + +# https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository +- name: docker-storage-setup | install lvm2 + package: + name: lvm2 + state: present + +- name: docker-storage-setup | install and run container-storage-setup + become: yes + script: | + install_container_storage_setup.sh \ + {{ docker_container_storage_setup_repository }} \ + {{ docker_container_storage_setup_version }} \ + {{ docker_container_storage_setup_profile_name }} + notify: Docker | reload systemd diff --git a/kubespray/extra_playbooks/roles/container-engine/docker-storage/templates/docker-storage-setup.j2 b/kubespray/extra_playbooks/roles/container-engine/docker-storage/templates/docker-storage-setup.j2 new file mode 100644 index 0000000..1a502b2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker-storage/templates/docker-storage-setup.j2 @@ -0,0 +1,35 @@ +{%if docker_container_storage_setup_storage_driver is defined%}STORAGE_DRIVER={{docker_container_storage_setup_storage_driver}}{%endif%} + +{%if docker_container_storage_setup_extra_storage_options is defined%}EXTRA_STORAGE_OPTIONS={{docker_container_storage_setup_extra_storage_options}}{%endif%} + +{%if docker_container_storage_setup_devs is defined%}DEVS={{docker_container_storage_setup_devs}}{%endif%} + +{%if docker_container_storage_setup_container_thinpool is defined%}CONTAINER_THINPOOL={{docker_container_storage_setup_container_thinpool}}{%endif%} + +{%if docker_container_storage_setup_vg is defined%}VG={{docker_container_storage_setup_vg}}{%endif%} + +{%if docker_container_storage_setup_root_size is defined%}ROOT_SIZE={{docker_container_storage_setup_root_size}}{%endif%} + +{%if docker_container_storage_setup_data_size is defined%}DATA_SIZE={{docker_container_storage_setup_data_size}}{%endif%} + +{%if docker_container_storage_setup_min_data_size is defined%}MIN_DATA_SIZE={{docker_container_storage_setup_min_data_size}}{%endif%} + +{%if docker_container_storage_setup_chunk_size is defined%}CHUNK_SIZE={{docker_container_storage_setup_chunk_size}}{%endif%} + +{%if docker_container_storage_setup_growpart is defined%}GROWPART={{docker_container_storage_setup_growpart}}{%endif%} + +{%if docker_container_storage_setup_auto_extend_pool is defined%}AUTO_EXTEND_POOL={{docker_container_storage_setup_auto_extend_pool}}{%endif%} + +{%if docker_container_storage_setup_pool_autoextend_threshold is defined%}POOL_AUTOEXTEND_THRESHOLD={{docker_container_storage_setup_pool_autoextend_threshold}}{%endif%} + +{%if docker_container_storage_setup_pool_autoextend_percent is defined%}POOL_AUTOEXTEND_PERCENT={{docker_container_storage_setup_pool_autoextend_percent}}{%endif%} + +{%if docker_container_storage_setup_device_wait_timeout is defined%}DEVICE_WAIT_TIMEOUT={{docker_container_storage_setup_device_wait_timeout}}{%endif%} + +{%if docker_container_storage_setup_wipe_signatures is defined%}WIPE_SIGNATURES={{docker_container_storage_setup_wipe_signatures}}{%endif%} + +{%if docker_container_storage_setup_container_root_lv_name is defined%}CONTAINER_ROOT_LV_NAME={{docker_container_storage_setup_container_root_lv_name}}{%endif%} + +{%if docker_container_storage_setup_container_root_lv_size is defined%}CONTAINER_ROOT_LV_SIZE={{docker_container_storage_setup_container_root_lv_size}}{%endif%} + +{%if docker_container_storage_setup_container_root_lv_mount_path is 
defined%}CONTAINER_ROOT_LV_MOUNT_PATH={{docker_container_storage_setup_container_root_lv_mount_path}}{%endif%} diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/defaults/main.yml b/kubespray/extra_playbooks/roles/container-engine/docker/defaults/main.yml new file mode 100644 index 0000000..91227f9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/defaults/main.yml @@ -0,0 +1,64 @@ +--- +docker_version: '20.10' +docker_cli_version: "{{ docker_version }}" + +docker_package_info: + pkgs: + +docker_repo_key_info: + repo_keys: + +docker_repo_info: + repos: + +docker_cgroup_driver: systemd + +docker_bin_dir: "/usr/bin" + +# flag to enable/disable docker cleanup +docker_orphan_clean_up: false + +# old docker package names to be removed +docker_remove_packages_yum: + - docker + - docker-common + - docker-engine + - docker-selinux.noarch + - docker-client + - docker-client-latest + - docker-latest + - docker-latest-logrotate + - docker-logrotate + - docker-engine-selinux.noarch + +# remove podman to avoid a conflict with containerd.io +podman_remove_packages_yum: + - podman + +docker_remove_packages_apt: + - docker + - docker-engine + - docker.io + +# Docker specific repos should be part of the docker role not containerd-common anymore +# Optional values for containerd apt repo +containerd_package_info: + pkgs: + +# Fedora docker-ce repo +docker_fedora_repo_base_url: 'https://download.docker.com/linux/fedora/{{ ansible_distribution_major_version }}/$basearch/stable' +docker_fedora_repo_gpgkey: 'https://download.docker.com/linux/fedora/gpg' + +# CentOS/RedHat docker-ce repo +docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/{{ ansible_distribution_major_version }}/$basearch/stable' +docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg' + +# Ubuntu docker-ce repo +docker_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu" +docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg' +docker_ubuntu_repo_repokey: '9DC858229FC7DD38854AE2D88D81803C0EBFCD88' + +# Debian docker-ce repo +docker_debian_repo_base_url: "https://download.docker.com/linux/debian" +docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg' +docker_debian_repo_repokey: '9DC858229FC7DD38854AE2D88D81803C0EBFCD88' diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/files/cleanup-docker-orphans.sh b/kubespray/extra_playbooks/roles/container-engine/docker/files/cleanup-docker-orphans.sh new file mode 100644 index 0000000..d7a9a8f --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/files/cleanup-docker-orphans.sh @@ -0,0 +1,38 @@ +#!/bin/bash +list_descendants () +{ + local children=$(ps -o pid= --ppid "$1") + for pid in $children + do + list_descendants "$pid" + done + [[ -n "$children" ]] && echo "$children" +} + +shim_search="^docker-containerd-shim|^containerd-shim" +count_shim_processes=$(pgrep -f $shim_search | wc -l) + +if [ ${count_shim_processes} -gt 0 ]; then + # Find all container pids from shims + orphans=$(pgrep -P $(pgrep -d ',' -f $shim_search) |\ + # Filter out valid docker pids, leaving the orphans + egrep -v $(docker ps -q | xargs docker inspect --format '{{.State.Pid}}' | awk '{printf "%s%s",sep,$1; sep="|"}')) + + if [[ -n "$orphans" && -n "$(ps -o ppid= $orphans)" ]] + then + # Get shim pids of orphans + orphan_shim_pids=$(ps -o pid= $(ps -o ppid= $orphans)) + + # Find all orphaned container PIDs + orphan_container_pids=$(for pid in $orphan_shim_pids; do
list_descendants $pid; done) + + # Recursively kill all child PIDs of orphan shims + echo -e "Killing orphan container PIDs and descendants: \n$(ps -O ppid= $orphan_container_pids)" + kill -9 $orphan_container_pids || true + + else + echo "No orphaned containers found" + fi +else + echo "The node doesn't have any shim processes." +fi diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/handlers/main.yml b/kubespray/extra_playbooks/roles/container-engine/docker/handlers/main.yml new file mode 100644 index 0000000..8c26de2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/handlers/main.yml @@ -0,0 +1,32 @@ +--- +- name: restart docker + command: /bin/true + notify: + - Docker | reload systemd + - Docker | reload docker.socket + - Docker | reload docker + - Docker | wait for docker + +- name: Docker | reload systemd + systemd: + name: docker + daemon_reload: true + masked: no + +- name: Docker | reload docker.socket + service: + name: docker.socket + state: restarted + when: ansible_os_family in ['Flatcar', 'Flatcar Container Linux by Kinvolk'] or is_fedora_coreos + +- name: Docker | reload docker + service: + name: docker + state: restarted + +- name: Docker | wait for docker + command: "{{ docker_bin_dir }}/docker images" + register: docker_ready + retries: 20 + delay: 1 + until: docker_ready.rc == 0 diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/meta/main.yml b/kubespray/extra_playbooks/roles/container-engine/docker/meta/main.yml new file mode 100644 index 0000000..d7e4751 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - role: container-engine/containerd-common + - role: container-engine/docker-storage + when: docker_container_storage_setup and ansible_os_family == "RedHat" diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/tasks/docker_plugin.yml b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/docker_plugin.yml new file mode 100644 index 0000000..8ee530e --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/docker_plugin.yml @@ -0,0 +1,8 @@ +--- +- name: Install Docker plugin + command: docker plugin install --grant-all-permissions {{ docker_plugin | quote }} + when: docker_plugin is defined + register: docker_plugin_status + failed_when: + - docker_plugin_status.failed + - '"already exists" not in docker_plugin_status.stderr' diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/main.yml new file mode 100644 index 0000000..ae7b574 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/main.yml @@ -0,0 +1,177 @@ +--- +- name: check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: set docker_version for openEuler + set_fact: + docker_version: '19.03' + when: ansible_distribution == "openEuler" + tags: + - facts + +- name: gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml" + - "{{ 
ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_distribution.split(' ')[0]|lower }}.yml" + - "{{ ansible_os_family|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + skip: true + tags: + - facts + +- name: Warn about Docker version on SUSE + debug: + msg: "SUSE distributions always install Docker from the distro repos" + when: ansible_pkg_mgr == 'zypper' + +- include_tasks: set_facts_dns.yml + when: dns_mode != 'none' and resolvconf_mode == 'docker_dns' + tags: + - facts + +- import_tasks: pre-upgrade.yml + +- name: ensure docker-ce repository public key is installed + apt_key: + id: "{{ item }}" + url: "{{ docker_repo_key_info.url }}" + state: present + register: keyserver_task_result + until: keyserver_task_result is succeeded + retries: 4 + delay: "{{ retry_stagger | d(3) }}" + with_items: "{{ docker_repo_key_info.repo_keys }}" + environment: "{{ proxy_env }}" + when: ansible_pkg_mgr == 'apt' + +- name: ensure docker-ce repository is enabled + apt_repository: + repo: "{{ item }}" + state: present + with_items: "{{ docker_repo_info.repos }}" + when: ansible_pkg_mgr == 'apt' + +- name: Configure docker repository on Fedora + template: + src: "fedora_docker.repo.j2" + dest: "{{ yum_repo_dir }}/docker.repo" + mode: 0644 + when: ansible_distribution == "Fedora" and not is_ostree + +- name: Configure docker repository on RedHat/CentOS/OracleLinux/AlmaLinux/KylinLinux + template: + src: "rh_docker.repo.j2" + dest: "{{ yum_repo_dir }}/docker-ce.repo" + mode: 0644 + when: + - ansible_os_family == "RedHat" + - ansible_distribution != "Fedora" + - not is_ostree + +- name: Remove dpkg hold + dpkg_selections: + name: "{{ item }}" + selection: install + when: ansible_pkg_mgr == 'apt' + changed_when: false + with_items: + - "{{ containerd_package }}" + - docker-ce + - docker-ce-cli + +- name: ensure docker packages are installed + package: + name: "{{ docker_package_info.pkgs }}" + state: "{{ docker_package_info.state | default('present') }}" + module_defaults: + apt: + update_cache: true + dnf: + enablerepo: "{{ docker_package_info.enablerepo | default(omit) }}" + disablerepo: "{{ docker_package_info.disablerepo | default(omit) }}" + yum: + enablerepo: "{{ docker_package_info.enablerepo | default(omit) }}" + zypper: + update_cache: true + register: docker_task_result + until: docker_task_result is succeeded + retries: 4 + delay: "{{ retry_stagger | d(3) }}" + notify: restart docker + when: + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + - not is_ostree + - docker_package_info.pkgs|length > 0 + +# This is required to ensure any apt upgrade will not break kubernetes +- name: Tell Debian hosts not to change the docker version with apt upgrade + dpkg_selections: + name: "{{ item }}" + selection: hold + when: ansible_pkg_mgr == 'apt' + changed_when: false + with_items: + - "{{ containerd_package }}" + - docker-ce + - docker-ce-cli + +- name: ensure docker started, remove our config if docker start failed and try again + block: + - name: ensure service is started if docker packages are already present + service: + name: docker + state: started + when: docker_task_result is not changed + rescue: + - debug: # noqa unnamed-task + 
msg: "Docker start failed. Try to remove our config" + - name: remove kubespray generated config + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/systemd/system/docker.service.d/http-proxy.conf + - /etc/systemd/system/docker.service.d/docker-options.conf + - /etc/systemd/system/docker.service.d/docker-dns.conf + - /etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf + notify: restart docker + +- name: flush handlers so we can wait for docker to come up + meta: flush_handlers + +# Install each plugin using a looped include to make error handling in the included task simpler. +- include_tasks: docker_plugin.yml + loop: "{{ docker_plugins }}" + loop_control: + loop_var: docker_plugin + +- name: Set docker systemd config + import_tasks: systemd.yml + +- name: ensure docker service is started and enabled + service: + name: "{{ item }}" + enabled: yes + state: started + with_items: + - docker diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/tasks/pre-upgrade.yml b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/pre-upgrade.yml new file mode 100644 index 0000000..f346b46 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/pre-upgrade.yml @@ -0,0 +1,36 @@ +--- +- name: Remove legacy docker repo file + file: + path: "{{ yum_repo_dir }}/docker.repo" + state: absent + when: + - ansible_os_family == 'RedHat' + - not is_ostree + +- name: Ensure old versions of Docker are not installed. | Debian + apt: + name: '{{ docker_remove_packages_apt }}' + state: absent + when: + - ansible_os_family == 'Debian' + - (docker_versioned_pkg[docker_version | string] is search('docker-ce')) + + +- name: Ensure podman not installed. | RedHat + package: + name: '{{ podman_remove_packages_yum }}' + state: absent + when: + - ansible_os_family == 'RedHat' + - (docker_versioned_pkg[docker_version | string] is search('docker-ce')) + - not is_ostree + + +- name: Ensure old versions of Docker are not installed. 
| RedHat + package: + name: '{{ docker_remove_packages_yum }}' + state: absent + when: + - ansible_os_family == 'RedHat' + - (docker_versioned_pkg[docker_version | string] is search('docker-ce')) + - not is_ostree diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/tasks/reset.yml b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/reset.yml new file mode 100644 index 0000000..76d125b --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/reset.yml @@ -0,0 +1,106 @@ +--- + +- name: Docker | Get package facts + package_facts: + manager: auto + +- name: Docker | Find docker packages + set_fact: + docker_packages_list: "{{ ansible_facts.packages.keys() | select('search', '^docker*') }}" + containerd_package: "{{ ansible_facts.packages.keys() | select('search', '^containerd*') }}" + +- name: Docker | Stop all running container + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -q | xargs -r {{ docker_bin_dir }}/docker kill" + args: + executable: /bin/bash + register: stop_all_containers + retries: 5 + until: stop_all_containers.rc == 0 + changed_when: true + delay: 5 + ignore_errors: true # noqa ignore-errors + when: docker_packages_list|length>0 + +- name: reset | remove all containers + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv" + args: + executable: /bin/bash + register: remove_all_containers + retries: 4 + until: remove_all_containers.rc == 0 + delay: 5 + when: docker_packages_list|length>0 + +- name: Docker | Stop docker service + service: + name: "{{ item }}" + enabled: false + state: stopped + loop: + - docker + - docker.socket + - containerd + when: docker_packages_list|length>0 + +- name: Docker | Remove dpkg hold + dpkg_selections: + name: "{{ item }}" + selection: install + when: ansible_pkg_mgr == 'apt' + changed_when: false + with_items: + - "{{ docker_packages_list }}" + - "{{ containerd_package }}" + +- name: Docker | Remove docker package + package: + name: "{{ item }}" + state: absent + changed_when: false + with_items: + - "{{ docker_packages_list }}" + - "{{ containerd_package }}" + when: + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + - not is_ostree + - docker_packages_list|length > 0 + +- name: Docker | ensure docker-ce repository is removed + apt_repository: + repo: "{{ item }}" + state: absent + with_items: "{{ docker_repo_info.repos }}" + when: ansible_pkg_mgr == 'apt' + +- name: Docker | Remove docker repository on Fedora + file: + name: "{{ yum_repo_dir }}/docker.repo" + state: absent + when: ansible_distribution == "Fedora" and not is_ostree + +- name: Docker | Remove docker repository on RedHat/CentOS/Oracle/AlmaLinux Linux + file: + name: "{{ yum_repo_dir }}/docker-ce.repo" + state: absent + when: + - ansible_os_family == "RedHat" + - ansible_distribution != "Fedora" + - not is_ostree + +- name: Docker | Remove docker configuration files + file: + name: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/docker.service.d/ + - /etc/systemd/system/docker.socket + - /etc/systemd/system/docker.service + - /etc/systemd/system/containerd.service + - /etc/systemd/system/containerd.service.d + - /var/lib/docker + - /etc/docker + ignore_errors: true # noqa ignore-errors + +- name: Docker | systemctl daemon-reload # noqa 503 + systemd: + daemon_reload: true diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/tasks/set_facts_dns.yml 
b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/set_facts_dns.yml new file mode 100644 index 0000000..d800373 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/set_facts_dns.yml @@ -0,0 +1,66 @@ +--- + +- name: set dns server for docker + set_fact: + docker_dns_servers: "{{ dns_servers }}" + +- name: show docker_dns_servers + debug: + msg: "{{ docker_dns_servers }}" + +- name: add upstream dns servers + set_fact: + docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}" + when: dns_mode in ['coredns', 'coredns_dual'] + +- name: add global searchdomains + set_fact: + docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}" + +- name: check system nameservers + shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/' + args: + executable: /bin/bash + changed_when: False + register: system_nameservers + check_mode: no + +- name: check system search domains + # noqa 306 - if resolv.conf has no search domain, grep will exit 1 which would force us to add failed_when: false + # Therefore -o pipefail is not applicable in this specific instance + shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/' + args: + executable: /bin/bash + changed_when: False + register: system_search_domains + check_mode: no + +- name: add system nameservers to docker options + set_fact: + docker_dns_servers: "{{ docker_dns_servers | union(system_nameservers.stdout_lines) | unique }}" + when: system_nameservers.stdout + +- name: add system search domains to docker options + set_fact: + docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split()|default([])) | unique }}" + when: system_search_domains.stdout + +- name: check number of nameservers + fail: + msg: "Too many nameservers. You can relax this check by setting docker_dns_servers_strict=false in docker.yml and we will only use the first 3."
+ when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool + +- name: rtrim number of nameservers to 3 + set_fact: + docker_dns_servers: "{{ docker_dns_servers[0:3] }}" + when: docker_dns_servers|length > 3 and not docker_dns_servers_strict|bool + +- name: check number of search domains + fail: + msg: "Too many search domains" + when: docker_dns_search_domains|length > 6 + +- name: check length of search domains + fail: + msg: "Search domains exceeded limit of 256 characters" + when: docker_dns_search_domains|join(' ')|length > 256 diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/tasks/systemd.yml b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/systemd.yml new file mode 100644 index 0000000..0c040fe --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/tasks/systemd.yml @@ -0,0 +1,68 @@ +--- +- name: Create docker service systemd directory if it doesn't exist + file: + path: /etc/systemd/system/docker.service.d + state: directory + mode: 0755 + +- name: Write docker proxy drop-in + template: + src: http-proxy.conf.j2 + dest: /etc/systemd/system/docker.service.d/http-proxy.conf + mode: 0644 + notify: restart docker + when: http_proxy is defined or https_proxy is defined + +- name: get systemd version + # noqa 303 - systemctl is called intentionally here + shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2 + args: + executable: /bin/bash + register: systemd_version + when: not is_ostree + changed_when: false + check_mode: false + +- name: Write docker.service systemd file + template: + src: docker.service.j2 + dest: /etc/systemd/system/docker.service + mode: 0644 + register: docker_service_file + notify: restart docker + when: + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + - not is_fedora_coreos + +- name: Write docker options systemd drop-in + template: + src: docker-options.conf.j2 + dest: "/etc/systemd/system/docker.service.d/docker-options.conf" + mode: 0644 + notify: restart docker + +- name: Write docker dns systemd drop-in + template: + src: docker-dns.conf.j2 + dest: "/etc/systemd/system/docker.service.d/docker-dns.conf" + mode: 0644 + notify: restart docker + when: dns_mode != 'none' and resolvconf_mode == 'docker_dns' + +- name: Copy docker orphan clean up script to the node + copy: + src: cleanup-docker-orphans.sh + dest: "{{ bin_dir }}/cleanup-docker-orphans.sh" + mode: 0755 + when: docker_orphan_clean_up | bool + +- name: Write docker orphan clean up systemd drop-in + template: + src: docker-orphan-cleanup.conf.j2 + dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf" + mode: 0644 + notify: restart docker + when: docker_orphan_clean_up | bool + +- name: Flush handlers + meta: flush_handlers diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker-dns.conf.j2 b/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker-dns.conf.j2 new file mode 100644 index 0000000..d501a19 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker-dns.conf.j2 @@ -0,0 +1,6 @@ +[Service] +Environment="DOCKER_DNS_OPTIONS=\ + {% for d in docker_dns_servers %}--dns {{ d }} {% endfor %} \ + {% for d in docker_dns_search_domains %}--dns-search {{ d }} {% endfor %} \ + {% for o in docker_dns_options %}--dns-opt {{ o }} {% endfor %} \ +" \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker-options.conf.j2 
b/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker-options.conf.j2 new file mode 100644 index 0000000..ae661ad --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker-options.conf.j2 @@ -0,0 +1,11 @@ +[Service] +Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }} \ +--exec-opt native.cgroupdriver={{ docker_cgroup_driver }} \ +{% for i in docker_insecure_registries %}--insecure-registry={{ i }} {% endfor %} \ +{% for i in docker_registry_mirrors %}--registry-mirror={{ i }} {% endfor %} \ +--data-root={{ docker_daemon_graph }} \ +{% if ansible_os_family not in ["openSUSE Leap", "openSUSE Tumbleweed", "Suse"] %}{{ docker_log_opts }}{% endif %}" + +{% if docker_mount_flags is defined and docker_mount_flags != "" %} +MountFlags={{ docker_mount_flags }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker-orphan-cleanup.conf.j2 b/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker-orphan-cleanup.conf.j2 new file mode 100644 index 0000000..787a941 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker-orphan-cleanup.conf.j2 @@ -0,0 +1,2 @@ +[Service] +ExecStartPost=-{{ bin_dir }}/cleanup-docker-orphans.sh \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker.service.j2 b/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker.service.j2 new file mode 100644 index 0000000..fd1d061 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/templates/docker.service.j2 @@ -0,0 +1,47 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=http://docs.docker.com +{% if ansible_os_family == "RedHat" %} +After=network.target {{ ' docker-storage-setup.service' if docker_container_storage_setup else '' }} containerd.service +BindsTo=containerd.service +{{ 'Wants=docker-storage-setup.service' if docker_container_storage_setup else '' }} +{% elif ansible_os_family == "Debian" %} +After=network.target docker.socket containerd.service +BindsTo=containerd.service +Wants=docker.socket +{% elif ansible_os_family == "Suse" %} +After=network.target lvm2-monitor.service SuSEfirewall2.service +# After=network.target containerd.service +# BindsTo=containerd.service +{% endif %} + +[Service] +Type=notify +{% if docker_storage_options is defined %} +Environment="DOCKER_STORAGE_OPTIONS={{ docker_storage_options }}" +{% endif %} +Environment=GOTRACEBACK=crash +ExecReload=/bin/kill -s HUP $MAINPID +Delegate=yes +KillMode=process +ExecStart={{ docker_bin_dir }}/dockerd \ +{% if ansible_os_family == "Suse" %} + --add-runtime oci=/usr/sbin/docker-runc \ +{% endif %} + $DOCKER_OPTS \ + $DOCKER_STORAGE_OPTIONS \ + $DOCKER_DNS_OPTIONS +{% if not is_ostree and systemd_version.stdout|int >= 226 %} +TasksMax=infinity +{% endif %} +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=1min +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/templates/fedora_docker.repo.j2 b/kubespray/extra_playbooks/roles/container-engine/docker/templates/fedora_docker.repo.j2 new file mode 100644 index 0000000..3958ff0 --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/container-engine/docker/templates/fedora_docker.repo.j2 @@ -0,0 +1,7 @@ +[docker-ce] +name=Docker-CE Repository +baseurl={{ docker_fedora_repo_base_url }} +enabled=1 +gpgcheck={{ '1' if docker_fedora_repo_gpgkey else '0' }} +gpgkey={{ docker_fedora_repo_gpgkey }} +{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %} diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/templates/http-proxy.conf.j2 b/kubespray/extra_playbooks/roles/container-engine/docker/templates/http-proxy.conf.j2 new file mode 100644 index 0000000..212f30f --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/templates/http-proxy.conf.j2 @@ -0,0 +1,2 @@ +[Service] +Environment={% if http_proxy is defined %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy is defined %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy is defined %}"NO_PROXY={{ no_proxy }}"{% endif %} diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/templates/rh_docker.repo.j2 b/kubespray/extra_playbooks/roles/container-engine/docker/templates/rh_docker.repo.j2 new file mode 100644 index 0000000..178bbc2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/templates/rh_docker.repo.j2 @@ -0,0 +1,10 @@ +[docker-ce] +name=Docker-CE Repository +baseurl={{ docker_rh_repo_base_url }} +enabled=0 +gpgcheck={{ '1' if docker_rh_repo_gpgkey else '0' }} +keepcache={{ docker_rpm_keepcache | default('1') }} +gpgkey={{ docker_rh_repo_gpgkey }} +{% if http_proxy is defined %} +proxy={{ http_proxy }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/amazon.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/amazon.yml new file mode 100644 index 0000000..4871f4a --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/amazon.yml @@ -0,0 +1,15 @@ +--- +# https://docs.aws.amazon.com/en_us/AmazonECS/latest/developerguide/docker-basics.html + +docker_versioned_pkg: + 'latest': docker + '18.09': docker-18.09.9ce-2.amzn2 + '19.03': docker-19.03.13ce-1.amzn2 + '20.10': docker-20.10.7-5.amzn2 + +docker_version: "latest" + +docker_package_info: + pkgs: + - "{{ docker_versioned_pkg[docker_version | string] }}" + enablerepo: amzn2extra-docker diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/clearlinux.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/clearlinux.yml new file mode 100644 index 0000000..fbb7a22 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/clearlinux.yml @@ -0,0 +1,4 @@ +--- +docker_package_info: + pkgs: + - "containers-basic" diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/debian-stretch.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/debian-stretch.yml new file mode 100644 index 0000000..f26f60b --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/debian-stretch.yml @@ -0,0 +1,45 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}=1.3.7-1" + '1.3.9': "{{ containerd_package }}=1.3.9-1" + '1.4.3': "{{ containerd_package }}=1.4.3-1" + 'stable': "{{ containerd_package }}=1.4.3-1" + 'edge': "{{ containerd_package }}=1.4.3-1" + +docker_version: 19.03 +docker_cli_version: 19.03 + +# https://download.docker.com/linux/debian/ +docker_versioned_pkg: + 'latest': docker-ce + '18.09': 
docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + 'stable': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + 'edge': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + +docker_package_info: + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" + +docker_repo_key_info: + url: '{{ docker_debian_repo_gpgkey }}' + repo_keys: + - '{{ docker_debian_repo_repokey }}' + +docker_repo_info: + repos: + - > + deb {{ docker_debian_repo_base_url }} + {{ ansible_distribution_release|lower }} + stable diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/debian.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/debian.yml new file mode 100644 index 0000000..d46bfa8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/debian.yml @@ -0,0 +1,49 @@ +--- +# containerd package info is only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}=1.3.7-1" + '1.3.9': "{{ containerd_package }}=1.3.9-1" + '1.4.3': "{{ containerd_package }}=1.4.3-2" + '1.4.4': "{{ containerd_package }}=1.4.4-1" + '1.4.6': "{{ containerd_package }}=1.4.6-1" + '1.4.9': "{{ containerd_package }}=1.4.9-1" + '1.4.12': "{{ containerd_package }}=1.4.12-1" + '1.6.4': "{{ containerd_package }}=1.6.4-1" + 'stable': "{{ containerd_package }}=1.6.4-1" + 'edge': "{{ containerd_package }}=1.6.4-1" + +# https://download.docker.com/linux/debian/ +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + '20.10': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + '20.10': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + 'stable': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + 'edge': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + +docker_package_info: + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" + +docker_repo_key_info: + url: '{{ docker_debian_repo_gpgkey }}' + repo_keys: + - '{{ docker_debian_repo_repokey }}' + +docker_repo_info: + repos: + - > + deb {{ docker_debian_repo_base_url }} + {{ 
ansible_distribution_release|lower }} + stable diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/fedora.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/fedora.yml new file mode 100644 index 0000000..8972fd8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/fedora.yml @@ -0,0 +1,37 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.fc{{ ansible_distribution_major_version }}" + '1.3.9': "{{ containerd_package }}-1.3.9-3.1.fc{{ ansible_distribution_major_version }}" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.fc{{ ansible_distribution_major_version }}" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.fc{{ ansible_distribution_major_version }}" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.fc{{ ansible_distribution_major_version }}" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.fc{{ ansible_distribution_major_version }}" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.fc{{ ansible_distribution_major_version }}" + '1.6.4': "{{ containerd_package }}-1.6.4-3.1.fc{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.6.4-3.1.fc{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.6.4-3.1.fc{{ ansible_distribution_major_version }}" + +# https://docs.docker.com/install/linux/docker-ce/fedora/ +# https://download.docker.com/linux/fedora//x86_64/stable/Packages/ +docker_versioned_pkg: + 'latest': docker-ce + '19.03': docker-ce-19.03.15-3.fc{{ ansible_distribution_major_version }} + '20.10': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }} + 'stable': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }} + 'edge': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '19.03': docker-ce-cli-19.03.15-3.fc{{ ansible_distribution_major_version }} + '20.10': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }} + +docker_package_info: + enablerepo: "docker-ce" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/kylin.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/kylin.yml new file mode 100644 index 0000000..d212d41 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/kylin.yml @@ -0,0 +1,41 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.el{{ ansible_distribution_major_version }}" + '1.3.9': "{{ containerd_package }}-1.3.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.el{{ ansible_distribution_major_version }}" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.el{{ ansible_distribution_major_version }}" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.el{{ ansible_distribution_major_version }}" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.el{{ 
ansible_distribution_major_version }}" + '1.6.4': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + +docker_version: 19.03 +docker_cli_version: 19.03 + +# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package +# https://download.docker.com/linux/centos/>/x86_64/stable/Packages/ +# or do 'yum --showduplicates list docker-engine' +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce-3:18.09.9-3.el7 + '19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli-1:18.09.9-3.el7 + '19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + +docker_package_info: + enablerepo: "docker-ce" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/redhat-7.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/redhat-7.yml new file mode 100644 index 0000000..e37c416 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/redhat-7.yml @@ -0,0 +1,40 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.el7" + '1.3.9': "{{ containerd_package }}-1.3.9-3.1.el7" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.el7" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.el7" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.el7" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.el7" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.el7" + '1.6.4': "{{ containerd_package }}-1.6.4-3.1.el7" + 'stable': "{{ containerd_package }}-1.6.4-3.1.el7" + 'edge': "{{ containerd_package }}-1.6.4-3.1.el7" + +# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package +# https://download.docker.com/linux/centos/>/x86_64/stable/Packages/ +# or do 'yum --showduplicates list docker-engine' +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce-18.09.9-3.el7 + '19.03': docker-ce-19.03.15-3.el7 + '20.10': docker-ce-20.10.20-3.el7 + 'stable': docker-ce-20.10.20-3.el7 + 'edge': docker-ce-20.10.20-3.el7 + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli-18.09.9-3.el7 + '19.03': docker-ce-cli-19.03.15-3.el7 + '20.10': docker-ce-cli-20.10.20-3.el7 + 'stable': docker-ce-cli-20.10.20-3.el7 + 'edge': docker-ce-cli-20.10.20-3.el7 + +docker_package_info: + enablerepo: "docker-ce" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/redhat.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/redhat.yml new file mode 
100644 index 0000000..836763f --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/redhat.yml @@ -0,0 +1,40 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.el{{ ansible_distribution_major_version }}" + '1.3.9': "{{ containerd_package }}-1.3.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.el{{ ansible_distribution_major_version }}" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.el{{ ansible_distribution_major_version }}" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.el{{ ansible_distribution_major_version }}" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.el{{ ansible_distribution_major_version }}" + '1.6.4': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + +# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package +# https://download.docker.com/linux/centos/>/x86_64/stable/Packages/ +# or do 'yum --showduplicates list docker-engine' +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce-3:18.09.9-3.el7 + '19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli-1:18.09.9-3.el7 + '19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }} + +docker_package_info: + enablerepo: "docker-ce" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/suse.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/suse.yml new file mode 100644 index 0000000..2d9fbf0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/suse.yml @@ -0,0 +1,6 @@ +--- +docker_package_info: + state: latest + pkgs: + - docker + - containerd diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/ubuntu-16.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/ubuntu-16.yml new file mode 100644 index 0000000..78a6cea --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/ubuntu-16.yml @@ -0,0 +1,46 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}=1.3.7-1" + '1.3.9': "{{ containerd_package }}=1.3.9-1" + '1.4.3': "{{ containerd_package }}=1.4.3-2" + '1.4.4': "{{ containerd_package }}=1.4.4-1" + '1.4.6': "{{ containerd_package }}=1.4.6-1" + 'stable': "{{ containerd_package }}=1.4.6-1" + 
'edge': "{{ containerd_package }}=1.4.6-1" + +# https://download.docker.com/linux/ubuntu/ +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '20.10': docker-ce=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '20.10': docker-ce-cli=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce-cli=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce-cli=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + +docker_package_info: + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" + +docker_repo_key_info: + url: '{{ docker_ubuntu_repo_gpgkey }}' + repo_keys: + - '{{ docker_ubuntu_repo_repokey }}' + +docker_repo_info: + repos: + - > + deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }} + {{ ansible_distribution_release|lower }} + stable diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/ubuntu.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/ubuntu.yml new file mode 100644 index 0000000..cced07e --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/ubuntu.yml @@ -0,0 +1,49 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}=1.3.7-1" + '1.3.9': "{{ containerd_package }}=1.3.9-1" + '1.4.3': "{{ containerd_package }}=1.4.3-2" + '1.4.4': "{{ containerd_package }}=1.4.4-1" + '1.4.6': "{{ containerd_package }}=1.4.6-1" + '1.4.9': "{{ containerd_package }}=1.4.9-1" + '1.4.12': "{{ containerd_package }}=1.4.12-1" + '1.6.4': "{{ containerd_package }}=1.6.4-1" + 'stable': "{{ containerd_package }}=1.6.4-1" + 'edge': "{{ containerd_package }}=1.6.4-1" + +# https://download.docker.com/linux/ubuntu/ +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '20.10': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '20.10': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + +docker_package_info: + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" 
+ - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" + +docker_repo_key_info: + url: '{{ docker_ubuntu_repo_gpgkey }}' + repo_keys: + - '{{ docker_ubuntu_repo_repokey }}' + +docker_repo_info: + repos: + - > + deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }} + {{ ansible_distribution_release|lower }} + stable diff --git a/kubespray/extra_playbooks/roles/container-engine/docker/vars/uniontech.yml b/kubespray/extra_playbooks/roles/container-engine/docker/vars/uniontech.yml new file mode 100644 index 0000000..79b8abc --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/docker/vars/uniontech.yml @@ -0,0 +1,45 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.el{{ ansible_distribution_major_version }}" + '1.3.9': "{{ containerd_package }}-1.3.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.el{{ ansible_distribution_major_version }}" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.el{{ ansible_distribution_major_version }}" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.el{{ ansible_distribution_major_version }}" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.el{{ ansible_distribution_major_version }}" + '1.6.4': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + '1.6.8': "{{ containerd_package }}-1.6.8-3.1.el{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.6.8-3.1.el{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.6.8-3.1.el{{ ansible_distribution_major_version }}" + +docker_version: 19.03 +docker_cli_version: 19.03 + +# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package +# https://download.docker.com/linux/centos/>/x86_64/stable/Packages/ +# or do 'yum --showduplicates list docker-engine' +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce-3:18.09.9-3.el7 + '19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli-1:18.09.9-3.el7 + '19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} + +docker_package_info: + enablerepo: "docker-ce" + disablerepo: "UniontechOS-20-AppStream" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/converge.yml b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/converge.yml new file mode 100644 index 0000000..b14d078 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/converge.yml @@ 
-0,0 +1,11 @@ +--- +- name: Converge + hosts: all + become: true + vars: + gvisor_enabled: true + container_manager: containerd + roles: + - role: kubespray-defaults + - role: container-engine/containerd + - role: container-engine/gvisor diff --git a/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/files/10-mynet.conf b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/files/10-mynet.conf new file mode 100644 index 0000000..f10935b --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.2.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff --git a/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/files/container.json b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/files/container.json new file mode 100644 index 0000000..acec0ce --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/files/container.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "gvisor1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "gvisor1.0.log", + "linux": {} +} diff --git a/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/files/sandbox.json b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/files/sandbox.json new file mode 100644 index 0000000..a8da54d --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "gvisor1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/molecule.yml b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/molecule.yml new file mode 100644 index 0000000..5c3a7e1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/molecule.yml @@ -0,0 +1,45 @@ +--- +driver: + name: vagrant + provider: + name: libvirt + options: + driver: kvm +lint: | + set -e + yamllint -c ../../../.yamllint . 
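# (Added note, not part of the upstream scenario.) The scenario below is driven by
# the standard Molecule workflow run from the role directory: `molecule converge`
# applies converge.yml to the Vagrant/libvirt boxes declared under `platforms:`,
# and `molecule verify` executes the testinfra checks in tests/test_default.py.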
+platforms: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint + inventory: + group_vars: + all: + become: true +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/prepare.yml b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/prepare.yml new file mode 100644 index 0000000..8f9ef7d --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/prepare.yml @@ -0,0 +1,48 @@ +--- +- name: Prepare generic + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: containerd + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + - role: container-engine/crictl + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: 0644 + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: root + mode: 0755 + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: 0644 + with_items: + - 10-mynet.conf diff --git a/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/tests/test_default.py b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/tests/test_default.py new file mode 100644 index 0000000..1cb7fb0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/gvisor/molecule/default/tests/test_default.py @@ -0,0 +1,29 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + gvisorruntime = "/usr/local/bin/runsc" + with host.sudo(): + cmd = host.command(gvisorruntime + " --version") + assert cmd.rc == 0 + assert "runsc version" in cmd.stdout + + +def test_run_pod(host): + runtime = "runsc" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/gvisor1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/kubespray/extra_playbooks/roles/container-engine/gvisor/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/gvisor/tasks/main.yml new file mode 100644 index 0000000..fa5bd72 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/gvisor/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: gVisor | Download runsc binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.gvisor_runsc) }}" + +- name: gVisor | 
Download containerd-shim-runsc-v1 binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.gvisor_containerd_shim) }}" + +- name: gVisor | Copy binaries + copy: + src: "{{ local_release_dir }}/gvisor-{{ item }}" + dest: "{{ bin_dir }}/{{ item }}" + mode: 0755 + remote_src: yes + with_items: + - runsc + - containerd-shim-runsc-v1 diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/OWNERS b/kubespray/extra_playbooks/roles/container-engine/kata-containers/OWNERS new file mode 100644 index 0000000..fa95926 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - pasqualet +reviewers: + - pasqualet diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/defaults/main.yml b/kubespray/extra_playbooks/roles/container-engine/kata-containers/defaults/main.yml new file mode 100644 index 0000000..fc909ca --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/defaults/main.yml @@ -0,0 +1,9 @@ +--- +kata_containers_dir: /opt/kata +kata_containers_config_dir: /etc/kata-containers +kata_containers_containerd_bin_dir: /usr/local/bin + +kata_containers_qemu_default_memory: "{{ ansible_memtotal_mb }}" +kata_containers_qemu_debug: 'false' +kata_containers_qemu_sandbox_cgroup_only: 'true' +kata_containers_qemu_enable_mem_prealloc: 'false' diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/converge.yml b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/converge.yml new file mode 100644 index 0000000..a6fdf81 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/converge.yml @@ -0,0 +1,11 @@ +--- +- name: Converge + hosts: all + become: true + vars: + kata_containers_enabled: true + container_manager: containerd + roles: + - role: kubespray-defaults + - role: container-engine/containerd + - role: container-engine/kata-containers diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf new file mode 100644 index 0000000..f10935b --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.2.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/files/container.json b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/files/container.json new file mode 100644 index 0000000..e2e9a56 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/files/container.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "kata1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "kata1.0.log", + "linux": {} +} diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/files/sandbox.json 
b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/files/sandbox.json new file mode 100644 index 0000000..326a578 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "kata1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/molecule.yml b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/molecule.yml new file mode 100644 index 0000000..63a942b --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/molecule.yml @@ -0,0 +1,45 @@ +--- +driver: + name: vagrant + provider: + name: libvirt + options: + driver: kvm +lint: | + set -e + yamllint -c ../../../.yamllint . +platforms: + - name: ubuntu18 + box: generic/ubuntu1804 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint + inventory: + group_vars: + all: + become: true +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/prepare.yml b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/prepare.yml new file mode 100644 index 0000000..8a0978f --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/prepare.yml @@ -0,0 +1,48 @@ +--- +- name: Prepare + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: containerd + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + - role: container-engine/crictl + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: 0644 + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: "{{ kube_owner }}" + mode: 0755 + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: 0644 + with_items: + - 10-mynet.conf diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/tests/test_default.py b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/tests/test_default.py new file mode 100644 index 0000000..e10fff4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/molecule/default/tests/test_default.py @@ -0,0 +1,37 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + kataruntime = 
"/opt/kata/bin/kata-runtime" + with host.sudo(): + cmd = host.command(kataruntime + " version") + assert cmd.rc == 0 + assert "kata-runtime" in cmd.stdout + + +def test_run_check(host): + kataruntime = "/opt/kata/bin/kata-runtime" + with host.sudo(): + cmd = host.command(kataruntime + " check") + assert cmd.rc == 0 + assert "System is capable of running" in cmd.stdout + + +def test_run_pod(host): + runtime = "kata-qemu" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/kata1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/kata-containers/tasks/main.yml new file mode 100644 index 0000000..54bd25d --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- name: kata-containers | Download kata binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.kata_containers) }}" + +- name: kata-containers | Copy kata-containers binary + unarchive: + src: "{{ local_release_dir }}/kata-static-{{ kata_containers_version }}-{{ image_arch }}.tar.xz" + dest: "/" + mode: 0755 + remote_src: yes + +- name: kata-containers | Create config directory + file: + path: "{{ kata_containers_config_dir }}" + state: directory + mode: 0755 + +- name: kata-containers | Set configuration + template: + src: "{{ item }}.j2" + dest: "{{ kata_containers_config_dir }}/{{ item }}" + mode: 0644 + with_items: + - configuration-qemu.toml + +- name: kata-containers | Set containerd bin + vars: + shim: "{{ item }}" + template: + dest: "{{ kata_containers_containerd_bin_dir }}/containerd-shim-kata-{{ item }}-v2" + src: containerd-shim-kata-v2.j2 + mode: 0755 + with_items: + - qemu + +- name: kata-containers | Load vhost kernel modules + modprobe: + state: present + name: "{{ item }}" + with_items: + - vhost_vsock + - vhost_net + +- name: kata-containers | Persist vhost kernel modules + copy: + dest: /etc/modules-load.d/kubespray-kata-containers.conf + mode: 0644 + content: | + vhost_vsock + vhost_net diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 b/kubespray/extra_playbooks/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 new file mode 100644 index 0000000..4038242 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 @@ -0,0 +1,624 @@ +# Copyright (c) 2017-2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +# XXX: WARNING: this file is auto-generated. +# XXX: +# XXX: Source file: "cli/config/configuration-qemu.toml.in" +# XXX: Project: +# XXX: Name: Kata Containers +# XXX: Type: kata + +[hypervisor.qemu] +path = "/opt/kata/bin/qemu-system-x86_64" +{% if kata_containers_version is version('2.2.0', '>=') %} +kernel = "/opt/kata/share/kata-containers/vmlinux.container" +{% else %} +kernel = "/opt/kata/share/kata-containers/vmlinuz.container" +{% endif %} +image = "/opt/kata/share/kata-containers/kata-containers.img" +machine_type = "q35" + +# Enable confidential guest support. 
+# Toggling that setting may trigger different hardware features, ranging +# from memory encryption to both memory and CPU-state encryption and integrity. +# The Kata Containers runtime dynamically detects the available feature set and +# aims at enabling the largest possible one. +# Default false +# confidential_guest = true + +# List of valid annotation names for the hypervisor +# Each member of the list is a regular expression, which is the base name +# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path" +enable_annotations = [] + +# List of valid annotations values for the hypervisor +# Each member of the list is a path pattern as described by glob(3). +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: ["/opt/kata/bin/qemu-system-x86_64"] +valid_hypervisor_paths = ["/opt/kata/bin/qemu-system-x86_64"] + +# Optional space-separated list of options to pass to the guest kernel. +# For example, use `kernel_params = "vsyscall=emulate"` if you are having +# trouble running pre-2.15 glibc. +# +# WARNING: - any parameter specified here will take priority over the default +# parameter value of the same name used to start the virtual machine. +# Do not set values here unless you understand the impact of doing so as you +# may stop the virtual machine from booting. +# To see the list of default parameters, enable hypervisor debug, create a +# container and look for 'default-kernel-parameters' log entries. +kernel_params = "" + +# Path to the firmware. +# If you want that qemu uses the default firmware leave this option empty +firmware = "" + +# Machine accelerators +# comma-separated list of machine accelerators to pass to the hypervisor. +# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"` +machine_accelerators="" + +# CPU features +# comma-separated list of cpu features to pass to the cpu +# For example, `cpu_features = "pmu=off,vmx=off" +cpu_features="pmu=off" + +# Default number of vCPUs per SB/VM: +# unspecified or 0 --> will be set to 1 +# < 0 --> will be set to the actual number of physical cores +# > 0 <= number of physical cores --> will be set to the specified number +# > number of physical cores --> will be set to the actual number of physical cores +default_vcpus = 1 + +# Default maximum number of vCPUs per SB/VM: +# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number +# of vCPUs supported by KVM if that number is exceeded +# > 0 <= number of physical cores --> will be set to the specified number +# > number of physical cores --> will be set to the actual number of physical cores or to the maximum number +# of vCPUs supported by KVM if that number is exceeded +# WARNING: Depending of the architecture, the maximum number of vCPUs supported by KVM is used when +# the actual number of physical cores is greater than it. +# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU +# the hotplug functionality. For example, `default_maxvcpus = 240` specifies that until 240 vCPUs +# can be added to a SB/VM, but the memory footprint will be big. Another example, with +# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of +# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable, +# unless you know what are you doing. +# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8. 
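# (Added, illustrative example of the rule above.) On a 16-core x86_64 host, the
# default of 0 below resolves to 16 vCPUs, capped at the maximum vCPU count that
# KVM supports on that machine.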
+default_maxvcpus = 0 + +# Bridges can be used to hot plug devices. +# Limitations: +# * Currently only pci bridges are supported +# * Until 30 devices per bridge can be hot plugged. +# * Until 5 PCI bridges can be cold plugged per VM. +# This limitation could be a bug in qemu or in the kernel +# Default number of bridges per SB/VM: +# unspecified or 0 --> will be set to 1 +# > 1 <= 5 --> will be set to the specified number +# > 5 --> will be set to 5 +default_bridges = 1 + +# Default memory size in MiB for SB/VM. +# If unspecified then it will be set 2048 MiB. +default_memory = {{ kata_containers_qemu_default_memory }} +# +# Default memory slots per SB/VM. +# If unspecified then it will be set 10. +# This is will determine the times that memory will be hotadded to sandbox/VM. +#memory_slots = 10 + +# The size in MiB will be plused to max memory of hypervisor. +# It is the memory address space for the NVDIMM devie. +# If set block storage driver (block_device_driver) to "nvdimm", +# should set memory_offset to the size of block device. +# Default 0 +#memory_offset = 0 + +# Specifies virtio-mem will be enabled or not. +# Please note that this option should be used with the command +# "echo 1 > /proc/sys/vm/overcommit_memory". +# Default false +#enable_virtio_mem = true + +# Disable block device from being used for a container's rootfs. +# In case of a storage driver like devicemapper where a container's +# root file system is backed by a block device, the block device is passed +# directly to the hypervisor for performance reasons. +# This flag prevents the block device from being passed to the hypervisor, +# 9pfs is used instead to pass the rootfs. +disable_block_device_use = false + +# Shared file system type: +# - virtio-fs (default) +# - virtio-9p +{% if kata_containers_version is version('2.2.0', '>=') %} +shared_fs = "virtio-fs" +{% else %} +shared_fs = "virtio-9p" +{% endif %} + +# Path to vhost-user-fs daemon. +virtio_fs_daemon = "/opt/kata/libexec/kata-qemu/virtiofsd" + +# List of valid annotations values for the virtiofs daemon +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: ["/opt/kata/libexec/kata-qemu/virtiofsd"] +valid_virtio_fs_daemon_paths = ["/opt/kata/libexec/kata-qemu/virtiofsd"] + +# Default size of DAX cache in MiB +virtio_fs_cache_size = 0 + +# Extra args for virtiofsd daemon +# +# Format example: +# ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"] +# +# see `virtiofsd -h` for possible options. +virtio_fs_extra_args = ["--thread-pool-size=1"] + +# Cache mode: +# +# - none +# Metadata, data, and pathname lookup are not cached in guest. They are +# always fetched from host and any changes are immediately pushed to host. +# +# - auto +# Metadata and pathname lookup cache expires after a configured amount of +# time (default is 1 second). Data is cached while the file is open (close +# to open consistency). +# +# - always +# Metadata, data, and pathname lookup are cached in guest and never expire. +virtio_fs_cache = "always" + +# Block storage driver to be used for the hypervisor in case the container +# rootfs is backed by a block device. This is virtio-scsi, virtio-blk +# or nvdimm. +block_device_driver = "virtio-scsi" + +# Specifies cache-related options will be set to block devices or not. +# Default false +#block_device_cache_set = true + +# Specifies cache-related options for block devices. +# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled. 
+# Default false +#block_device_cache_direct = true + +# Specifies cache-related options for block devices. +# Denotes whether flush requests for the device are ignored. +# Default false +#block_device_cache_noflush = true + +# Enable iothreads (data-plane) to be used. This causes IO to be +# handled in a separate IO thread. This is currently only implemented +# for SCSI. +# +enable_iothreads = false + +# Enable pre allocation of VM RAM, default false +# Enabling this will result in lower container density +# as all of the memory will be allocated and locked +# This is useful when you want to reserve all the memory +# upfront or in the cases where you want memory latencies +# to be very predictable +# Default false +enable_mem_prealloc = {{ kata_containers_qemu_enable_mem_prealloc }} + +# Enable huge pages for VM RAM, default false +# Enabling this will result in the VM memory +# being allocated using huge pages. +# This is useful when you want to use vhost-user network +# stacks within the container. This will automatically +# result in memory pre allocation +#enable_hugepages = true + +# Enable vhost-user storage device, default false +# Enabling this will result in some Linux reserved block type +# major range 240-254 being chosen to represent vhost-user devices. +enable_vhost_user_store = false + +# The base directory specifically used for vhost-user devices. +# Its sub-path "block" is used for block devices; "block/sockets" is +# where we expect vhost-user sockets to live; "block/devices" is where +# simulated block device nodes for vhost-user devices to live. +vhost_user_store_path = "/var/run/kata-containers/vhost-user" + +# Enable vIOMMU, default false +# Enabling this will result in the VM having a vIOMMU device +# This will also add the following options to the kernel's +# command line: intel_iommu=on,iommu=pt +#enable_iommu = true + +# Enable IOMMU_PLATFORM, default false +# Enabling this will result in the VM device having iommu_platform=on set +#enable_iommu_platform = true + +# List of valid annotations values for the vhost user store path +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: ["/var/run/kata-containers/vhost-user"] +valid_vhost_user_store_paths = ["/var/run/kata-containers/vhost-user"] + +# Enable file based guest memory support. The default is an empty string which +# will disable this feature. In the case of virtio-fs, this is enabled +# automatically and '/dev/shm' is used as the backing folder. +# This option will be ignored if VM templating is enabled. +#file_mem_backend = "" + +# List of valid annotations values for the file_mem_backend annotation +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: [""] +valid_file_mem_backends = [""] + +# Enable swap of vm memory. Default false. +# The behaviour is undefined if mem_prealloc is also set to true +#enable_swap = true + +# -pflash can add image file to VM. The arguments of it should be in format +# of ["/path/to/flash0.img", "/path/to/flash1.img"] +pflashes = [] + +# This option changes the default hypervisor and kernel parameters +# to enable debug output where available. This extra output is added +# to the proxy logs, but only when proxy debug is also enabled. +# +# Default false +enable_debug = {{ kata_containers_qemu_debug }} + +# Disable the customizations done in the runtime when it detects +# that it is running on top a VMM. This will result in the runtime +# behaving as it would when running on bare metal. 
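# (Added, illustrative.) This mainly matters when Kata itself runs inside a VM,
# e.g. in nested-virtualization test environments; by default the runtime detects
# the VMM and adapts, and uncommenting the option below switches that behaviour off.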
+# +#disable_nesting_checks = true + +# This is the msize used for 9p shares. It is the number of bytes +# used for 9p packet payload. +#msize_9p = 8192 + +# If true and vsocks are supported, use vsocks to communicate directly +# with the agent and no proxy is started, otherwise use unix +# sockets and start a proxy to communicate with the agent. +# Default false +#use_vsock = true + +# If false and nvdimm is supported, use nvdimm device to plug guest image. +# Otherwise virtio-block device is used. +# Default is false +#disable_image_nvdimm = true + +# VFIO devices are hotplugged on a bridge by default. +# Enable hotplugging on root bus. This may be required for devices with +# a large PCI bar, as this is a current limitation with hotplugging on +# a bridge. This value is valid for "pc" machine type. +# Default false +#hotplug_vfio_on_root_bus = true + +# Before hot plugging a PCIe device, you need to add a pcie_root_port device. +# Use this parameter when using some large PCI bar devices, such as Nvidia GPU +# The value means the number of pcie_root_port +# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35" +# Default 0 +#pcie_root_port = 2 + +# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off +# security (vhost-net runs ring0) for network I/O performance. +#disable_vhost_net = true + +# +# Default entropy source. +# The path to a host source of entropy (including a real hardware RNG) +# /dev/urandom and /dev/random are two main options. +# Be aware that /dev/random is a blocking source of entropy. If the host +# runs out of entropy, the VMs boot time will increase leading to get startup +# timeouts. +# The source of entropy /dev/urandom is non-blocking and provides a +# generally acceptable source of entropy. It should work well for pretty much +# all practical purposes. +#entropy_source= "/dev/urandom" + +# List of valid annotations values for entropy_source +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: ["/dev/urandom","/dev/random",""] +valid_entropy_sources = ["/dev/urandom","/dev/random",""] + +# Path to OCI hook binaries in the *guest rootfs*. +# This does not affect host-side hooks which must instead be added to +# the OCI spec passed to the runtime. +# +# You can create a rootfs with hooks by customizing the osbuilder scripts: +# https://github.com/kata-containers/osbuilder +# +# Hooks must be stored in a subdirectory of guest_hook_path according to their +# hook type, i.e. "guest_hook_path/{prestart,postart,poststop}". +# The agent will scan these directories for executable files and add them, in +# lexicographical order, to the lifecycle of the guest container. +# Hooks are executed in the runtime namespace of the guest. See the official documentation: +# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks +# Warnings will be logged if any error is encountered will scanning for hooks, +# but it will not abort container execution. +#guest_hook_path = "/usr/share/oci/hooks" +# +# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM). +# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic. +# Default 0-sized value means unlimited rate. +#rx_rate_limiter_max_rate = 0 +# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM). 
+# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block) +# to discipline traffic. +# Default 0-sized value means unlimited rate. +#tx_rate_limiter_max_rate = 0 + +# Set where to save the guest memory dump file. +# If set, when GUEST_PANICKED event occurred, +# guest memeory will be dumped to host filesystem under guest_memory_dump_path, +# This directory will be created automatically if it does not exist. +# +# The dumped file(also called vmcore) can be processed with crash or gdb. +# +# WARNING: +# Dump guest’s memory can take very long depending on the amount of guest memory +# and use much disk space. +#guest_memory_dump_path="/var/crash/kata" + +# If enable paging. +# Basically, if you want to use "gdb" rather than "crash", +# or need the guest-virtual addresses in the ELF vmcore, +# then you should enable paging. +# +# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details +#guest_memory_dump_paging=false + +# Enable swap in the guest. Default false. +# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device +# if the swappiness of a container (set by annotation "io.katacontainers.container.resource.swappiness") +# is bigger than 0. +# The size of the swap device should be +# swap_in_bytes (set by annotation "io.katacontainers.container.resource.swap_in_bytes") - memory_limit_in_bytes. +# If swap_in_bytes is not set, the size should be memory_limit_in_bytes. +# If swap_in_bytes and memory_limit_in_bytes is not set, the size should +# be default_memory. +#enable_guest_swap = true + +[factory] +# VM templating support. Once enabled, new VMs are created from template +# using vm cloning. They will share the same initial kernel, initramfs and +# agent memory by mapping it readonly. It helps speeding up new container +# creation and saves a lot of memory if there are many kata containers running +# on the same host. +# +# When disabled, new VMs are created from scratch. +# +# Note: Requires "initrd=" to be set ("image=" is not supported). +# +# Default false +#enable_template = true + +# Specifies the path of template. +# +# Default "/run/vc/vm/template" +#template_path = "/run/vc/vm/template" + +# The number of caches of VMCache: +# unspecified or == 0 --> VMCache is disabled +# > 0 --> will be set to the specified number +# +# VMCache is a function that creates VMs as caches before using it. +# It helps speed up new container creation. +# The function consists of a server and some clients communicating +# through Unix socket. The protocol is gRPC in protocols/cache/cache.proto. +# The VMCache server will create some VMs and cache them by factory cache. +# It will convert the VM to gRPC format and transport it when gets +# requestion from clients. +# Factory grpccache is the VMCache client. It will request gRPC format +# VM and convert it back to a VM. If VMCache function is enabled, +# kata-runtime will request VM from factory grpccache when it creates +# a new sandbox. +# +# Default 0 +#vm_cache_number = 0 + +# Specify the address of the Unix socket that is used by VMCache. 
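# (Added, illustrative.) This endpoint is only consulted when vm_cache_number above
# is greater than 0; the VMCache server and the kata-runtime clients must then point
# at the same socket path.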
+# +# Default /var/run/kata-containers/cache.sock +#vm_cache_endpoint = "/var/run/kata-containers/cache.sock" + +[proxy.kata] +path = "/opt/kata/libexec/kata-containers/kata-proxy" + +# If enabled, proxy messages will be sent to the system log +# (default: disabled) +enable_debug = {{ kata_containers_qemu_debug }} + +[shim.kata] +path = "/opt/kata/libexec/kata-containers/kata-shim" + +# If enabled, shim messages will be sent to the system log +# (default: disabled) +enable_debug = {{ kata_containers_qemu_debug }} + +# If enabled, the shim will create opentracing.io traces and spans. +# (See https://www.jaegertracing.io/docs/getting-started). +# +# Note: By default, the shim runs in a separate network namespace. Therefore, +# to allow it to send trace details to the Jaeger agent running on the host, +# it is necessary to set 'disable_new_netns=true' so that it runs in the host +# network namespace. +# +# (default: disabled) +#enable_tracing = true + +[agent.kata] +# If enabled, make the agent display debug-level messages. +# (default: disabled) +enable_debug = {{ kata_containers_qemu_debug }} + +# Enable agent tracing. +# +# If enabled, the default trace mode is "dynamic" and the +# default trace type is "isolated". The trace mode and type are set +# explicitly with the `trace_type=` and `trace_mode=` options. +# +# Notes: +# +# - Tracing is ONLY enabled when `enable_tracing` is set: explicitly +# setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing` +# will NOT activate agent tracing. +# +# - See https://github.com/kata-containers/agent/blob/master/TRACING.md for +# full details. +# +# (default: disabled) +#enable_tracing = true +# +#trace_mode = "dynamic" +#trace_type = "isolated" + +# Comma separated list of kernel modules and their parameters. +# These modules will be loaded in the guest kernel using modprobe(8). +# The following example can be used to load two kernel modules with parameters +# - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"] +# The first word is considered as the module name and the rest as its parameters. +# Container will not be started when: +# * A kernel module is specified and the modprobe command is not installed in the guest +# or it fails loading the module. +# * The module is not available in the guest or it doesn't met the guest kernel +# requirements, like architecture and version. +# +kernel_modules=[] + +# Enable debug console. + +# If enabled, user can connect guest OS running inside hypervisor +# through "kata-runtime exec " command + +#debug_console_enabled = true + +# Agent connection dialing timeout value in seconds +# (default: 30) +#dial_timeout = 30 + +[netmon] +# If enabled, the network monitoring process gets started when the +# sandbox is created. This allows for the detection of some additional +# network being added to the existing network namespace, after the +# sandbox has been created. +# (default: disabled) +#enable_netmon = true + +# Specify the path to the netmon binary. 
+path = "/opt/kata/libexec/kata-containers/kata-netmon" + +# If enabled, netmon messages will be sent to the system log +# (default: disabled) +enable_debug = {{ kata_containers_qemu_debug }} + +[runtime] +# If enabled, the runtime will log additional debug messages to the +# system log +# (default: disabled) +enable_debug = {{ kata_containers_qemu_debug }} +# +# Internetworking model +# Determines how the VM should be connected to the +# the container network interface +# Options: +# +# - macvtap +# Used when the Container network interface can be bridged using +# macvtap. +# +# - none +# Used when customize network. Only creates a tap device. No veth pair. +# +# - tcfilter +# Uses tc filter rules to redirect traffic from the network interface +# provided by plugin to a tap interface connected to the VM. +# +internetworking_model="tcfilter" + +# disable guest seccomp +# Determines whether container seccomp profiles are passed to the virtual +# machine and applied by the kata agent. If set to true, seccomp is not applied +# within the guest +# (default: true) +disable_guest_seccomp=true + +# If enabled, the runtime will create opentracing.io traces and spans. +# (See https://www.jaegertracing.io/docs/getting-started). +# (default: disabled) +#enable_tracing = true + +# Set the full url to the Jaeger HTTP Thrift collector. +# The default if not set will be "http://localhost:14268/api/traces" +#jaeger_endpoint = "" + +# Sets the username to be used if basic auth is required for Jaeger. +#jaeger_user = "" + +# Sets the password to be used if basic auth is required for Jaeger. +#jaeger_password = "" + +# If enabled, the runtime will not create a network namespace for shim and hypervisor processes. +# This option may have some potential impacts to your host. It should only be used when you know what you're doing. +# `disable_new_netns` conflicts with `enable_netmon` +# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only +# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge +# (like OVS) directly. +# If you are using docker, `disable_new_netns` only works with `docker run --net=none` +# (default: false) +#disable_new_netns = true + +# if enabled, the runtime will add all the kata processes inside one dedicated cgroup. +# The container cgroups in the host are not created, just one single cgroup per sandbox. +# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox. +# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation. +# The sandbox cgroup is constrained if there is no container type annotation. +# See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType +sandbox_cgroup_only={{ kata_containers_qemu_sandbox_cgroup_only }} + +# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path. +# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory. +# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts` +# These will not be exposed to the container workloads, and are only provided for potential guest services. +sandbox_bind_mounts=[] + +# Enabled experimental feature list, format: ["a", "b"]. 
+# Experimental features are features not stable enough for production, +# they may break compatibility, and are prepared for a big version bump. +# Supported experimental features: +# (default: []) +experimental=[] + +# If enabled, user can run pprof tools with shim v2 process through kata-monitor. +# (default: false) +# enable_pprof = true + +# WARNING: All the options in the following section have not been implemented yet. +# This section was added as a placeholder. DO NOT USE IT! +[image] +# Container image service. +# +# Offload the CRI image management service to the Kata agent. +# (default: false) +#service_offload = true + +# Container image decryption keys provisioning. +# Applies only if service_offload is true. +# Keys can be provisioned locally (e.g. through a special command or +# a local file) or remotely (usually after the guest is remotely attested). +# The provision setting is a complete URL that lets the Kata agent decide +# which method to use in order to fetch the keys. +# +# Keys can be stored in a local file, in a measured and attested initrd: +#provision=data:///local/key/file +# +# Keys could be fetched through a special command or binary from the +# initrd (guest) image, e.g. a firmware call: +#provision=file:///path/to/bin/fetcher/in/guest +# +# Keys can be remotely provisioned. The Kata agent fetches them from e.g. +# a HTTPS URL: +#provision=https://my-key-broker.foo/tenant/ diff --git a/kubespray/extra_playbooks/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 b/kubespray/extra_playbooks/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 new file mode 100644 index 0000000..a3cb830 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 @@ -0,0 +1,2 @@ +#!/bin/bash +KATA_CONF_FILE={{ kata_containers_config_dir }}/configuration-{{ shim }}.toml {{ kata_containers_dir }}/bin/containerd-shim-kata-v2 $@ diff --git a/kubespray/extra_playbooks/roles/container-engine/meta/main.yml b/kubespray/extra_playbooks/roles/container-engine/meta/main.yml new file mode 100644 index 0000000..3e068d6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/meta/main.yml @@ -0,0 +1,58 @@ +# noqa role-name - this is a meta role that doesn't need a name +--- +dependencies: + - role: container-engine/validate-container-engine + tags: + - container-engine + - validate-container-engine + + - role: container-engine/kata-containers + when: + - kata_containers_enabled + tags: + - container-engine + - kata-containers + + - role: container-engine/gvisor + when: + - gvisor_enabled + - container_manager in ['docker', 'containerd'] + tags: + - container-engine + - gvisor + + - role: container-engine/crun + when: + - crun_enabled + tags: + - container-engine + - crun + + - role: container-engine/youki + when: + - youki_enabled + - container_manager == 'crio' + tags: + - container-engine + - youki + + - role: container-engine/cri-o + when: + - container_manager == 'crio' + tags: + - container-engine + - crio + + - role: container-engine/containerd + when: + - container_manager == 'containerd' + tags: + - container-engine + - containerd + + - role: container-engine/cri-dockerd + when: + - container_manager == 'docker' + tags: + - container-engine + - docker diff --git a/kubespray/extra_playbooks/roles/container-engine/nerdctl/handlers/main.yml b/kubespray/extra_playbooks/roles/container-engine/nerdctl/handlers/main.yml new file mode 100644 index 0000000..27895ff --- 
/dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/nerdctl/handlers/main.yml @@ -0,0 +1,12 @@ +--- +- name: Get nerdctl completion + command: "{{ bin_dir }}/nerdctl completion bash" + changed_when: False + register: nerdctl_completion + check_mode: false + +- name: Install nerdctl completion + copy: + dest: /etc/bash_completion.d/nerdctl + content: "{{ nerdctl_completion.stdout }}" + mode: 0644 diff --git a/kubespray/extra_playbooks/roles/container-engine/nerdctl/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/nerdctl/tasks/main.yml new file mode 100644 index 0000000..ad08839 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/nerdctl/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- name: nerdctl | Download nerdctl + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.nerdctl) }}" + +- name: nerdctl | Copy nerdctl binary from download dir + copy: + src: "{{ local_release_dir }}/nerdctl" + dest: "{{ bin_dir }}/nerdctl" + mode: 0755 + remote_src: true + owner: root + group: root + become: true + notify: + - Get nerdctl completion + - Install nerdctl completion + +- name: nerdctl | Create configuration dir + file: + path: /etc/nerdctl + state: directory + mode: 0755 + owner: root + group: root + become: true + +- name: nerdctl | Install nerdctl configuration + template: + src: nerdctl.toml.j2 + dest: /etc/nerdctl/nerdctl.toml + mode: 0644 + owner: root + group: root + become: true diff --git a/kubespray/extra_playbooks/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 b/kubespray/extra_playbooks/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 new file mode 100644 index 0000000..cd1b5f9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 @@ -0,0 +1,10 @@ +debug = false +debug_full = false +address = "{{ cri_socket }}" +namespace = "k8s.io" +snapshotter = "native" +cni_path = "/opt/cni/bin" +cni_netconfpath = "/etc/cni/net.d" +cgroup_manager = "{{ kubelet_cgroup_driver | default('systemd') }}" +insecure_registry = {{ (containerd_insecure_registries is defined and containerd_insecure_registries|length>0) | bool | lower }} +hosts_dir = ["/etc/containerd/certs.d"] diff --git a/kubespray/extra_playbooks/roles/container-engine/runc/defaults/main.yml b/kubespray/extra_playbooks/roles/container-engine/runc/defaults/main.yml new file mode 100644 index 0000000..af8aa08 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/runc/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +runc_bin_dir: "{{ bin_dir }}" + +runc_package_name: runc diff --git a/kubespray/extra_playbooks/roles/container-engine/runc/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/runc/tasks/main.yml new file mode 100644 index 0000000..7a8e336 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/runc/tasks/main.yml @@ -0,0 +1,38 @@ +--- +- name: runc | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: runc | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: runc | Uninstall runc package managed by package manager + package: + name: "{{ runc_package_name }}" + state: absent + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + +- name: runc | Download runc binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: 
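    # (Added, illustrative.) combine() overlays the runc-specific download entry
    # defined by the download role (URL, destination path, checksum and similar
    # fields) on top of the shared download_defaults; the included download_file.yml
    # then fetches the binary to that destination, from which the next task copies it
    # into runc_bin_dir.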
+ download: "{{ download_defaults | combine(downloads.runc) }}" + +- name: Copy runc binary from download dir + copy: + src: "{{ downloads.runc.dest }}" + dest: "{{ runc_bin_dir }}/runc" + mode: 0755 + remote_src: true + +- name: runc | Remove orphaned binary + file: + path: /usr/bin/runc + state: absent + when: runc_bin_dir != "/usr/bin" + ignore_errors: true # noqa ignore-errors diff --git a/kubespray/extra_playbooks/roles/container-engine/skopeo/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/skopeo/tasks/main.yml new file mode 100644 index 0000000..033ae62 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/skopeo/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: skopeo | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: skopeo | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: skopeo | Uninstall skopeo package managed by package manager + package: + name: skopeo + state: absent + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + ignore_errors: true # noqa ignore-errors + +- name: skopeo | Download skopeo binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.skopeo) }}" + +- name: Copy skopeo binary from download dir + copy: + src: "{{ downloads.skopeo.dest }}" + dest: "{{ bin_dir }}/skopeo" + mode: 0755 + remote_src: true diff --git a/kubespray/extra_playbooks/roles/container-engine/validate-container-engine/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/validate-container-engine/tasks/main.yml new file mode 100644 index 0000000..fdd60e0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/validate-container-engine/tasks/main.yml @@ -0,0 +1,153 @@ +--- +- name: validate-container-engine | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + tags: + - facts + +- name: validate-container-engine | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + tags: + - facts + +- name: Ensure kubelet systemd unit exists + stat: + path: "/etc/systemd/system/kubelet.service" + register: kubelet_systemd_unit_exists + tags: + - facts + +- name: Populate service facts + service_facts: + tags: + - facts + +- name: Check if containerd is installed + find: + file_type: file + recurse: yes + use_regex: yes + patterns: + - containerd.service$ + paths: + - /lib/systemd + - /etc/systemd + - /run/systemd + register: containerd_installed + tags: + - facts + +- name: Check if docker is installed + find: + file_type: file + recurse: yes + use_regex: yes + patterns: + - docker.service$ + paths: + - /lib/systemd + - /etc/systemd + - /run/systemd + register: docker_installed + tags: + - facts + +- name: Check if crio is installed + find: + file_type: file + recurse: yes + use_regex: yes + patterns: + - crio.service$ + paths: + - /lib/systemd + - /etc/systemd + - /run/systemd + register: crio_installed + tags: + - facts + +- name: Uninstall containerd + block: + - name: Drain node + include_role: + name: remove-node/pre-remove + apply: + tags: + - pre-remove + when: kubelet_systemd_unit_exists.stat.exists + - name: Stop kubelet + service: + name: kubelet + state: stopped + when: kubelet_systemd_unit_exists.stat.exists + - name: Remove Containerd + import_role: + name: 
container-engine/containerd + tasks_from: reset + handlers_from: reset + vars: + service_name: containerd.service + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + - container_manager != "containerd" + - docker_installed.matched == 0 + - containerd_installed.matched > 0 + - ansible_facts.services[service_name]['state'] == 'running' + +- name: Uninstall docker + block: + - name: Drain node + include_role: + name: remove-node/pre-remove + apply: + tags: + - pre-remove + when: kubelet_systemd_unit_exists.stat.exists + - name: Stop kubelet + service: + name: kubelet + state: stopped + when: kubelet_systemd_unit_exists.stat.exists + - name: Remove Docker + import_role: + name: container-engine/docker + tasks_from: reset + vars: + service_name: docker.service + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + - container_manager != "docker" + - docker_installed.matched > 0 + - ansible_facts.services[service_name]['state'] == 'running' + +- name: Uninstall crio + block: + - name: Drain node + include_role: + name: remove-node/pre-remove + apply: + tags: + - pre-remove + when: kubelet_systemd_unit_exists.stat.exists + - name: Stop kubelet + service: + name: kubelet + state: stopped + when: kubelet_systemd_unit_exists.stat.exists + - name: Remove CRI-O + import_role: + name: container-engine/cri-o + tasks_from: reset + vars: + service_name: crio.service + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + - container_manager != "crio" + - crio_installed.matched > 0 + - ansible_facts.services[service_name]['state'] == 'running' diff --git a/kubespray/extra_playbooks/roles/container-engine/youki/defaults/main.yml b/kubespray/extra_playbooks/roles/container-engine/youki/defaults/main.yml new file mode 100644 index 0000000..2250f22 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/youki/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +youki_bin_dir: "{{ bin_dir }}" diff --git a/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/converge.yml b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/converge.yml new file mode 100644 index 0000000..11ef8f6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/converge.yml @@ -0,0 +1,11 @@ +--- +- name: Converge + hosts: all + become: true + vars: + youki_enabled: true + container_manager: crio + roles: + - role: kubespray-defaults + - role: container-engine/cri-o + - role: container-engine/youki diff --git a/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/files/10-mynet.conf b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/files/10-mynet.conf new file mode 100644 index 0000000..b9fa3ba --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.4.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff --git a/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/files/container.json b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/files/container.json new file mode 100644 index 
0000000..a5d5094 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/files/container.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "youki1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "youki1.0.log", + "linux": {} +} diff --git a/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/files/sandbox.json b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/files/sandbox.json new file mode 100644 index 0000000..b2a4ffe --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "youki1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/molecule.yml b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/molecule.yml new file mode 100644 index 0000000..5c3a7e1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/molecule.yml @@ -0,0 +1,45 @@ +--- +driver: + name: vagrant + provider: + name: libvirt + options: + driver: kvm +lint: | + set -e + yamllint -c ../../../.yamllint . +platforms: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint + inventory: + group_vars: + all: + become: true +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/prepare.yml b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/prepare.yml new file mode 100644 index 0000000..e948686 --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/prepare.yml @@ -0,0 +1,48 @@ +--- +- name: Prepare generic + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: crio + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + - role: container-engine/crictl + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: 0644 + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: root + mode: 0755 + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: 0644 + with_items: + - 10-mynet.conf diff --git a/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/tests/test_default.py b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/tests/test_default.py new file mode 100644 index 0000000..54ed5c5 --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/container-engine/youki/molecule/default/tests/test_default.py @@ -0,0 +1,29 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + youkiruntime = "/usr/local/bin/youki" + with host.sudo(): + cmd = host.command(youkiruntime + " --version") + assert cmd.rc == 0 + assert "youki" in cmd.stdout + + +def test_run_pod(host): + runtime = "youki" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/youki1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/kubespray/extra_playbooks/roles/container-engine/youki/tasks/main.yml b/kubespray/extra_playbooks/roles/container-engine/youki/tasks/main.yml new file mode 100644 index 0000000..1095c3d --- /dev/null +++ b/kubespray/extra_playbooks/roles/container-engine/youki/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: youki | Download youki + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.youki) }}" + +- name: youki | Copy youki binary from download dir + copy: + src: "{{ local_release_dir }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux/youki-v{{ youki_version }}/youki" + dest: "{{ youki_bin_dir }}/youki" + mode: 0755 + remote_src: true diff --git a/kubespray/extra_playbooks/roles/download/defaults/main.yml b/kubespray/extra_playbooks/roles/download/defaults/main.yml new file mode 100644 index 0000000..0f958d0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/defaults/main.yml @@ -0,0 +1,1943 @@ +--- +local_release_dir: /tmp/releases +download_cache_dir: /tmp/kubespray_cache + +# If this is true, debug information will be displayed but +# may contain some private data, so it is recommended to set it to false +# in the production environment. +unsafe_show_logs: false + +# do not delete remote cache files after using them +# NOTE: Setting this parameter to TRUE is only really useful when developing kubespray +download_keep_remote_cache: false + +# Only useful when download_run_once is false: Locally cached files and images are +# uploaded to kubernetes nodes. Also, images downloaded on those nodes are copied +# back to the ansible runner's cache, if they are not yet present. +download_force_cache: false + +# Used to only evaluate vars from the download role +skip_downloads: false + +# Optionally skip kubeadm images download +skip_kubeadm_images: false +kubeadm_images: {} + +# If this is set to true, files will only be downloaded once. This doesn't work +# on Flatcar Container Linux by Kinvolk unless download_localhost is true and localhost +# is running another OS type. Default compress level is 1 (fastest). +download_run_once: false +download_compress: 1 + +# If this is set to true, container images will be downloaded +download_container: true + +# If this is set to true, localhost is used for download_run_once mode +# (requires docker and sudo to access docker). You may want this option for +# local caching of docker images or for Flatcar Container Linux by Kinvolk cluster nodes. +# Otherwise, the first node in the kube_control_plane group is used to store images +# in the download_run_once mode. +download_localhost: false + +# Always pull images if set to True. 
Otherwise check by the repo's tag/digest. +download_always_pull: false + +# Some problems may occur when downloading files over an https proxy due to an Ansible bug +# https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +# SSL validation of the get_url module. Note that kubespray will still be performing checksum validation. +download_validate_certs: true + +# Use the first kube_control_plane if download_localhost is not set +download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube_control_plane'][0] }}{% endif %}" + +# The docker_image_info_command might seem weird but we are using raw/endraw and `{{ `{{` }}` to manage the double jinja2 processing +docker_image_pull_command: "{{ docker_bin_dir }}/docker pull" +docker_image_info_command: "{{ docker_bin_dir }}/docker images -q | xargs -i {{ '{{' }} docker_bin_dir }}/docker inspect -f {% raw %}'{{ '{{' }} if .RepoTags }}{{ '{{' }} join .RepoTags \",\" }}{{ '{{' }} end }}{{ '{{' }} if .RepoDigests }},{{ '{{' }} join .RepoDigests \",\" }}{{ '{{' }} end }}' {% endraw %} {} | tr '\n' ','" +nerdctl_image_info_command: "{{ bin_dir }}/nerdctl -n k8s.io images --format '{% raw %}{{ .Repository }}:{{ .Tag }}{% endraw %}' 2>/dev/null | grep -v ^:$ | tr '\n' ','" +nerdctl_image_pull_command: "{{ bin_dir }}/nerdctl -n k8s.io pull --quiet {{ nerdctl_extra_flags }}" +crictl_image_info_command: "{{ bin_dir }}/crictl images --verbose | awk -F ': ' '/RepoTags|RepoDigests/ {print $2}' | tr '\n' ','" +crictl_image_pull_command: "{{ bin_dir }}/crictl pull" + +image_command_tool: "{%- if container_manager == 'containerd' -%}nerdctl{%- elif container_manager == 'crio' -%}crictl{%- else -%}{{ container_manager }}{%- endif -%}" +image_command_tool_on_localhost: "{{ image_command_tool }}" + +image_pull_command: "{{ lookup('vars', image_command_tool + '_image_pull_command') }}" +image_info_command: "{{ lookup('vars', image_command_tool + '_image_info_command') }}" +image_pull_command_on_localhost: "{{ lookup('vars', image_command_tool_on_localhost + '_image_pull_command') }}" +image_info_command_on_localhost: "{{ lookup('vars', image_command_tool_on_localhost + '_image_info_command') }}" + +# Arch of Docker images and needed packages +image_arch: "{{host_architecture | default('amd64')}}" + +# Nerdctl insecure flag set +nerdctl_extra_flags: '{%- if containerd_insecure_registries is defined and containerd_insecure_registries|length>0 -%}--insecure-registry{%- else -%}{%- endif -%}' + +# Versions +kubeadm_version: "{{ kube_version }}" +crun_version: 1.4.5 +runc_version: v1.1.4 +kata_containers_version: 2.4.1 +youki_version: 0.0.1 +gvisor_version: 20210921 +containerd_version: 1.6.14 +cri_dockerd_version: 0.2.2 + +# this is relevant when container_manager == 'docker' +docker_containerd_version: 1.6.4 + +# gcr and kubernetes image repo define +gcr_image_repo: "gcr.io" +kube_image_repo: "registry.k8s.io" + +# docker image repo define +docker_image_repo: "docker.io" + +# quay image repo define +quay_image_repo: "quay.io" + +# github image repo define (e.g. only multus uses it) +github_image_repo: "ghcr.io" + +# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults +# after migration to container download +calico_version: "v3.24.5" +calico_ctl_version: "{{ calico_version }}" +calico_cni_version: "{{ calico_version }}" +calico_flexvol_version: "{{ calico_version }}" +calico_policy_version: "{{ calico_version }}" +calico_typha_version: "{{ calico_version }}" +calico_apiserver_version: "{{ calico_version 
}}" +typha_enabled: false +calico_apiserver_enabled: false + +flannel_version: "v0.19.2" +flannel_cni_version: "v1.1.0" +cni_version: "v1.1.1" +weave_version: 2.8.1 +pod_infra_version: "3.7" + +cilium_version: "v1.12.1" +cilium_cli_version: "v0.12.5" +cilium_enable_hubble: false + +kube_ovn_version: "v1.10.7" +kube_ovn_dpdk_version: "19.11-{{ kube_ovn_version }}" +kube_router_version: "v1.5.1" +multus_version: "v3.8-{{ image_arch }}" +helm_version: "v3.10.3" +nerdctl_version: "1.0.0" +krew_version: "v0.4.3" +skopeo_version: v1.10.0 + +# Get kubernetes major version (e.g. 1.17.4 => 1.17) +kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0-9]+', 'v\\1.\\2') }}" + +etcd_supported_versions: + v1.25: "v3.5.6" + v1.24: "v3.5.6" + v1.23: "v3.5.6" +etcd_version: "{{ etcd_supported_versions[kube_major_version] }}" + +crictl_supported_versions: + v1.25: "v1.25.0" + v1.24: "v1.24.0" + v1.23: "v1.23.0" +crictl_version: "{{ crictl_supported_versions[kube_major_version] }}" + +crio_supported_versions: + v1.25: v1.25.1 + v1.24: v1.24.3 + v1.23: v1.23.2 +crio_version: "{{ crio_supported_versions[kube_major_version] }}" + +# Download URLs +kubelet_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" +kubectl_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +etcd_download_url: "https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" +cni_download_url: "https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" +calicoctl_download_url: "https://github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +calicoctl_alternate_download_url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +calico_crds_download_url: "https://github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" +ciliumcli_download_url: "https://github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" +crictl_download_url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" +crio_download_url: "https://storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.{{ crio_version }}.tar.gz" +helm_download_url: "https://get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" +runc_download_url: "https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +crun_download_url: "https://github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" +youki_download_url: "https://github.com/containers/youki/releases/download/v{{ youki_version }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux.tar.gz" +kata_containers_download_url: "https://github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" +# gVisor only supports amd64 and uses x86_64 in the download link 
+gvisor_runsc_download_url: "https://storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +gvisor_containerd_shim_runsc_download_url: "https://storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" +nerdctl_download_url: "https://github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" +krew_download_url: "https://github.com/kubernetes-sigs/krew/releases/download/{{ krew_version }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz" +containerd_download_url: "https://github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +cri_dockerd_download_url: "https://github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" +skopeo_download_url: "https://github.com/lework/skopeo-binary/releases/download/{{ skopeo_version }}/skopeo-linux-{{ image_arch }}" + +crictl_checksums: + arm: + v1.25.0: c4efe3649af5542f2b07cdfc0be62e9e13c7bb846a9b59d57e190c764f28dae4 + v1.24.0: 1ab8a88d6ce1e9cff1c76fc454d2d41cf0c89e98c6db15a41804a3a5874cbf89 + v1.23.0: c20f7a118183d1e6da24c3709471ea0b4dee51cb709f958e0d90f3acb4eb59ae + arm64: + v1.25.0: 651c939eca010bbf48cc3932516b194028af0893025f9e366127f5b50ad5c4f4 + v1.24.0: b6fe172738dfa68ca4c71ade53574e859bf61a3e34d21b305587b1ad4ab28d24 + v1.23.0: 91094253e77094435027998a99b9b6a67b0baad3327975365f7715a1a3bd9595 + amd64: + v1.25.0: 86ab210c007f521ac4cdcbcf0ae3fb2e10923e65f16de83e0e1db191a07f0235 + v1.24.0: 3df4a4306e0554aea4fdc26ecef9eea29a58c8460bebfaca3405799787609880 + v1.23.0: b754f83c80acdc75f93aba191ff269da6be45d0fc2d3f4079704e7d1424f1ca8 + ppc64le: + v1.25.0: 1b77d1f198c67b2015104eee6fe7690465b8efa4675ea6b4b958c63d60a487e7 + v1.24.0: 586c263678c6d8d543976607ea1732115e622d44993e2bcbed29832370d3a754 + v1.23.0: 53db9e605a3042ea77bbf42a01a4e248dea8839bcab544c491745874f73aeee7 + +crio_archive_checksums: + arm: + v1.25.1: 0 + v1.24.3: 0 + v1.23.2: 0 + arm64: + v1.25.1: add26675dc993b292024d007fd69980d8d1e75c675851d0cb687fe1dfd1f3008 + v1.24.3: d8040602e03c90e4482b4ce97b63c2cf1301cd2afb0aa722342f40f3537a1a1f + v1.23.2: a866ccc3a062ac29906a619b9045a5e23b11fa9249f8802f8be0849491d01fbd + amd64: + v1.25.1: 49f98a38805740c40266a5bf3badc28e4ca725ccf923327c75c00fccc241f562 + v1.24.3: 43f6e3a7ad6ae8cf05ed0f1e493578c28abf6a798aedb8ee9643ff7c25a68ca3 + v1.23.2: 5c766dbf366a80f8b5dbc7a06d566f43e7cb0675186c50062df01f3b3cb5e526 + ppc64le: + v1.25.1: 0 + v1.24.3: 0 + v1.23.2: 0 + +# Checksum +# Kubernetes versions above Kubespray's current target version are untested and should be used with caution. 
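+# The checksum maps below are keyed first by architecture and then by exact version string;
+# a value of 0 is a dummy entry for builds that are not published for that architecture
+# (see the etcd note further down). As an illustrative sketch only -- the real fetch logic
+# lives in the download role's tasks such as the download_file.yml referenced earlier, and
+# the task below is not part of these defaults -- a binary could be verified against these
+# maps via get_url's checksum parameter:
+#
+#   - name: Download kubelet and verify its sha256 (illustrative sketch)
+#     get_url:
+#       url: "{{ kubelet_download_url }}"
+#       dest: "{{ local_release_dir }}/kubelet"
+#       checksum: "sha256:{{ kubelet_checksums[image_arch][kube_version] }}"
+#       mode: 0755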
+kubelet_checksums: + arm: + v1.25.5: fdaade890ed44ce55a1086dd1b1bde44daac02f90eacd9faf14fd182af1ffda0 + v1.25.4: 1af9c17daa07c215a8ce40f7e65896279276e11b6f7a7d9ae850a0561e149ad8 + v1.25.3: 9745a48340ca61b00f0094e4b8ff210839edcf05420f0d57b3cb1748cb887060 + v1.25.2: 995f885543fa61a08bd4f1008ba6d7417a1c45bd2a8e0f70c67a83e53b46eea5 + v1.25.1: 6fe430ad91e1ed50cf5cc396aa204fda0889c36b8a3b84619d633cd9a6a146e2 + v1.25.0: ad45ac3216aa186648fd034dec30a00c1a2d2d1187cab8aae21aa441a13b4faa + v1.24.9: d91e82f0bee5c7dc3bb0b35d67dc2335404ec44a316fc369cec5c749522e9bd5 + v1.24.8: 0756748c89293e2c502ffcf7a275c3bb98a7b919d59130e5e0376c8afb327fe2 + v1.24.7: 3841e80f54ee5576928e799e4962231261bcdafe94868a310a8782da9a321da5 + v1.24.6: 084e469d1d3b60363e5e20812ee0d909daa5496f3e6ebd305d1f23d1fe0709d4 + v1.24.5: ce55155d1aff0c72effee19c6bef534c2b7d1b23ec701d70335d181bd2d12a87 + v1.24.4: f9d387c18159a4473e7bdc290780ba1b1c92e8d8b41f558c15ee044db54636cd + v1.24.3: fe34b1a0892cdfb015f66be8f2d3450130a5d04f9466732020e186c8da0ee799 + v1.24.2: e484fb000dcfdcf7baca79451745e29764747a27d36f3fc1dda5815b9cbc9b22 + v1.24.1: 393d130a1715205a253b2f70dbd1f00d1a52ab89b4f3684ed116a937e68116ec + v1.24.0: fd19ff957c73e5397f9af931c82bdb95791e47dc7d3135d38720ecda211758a3 + v1.23.15: 6b057a9b55b20b8a8cf0c6d2947ed5bcff77ffb311f785755cecce5917944910 + v1.23.14: ddbb9930e232b51b2f3bbe6f944b96642cfb120f4fdd1820128fb842a454a947 + v1.23.13: 58f744247dbc8bca50b01ec1c25b0b5868736319f9cc8bf964fc2c1dd9eef0f9 + v1.23.12: 5b7c38206ba3c04cd756062b74093548ac6309dc086c2893351b1c479f5415a3 + v1.23.11: 93bbe3a130dcd7d5732e8b949f13ba8728bb37d3d4bd58408f99352cf484f9d0 + v1.23.10: d6d5aa26f16e735962cac5f2ee8ddc0d3b9d2aa14b8e968cb55fc9745f9a8b03 + v1.23.9: f22edc9838eb3d0788d951c1fc8fdb0e1bf6c43ad638a215172f25b54ca27a8a + v1.23.8: 53c4f44ba10d9c53a4526fccb4d20146e52473788058684ca2de74ae0e1abb11 + v1.23.7: f9910e670aea8845b6b07ecd36d43d8ac0901ee3244264d2bc0f6ea918d862ac + v1.23.6: 2f3fb387c20de1da586ac6bc43fa714fb7c2116b4243a2ef1e28ecfbba324cea + v1.23.5: 9505cf63fb56a1d90d1db9c1507587621455a152ef16d871e802875e1e7b4587 + v1.23.4: e67a51013ed59ea3df0ad1d54863d483cc99247584992b8cad6dd612135a70c5 + v1.23.3: 80a2c005e7b6c4e9363a18fa1d8911b6592eb2f93cbaa8a56fe5f6f59515d1a4 + v1.23.2: f9e83b3bd99b9e70cd98a5f8dc75a89d3d51548d51e4e05615cdc48d6144f908 + v1.23.1: 29868f172ef171ae990deafcdc13af7fe5b00f0a546ae81c267c4ad01231c3ce + v1.23.0: 7417fc7cd624a85887f0a28054f58f7534143579fe85285d0b68c8984c95f2ba + arm64: + v1.25.5: 18aa53ff59740a11504218905b51b29cc78fb8b5dd818a619141afa9dafb8f5a + v1.25.4: 8ff80a12381fad2e96c9cec6712591018c830cdd327fc7bd825237aa51a6ada3 + v1.25.3: 929d25fc3f901749b058141a9c624ff379759869e09df49b75657c0be3141091 + v1.25.2: c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae + v1.25.1: b6baa99b99ecc1f358660208a9a27b64c65f3314ff95a84c73091b51ac98484b + v1.25.0: 69572a7b3d179d4a479aa2e0f90e2f091d8d84ef33a35422fc89975dc137a590 + v1.24.9: 34021c6cf593ffc4361e9e2adc3d6e4f5683383eeb894f40d63ccfa268e84f4c + v1.24.8: 4e1427651e4ff3927f96ce4b93c471ccc76c683fc1619ee0d677d77345b54edb + v1.24.7: d8bd38e595ca061c53d3b7d1daebe5b3cc1ad44c731666bd5e842d336077db4b + v1.24.6: 2a7b8e131d6823462e38bc1514b5dea5dca86254b3a12ed4a0fa653c2e06dd0e + v1.24.5: dd5dcea80828979981654ec0732b197be252a3259a527cbc299d9575bc2de3e8 + v1.24.4: 2d9817c1e9e1edd9480aa05862ea6e9655a9512d820b1933175f5d7c8253ca61 + v1.24.3: 6c04ae25ee9b434f40e0d2466eb4ef5604dc43f306ddf1e5f165fc9d3c521e12 + v1.24.2: 40a8460e104fbf97abee9763f6e1f2143debc46cc6c9a1a18e21c1ff9960d8c0 + v1.24.1: 
c2189c6956afda0f6002839f9f14a9b48c89dcc0228701e84856be36a3aac6bf + v1.24.0: 8f066c9a048dd1704bf22ccf6e994e2fa2ea1175c9768a786f6cb6608765025e + v1.23.15: b5540d2b67f325ad79af6b86a88bc3d1a8a225453911e7ebb7387788ce355a87 + v1.23.14: 80cdff15398c8215bb7337efdee25b40c862befbdf7925f6a8aca71bc9a79eae + v1.23.13: 4e2297c9893d425bfcd80741b95fb1a5b59b4fd4f4bcf782ccab94760e653cdf + v1.23.12: b802f12c79a9797f83a366c617144d019d2994fc724c75f642a9d031ce6a3488 + v1.23.11: ce4f568c3193e8e0895062f783980da89adb6b54a399c797656a3ce172ddb2fc + v1.23.10: 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8 + v1.23.9: c11b14ab3fa8e567c54e893c5a937f53618b26c9b62416cc8aa7760835f68350 + v1.23.8: 1b4ec707e29e8136e3516a437cb541a79c52c69b1331a7add2b47e7ac7d032e6 + v1.23.7: e96b746a77b00c04f1926035899a583ce28f02e9a5dca26c1bfb8251ca6a43bb + v1.23.6: 11a0310e8e7af5a11539ac26d6c14cf1b77d35bce4ca74e4bbd053ed1afc8650 + v1.23.5: 61f7e3ae0eb00633d3b5163c046cfcae7e73b5f26d4ffcf343f3a45904323583 + v1.23.4: c4f09c9031a34549fbaa48231b115fee6e170ce6832dce26d4b50b040aad2311 + v1.23.3: 95c36d0d1e65f6167f8fa80df04b3a816bc803e6bb5554f04d6af849c729a77d + v1.23.2: 65372ad077a660dfb8a863432c8a22cd0b650122ca98ce2e11f51a536449339f + v1.23.1: c24e4ab211507a39141d227595610383f7c5686cae3795b7d75eebbce8606f3d + v1.23.0: a546fb7ccce69c4163e4a0b19a31f30ea039b4e4560c23fd6e3016e2b2dfd0d9 + amd64: + v1.25.5: 16b23e1254830805b892cfccf2687eb3edb4ea54ffbadb8cc2eee6d3b1fab8e6 + v1.25.4: 7f7437e361f829967ee02e30026d7e85219693432ac5e930cc98dd9c7ddb2fac + v1.25.3: d5c89c5e5dae6afa5f06a3e0e653ac3b93fa9a93c775a715531269ec91a54abe + v1.25.2: 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3 + v1.25.1: 63e38bcbc4437ce10227695f8722371ec0d178067f1031d09fe1f59b6fcf214a + v1.25.0: 7f9183fce12606818612ce80b6c09757452c4fb50aefea5fc5843951c5020e24 + v1.24.9: 8753b9ae0c3e22f09dafdb4178492582c28874f70844de38dc43eb3fad5ca8bb + v1.24.8: 2da0b93857cf352bff5d1eb42e34d398a5971b63a53d8687b45179a78540d6d6 + v1.24.7: 4d24c97c924c40971412cc497145ad823e4b7b87ccda97ebced375f7e886e9e2 + v1.24.6: f8b606f542327128e404d2e66a72a40dc2ddb4175fb8e93c55effeacea60921b + v1.24.5: 2448debe26e90341b038d7ccfcd55942c76ef3d9db48e42ceae5e8de3fbad631 + v1.24.4: 0f34d12aaa1b911adbf75dd63df03d0674dde921fa0571a51acd2b5b576ba0a4 + v1.24.3: da575ceb7c44fddbe7d2514c16798f39f8c10e54b5dbef3bcee5ac547637db11 + v1.24.2: 13da57d32be1debad3d8923e481f30aaa46bca7030b7e748b099d403b30e5343 + v1.24.1: fc352d5c983b0ccf47acd8816eb826d781f408d27263dd8f761dfb63e69abfde + v1.24.0: 3d98ac8b4fb8dc99f9952226f2565951cc366c442656a889facc5b1b2ec2ba52 + v1.23.15: 5cf382d911c13c9cc8f770251b3a2fd9399c70ac50337874f670b9078f88231d + v1.23.14: f2bef00508790f632d035a6cfdd31539115611bfc93c5a3266ceb95bb2f27b76 + v1.23.13: 4d8f796b82dbe2b89b6d587bfeedf66724526b211c75a53456d4ac4014e3dcca + v1.23.12: 98ffa8a736d3e43debb1aa61ae71dea3671989cde5e9e44c6ee51a3d47c63614 + v1.23.11: b0e6d413f9b4cf1007fcb9f0ea6460ed5273a50c945ae475c224036b0ab817f7 + v1.23.10: c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b + v1.23.9: a5975920be1de0768e77ef101e4e42b179406add242c0883a7dc598f2006d387 + v1.23.8: 1ba15ad4d9d99cfc3cbef922b5101492ad74e812629837ac2e5705a68cb7af1e + v1.23.7: 518f67200e853253ed6424488d6148476144b6b796ec7c6160cff15769b3e12a + v1.23.6: fbb83e35f6b9f7cae19c50694240291805ca9c4028676af868306553b3e9266c + v1.23.5: 253b9db2299b09b91e4c09781ce1d2db6bad2099cf16ba210245159f48d0d5e4 + v1.23.4: ec3db57edcce219c24ef37f4a6a2eef5a1543e4a9bd15e7ecc993b9f74950d91 + v1.23.3: 
8f9d2dd992af82855fbac2d82e030429b08ba7775e4fee7bf043eb857dfb0317 + v1.23.2: c3c4be17910935d234b776288461baf7a9c6a7414d1f1ac2ef8d3a1af4e41ab6 + v1.23.1: 7ff47abf62096a41005d18c6d482cf73f26b613854173327fa9f2b98720804d4 + v1.23.0: 4756ff345dd80704b749d87efb8eb294a143a1f4a251ec586197d26ad20ea518 + ppc64le: + v1.25.5: 3071e26e648ff50880d699ccabd677537b9e2762d1ece9e11401adde664f8e28 + v1.25.4: 3d4806fae6f39f091ea3d9fb195aa6d3e1ef779f56e485b6afbb328c25e15bdc + v1.25.3: 447a8b34646936bede22c93ca85f0a98210c9f61d6963a7d71f7f6a5152af1d1 + v1.25.2: a45dc00ac3a8074c3e9ec6a45b63c0a654529a657d929f28bd79c550a0d213d7 + v1.25.1: c1e3373ac088e934635fb13004a21ada39350033bfa0e4b258c114cb86b69138 + v1.25.0: 8015f88d1364cf77436c157de8a5d3ab87f1cb2dfaa9289b097c92a808845491 + v1.24.9: 3011fee2b8256e54efa24f3fc294642a6106a483722d89e82aa962a4435c86b2 + v1.24.8: 58ee62ed2fd4858d308ba672183ea0704555d977892510042fc2108da54cb93c + v1.24.7: 621ce04d0cb1c66065303d062bf9ac248225b8428b1adbca3f6fa6dd2eda13cc + v1.24.6: ea9068c28a0107f5e1317ef8ba3a23965d95ee57db6fa71ee27433cdaa0fe33c + v1.24.5: 56844b2594212e81d7cd4470f81da5d0f79876f044ee6d1707166fe76fdcb03a + v1.24.4: 38475815448bd5d43e893b6a9ac9fd3ae8b0dbddf8a7ba92d3f83437b5c1b916 + v1.24.3: 0bfb73c1932c8593ef6281efc6d16bf440275fed1272466f76101ea0f0971907 + v1.24.2: 43e9354dfc46b6d3579a6c9a3e49a2f079fec8e63c3ed998143ab2f05790d132 + v1.24.1: c59319571efe34ad9bcc4edfe89f5e324d9026d1c3182d86cadc00cfc77f7a06 + v1.24.0: d41d62f6aeff9f8f9b1a1390ed2b17994952966741d6675af8410799bca38931 + v1.23.15: f149c45853bda1f9353ae4664d2a02caa9ec4ccfb789870e4004519316714eef + v1.23.14: 2d71172abd71f3b1b3a8361c5cc55ec89b031052b2f91d64133b278e2b894a91 + v1.23.13: 444c646dc94dd7f7541a91ddc16a0da7259e345e1f84ec648077f447626844a2 + v1.23.12: e14a9dd3e3615e781d1de9000b250267eddfbab5ba46432ad2aa9108a5992e6a + v1.23.11: 64b02bc0f17b9df2b7ca8006d6cb6c1345f32fe6e748fcb6cbe9c4b406b116f6 + v1.23.10: a8f742b9b1c0b1a70719da6ea52e92d276b5ad6c59db0070aacdc474292c7e7a + v1.23.9: 6b05833c938c1d31e7450e93aebff561dfaa43eacafde1a011e0945ec2114fec + v1.23.8: f07b6194add802e2e5c5905a79ef744118ccb82ebcbf4e402a11bdb478de2c0f + v1.23.7: e011d7ad6aa01c5d1858ee88829d4a46b66dae10602615f46a7d4a0f9d9c2d6e + v1.23.6: 04461a5f75c2734ec5989f03bf72d766fb8d55021f1625b671bf805a62882089 + v1.23.5: 82e24cc48f23c0bfa3e90cce14b7ae0e0fb28a9ed9d2827e8ca503588f7ea1b5 + v1.23.4: f23611aea7130ba423268983ba1ce6db9451f69069dd16a8dbf013ab46237196 + v1.23.3: 055a9c9e8679c9ff963e43d1dc7d7aa3670a8aa56b96725de85c816e682c24bb + v1.23.2: 6fdee30ee13149845aac8d110ad6a1894bb35f953e1ecb562ce7c59f63329dca + v1.23.1: 9c3dc8ba6888b610e204d4066f0460d5b24037219300bb5f5b254ea7e8d5a4d1 + v1.23.0: 25c841e08ab2655486813287aa97cadf7524277040599e95c32ed9f206308753 +kubectl_checksums: + arm: + v1.25.5: fec9a0f7cd922744935dd5dfc2366ab307424ef4c533299d67edf7de15346e51 + v1.25.4: 49ab7f05bb27a710575c2d77982cbfb4a09247ec94a8e21af28a6e300b698a44 + v1.25.3: 59e1dba0951f19d4d18eb04db50fcd437c1d57460f2008bc03e668f71b8ea685 + v1.25.2: d6b581a41b010ef86a9364102f8612d2ee7fbc7dd2036e40ab7c85adb52331cb + v1.25.1: e8c6bfd8797e42501d14c7d75201324630f15436f712c4f7e46ce8c8067d9adc + v1.25.0: 0b907cfdcabafae7d2d4ac7de55e3ef814df999acdf6b1bd0ecf6abbef7c7131 + v1.24.9: a64fbc95696f982cb55622aeb9ef85a121b1473c8e52296768bb3d82ca53c85c + v1.24.8: b74c8ac75804fd35a14fab7f637acaf5c0cf94dfd0f5ce8d755104b1a1b2e43b + v1.24.7: 1829c5bb2ef30df6e46f99aa5c87a0f510a809f9169c725b3da08455bcf7f258 + v1.24.6: 7ca8fd7f5d6262668c20e3e639759e1976590ed4bd4fece62861dd376c2168de + v1.24.5: 
3ca0fcb90b715f0c13eafe15c9100495a8648d459f1281f3340875d1b0b7e78f + v1.24.4: 060c0bb55aa3284c489cf8224ab10296d486b5a2e7f3e5d6440c9382698bf68a + v1.24.3: 4ae94095580973931da53fd3b823909d85ca05055d6300f392d9dc9e5748d612 + v1.24.2: c342216e1d32c28953e13f28ced387feda675b969a196ed69eaeda137fa7486a + v1.24.1: 42e880ff20a55e8ec49187d54e2c1367226d220a0a6a1797e7fbf97426762f4f + v1.24.0: 410fc0b3c718f8f431fe4f7d5820bf8133b16ffb76187a53fa90929a77a38cbc + v1.23.15: 0fe6641715ee98a3d8899edd539322fa07762f8d65a35db23184ef06c1ff8111 + v1.23.14: 071f390f560320c4caff188d8f6f21c1b3258dfed600184f39d054d1d0673f69 + v1.23.13: c32baf45ad141f967b4877c7151aeee1ae296eebdbcb7a5200d418bd77c284b2 + v1.23.12: 94e946dcd1c2f7c8c9e3e022202762a36dab604b861b50bdcbdfb2c719731bd9 + v1.23.11: 6eaffb8f64929e888137366cf2aa7fd1df2cf851de4f96f62fe70ed4d79f0ef7 + v1.23.10: b2156478b03b90c0f72fd386ceab2e78b7cf32eab9d9b4696c28d2bb45c9d3ec + v1.23.9: 44caabd847c147ded79aa91daa49a5e0ea68ce4a0833b0733df1c8313375ff80 + v1.23.8: c4a2be3c61f40d4b1b0f61d509b0e361e85f10b7d2a98120d180c023ede7728f + v1.23.7: bc74849aabe50feb71333e41130ecf1122c0f79705a5fdc9d1ec2fce621bf749 + v1.23.6: 30d8e9656334b57e78c8dbc5d5f245a64b9a74c4fd03db47182fa7a21c2f5e32 + v1.23.5: 58420bc549e1683a4529066b38b2ac657611ed3b70041be78fba3b29401415db + v1.23.4: bde3d7801cfe444d4e226d4669dfd518e4687e16c99efddd016c4bf3d529b198 + v1.23.3: bc41382fbd3f6b33cb5ccb1819c5a38f2e6f3c9ce22acfedd6970b0b9b7748da + v1.23.2: 6521719af33342f00ebb6cf020848e25152a63ed5f35a94440c08373b7a36173 + v1.23.1: 52001ed48e9e1c8b8623f3e6b0242111227721e5ddd08fa18046c65c406e35a5 + v1.23.0: 6152216d88fa4d32da58c67f78b63b3b99bf4d4d726ffb9fb74ea698dccc8644 + arm64: + v1.25.5: 7bc650f28a5b4436df2abcfae5905e461728ba416146beac17a2634fa82a6f0a + v1.25.4: a8e9cd3c6ca80b67091fc41bc7fe8e9f246835925c835823a08a20ed9bcea1ba + v1.25.3: cfd5092ce347a69fe49c93681a164d9a8376d69eef587da894207c62ec7d6a5d + v1.25.2: b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5 + v1.25.1: 73602eabf20b877f88642fafcbe1eda439162c2c1dbcc9ed09fdd4d7ac9919ea + v1.25.0: 24db547bbae294c5c44f2b4a777e45f0e2f3d6295eace0d0c4be2b2dfa45330d + v1.24.9: f59c522cf5f9db826c64f28364946acb6bcb6957669291fa29b926b7812b5bbe + v1.24.8: b8ac2abfcb1fa04695d18098558ff483ec2c2488877b5abc4035a543544cdcb1 + v1.24.7: 4b138a11b13210ce1731e06918f8fff6709c004c6fb6bec28544713854de9fe8 + v1.24.6: 2f62e55960b02bb63cbc9154141520ac7cf0c2d55b45dd4a72867971e24a7219 + v1.24.5: a5e348758c0f2b22adeb1b663b4b66781bded895d8ea2a714eb1de81fb00907a + v1.24.4: 0aa4a08ff81efe3fc1a8ef880ca2f8622e3b1f93bf622583d7b9bfe3124afe61 + v1.24.3: bdad4d3063ddb7bfa5ecf17fb8b029d5d81d7d4ea1650e4369aafa13ed97149a + v1.24.2: 5a4c3652f08b4d095b686e1323ac246edbd8b6e5edd5a2626fb71afbcd89bc79 + v1.24.1: b817b54183e089494f8b925096e9b65af3a356d87f94b73929bf5a6028a06271 + v1.24.0: 449278789de283648e4076ade46816da249714f96e71567e035e9d17e1fff06d + v1.23.15: f619f8b4811d60edef692f1d888609cc279a7d8223e50e1c0dc959c7b9250e79 + v1.23.14: 857716aa5cd24500349e5de8238060845af34b91ac4683bd279988ad3e1d3efa + v1.23.13: 950626ae35fca6c26096f97cac839d76e2f29616048ad30cec68f1ff003840f2 + v1.23.12: 88ebbc41252b39d49ce574a5a2bb25943bb82e55a252c27fe4fc096ce2dbb437 + v1.23.11: 9416cc7abaf03eb83f854a45a41986bf4e1232d129d7caafc3101a01ca11b0e3 + v1.23.10: d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b + v1.23.9: 66659f614d06d0fe80c5eafdba7073940906de98ea5ee2a081d84fa37d8c5a21 + v1.23.8: b293fce0b3dec37d3f5b8875b8fddc64e02f0f54f54dd7742368973c52530890 + v1.23.7: 
5d59447a5facd8623a79c2a296a68a573789d2b102b902aafb3a730fc4bb0d3b + v1.23.6: 4be771c8e6a082ba61f0367077f480237f9858ef5efe14b1dbbfc05cd42fc360 + v1.23.5: 15cd560c04def7bbe5ee3f6f75e2cfd3913371c7e76354f4b2d5d6f536b70e39 + v1.23.4: aa45dba48791eeb78a994a2723c462d155af4e39fdcfbcb39ce9c96f604a967a + v1.23.3: 6708d7a701b3d9ab3b359c6be27a3012b1c486fa1e81f79e5bdc71ffca2c38f9 + v1.23.2: 6e7bb8ddc5fc8fa89a4c31aba02942718b092a5107585bd09a83c95039c7510b + v1.23.1: c0c24c7f6a974390e15148a575c84878e925f32328ff96ae173ec762678e4524 + v1.23.0: 1d77d6027fc8dfed772609ad9bd68f611b7e4ce73afa949f27084ad3a92b15fe + amd64: + v1.25.5: 6a660cd44db3d4bfe1563f6689cbe2ffb28ee4baf3532e04fff2d7b909081c29 + v1.25.4: e4e569249798a09f37e31b8b33571970fcfbdecdd99b1b81108adc93ca74b522 + v1.25.3: f57e568495c377407485d3eadc27cda25310694ef4ffc480eeea81dea2b60624 + v1.25.2: 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb + v1.25.1: 9cc2d6ce59740b6acf6d5d4a04d4a7d839b0a81373248ef0ce6c8d707143435b + v1.25.0: e23cc7092218c95c22d8ee36fb9499194a36ac5b5349ca476886b7edc0203885 + v1.24.9: 7e13f33b7379b6c25c3ae055e4389eb3eef168e563f37b5c5f1be672e46b686e + v1.24.8: f93c18751ec715b4d4437e7ece18fe91948c71be1f24ab02a2dde150f5449855 + v1.24.7: 2d88e56d668b1d7575b4783f22d512e94da432f42467c3aeac8a300b6345f12d + v1.24.6: 3ba7e61aecb19eadfa5de1c648af1bc66f5980526645d9dfe682d77fc313b74c + v1.24.5: 3037f2ec62956e7146fc86defb052d8d3b28e2daa199d7e3ff06d1e06a6286ed + v1.24.4: 4a76c70217581ba327f0ad0a0a597c1a02c62222bb80fbfea4f2f5cb63f3e2d8 + v1.24.3: 8a45348bdaf81d46caf1706c8bf95b3f431150554f47d444ffde89e8cdd712c1 + v1.24.2: f15fb430afd79f79ef7cf94a4e402cd212f02d8ec5a5e6a7ba9c3d5a2f954542 + v1.24.1: 0ec3c2dbafc6dd27fc8ad25fa27fc527b5d7356d1830c0efbb8adcf975d9e84a + v1.24.0: 94d686bb6772f6fb59e3a32beff908ab406b79acdfb2427abdc4ac3ce1bb98d7 + v1.23.15: adab29cf67e04e48f566ce185e3904b5deb389ae1e4d57548fcf8947a49a26f5 + v1.23.14: 13ce4b18ba6e15d5d259249c530637dd7fb9722d121df022099f3ed5f2bd74cd + v1.23.13: fae6957e6a7047ad49cdd20976cd2ce9188b502c831fbf61f36618ea1188ba38 + v1.23.12: b150c7c4830cc3be4bedd8998bf36a92975c95cd1967b4ef2d1edda080ffe5d9 + v1.23.11: cf04ad2fa1cf118a951d690af0afbbe8f5fc4f02c721c848080d466e6159111e + v1.23.10: 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7 + v1.23.9: 053561f7c68c5a037a69c52234e3cf1f91798854527692acd67091d594b616ce + v1.23.8: 299803a347e2e50def7740c477f0dedc69fc9e18b26b2f10e9ff84a411edb894 + v1.23.7: b4c27ad52812ebf3164db927af1a01e503be3fb9dc5ffa058c9281d67c76f66e + v1.23.6: 703a06354bab9f45c80102abff89f1a62cbc2c6d80678fd3973a014acc7c500a + v1.23.5: 715da05c56aa4f8df09cb1f9d96a2aa2c33a1232f6fd195e3ffce6e98a50a879 + v1.23.4: 3f0398d4c8a5ff633e09abd0764ed3b9091fafbe3044970108794b02731c72d6 + v1.23.3: d7da739e4977657a3b3c84962df49493e36b09cc66381a5e36029206dd1e01d0 + v1.23.2: 5b55b58205acbafa7f4e3fc69d9ce5a9257be63455db318e24db4ab5d651cbde + v1.23.1: 156fd5e7ebbedf3c482fd274089ad75a448b04cf42bc53f370e4e4ea628f705e + v1.23.0: 2d0f5ba6faa787878b642c151ccb2c3390ce4c1e6c8e2b59568b3869ba407c4f + ppc64le: + v1.25.5: 816b6bfcbe312a4e6fbaaa459f52620af307683470118b9a4afb0f8e1054beb8 + v1.25.4: 23f5cec67088fa0c3efc17110ede5f6120d3ad18ad6b996846642c2f46b43da0 + v1.25.3: bd59ac682fffa37806f768328fee3cb791772c4a12bcb155cc64b5c81b6c47ce + v1.25.2: 1e3665de15a591d52943e6417f3102b5d413bc1d86009801ad0def04e8c920c5 + v1.25.1: 957170066abc4d4c178ac8d84263a191d351e98978b86b0916c1b8c061da8282 + v1.25.0: dffe15c626d7921d77e85f390b15f13ebc3a9699785f6b210cd13fa6f4653513 + v1.24.9: 
8893337877ae82280fd52b3ef2c9ea6a1e477a9f6ee3b04ea3ddbd00da2c85a0 + v1.24.8: 9ed85938808b6ae52a2d0b5523dc3122a7dcf8857d609b7d79a1733c72344dc1 + v1.24.7: a68ec0c8ed579324037fc0a3bafa9d10184e6ff3ca34bfffdcb78f9f02bcb765 + v1.24.6: 448009693a97428aec7e60cc117079724f890e3a46d0aa54accdb56f33ca0f3d + v1.24.5: 0861df1c77336fbe569887a884d62a24fcb6486d43798a8767dba7e5865c3c98 + v1.24.4: cfd7151471dd9878d48ab8d7bc3cf945c207e130568ee778f1aed9ceb84afd44 + v1.24.3: 893a83cd636650d1ad50be0e9a2517f2f4434c35646dacd9160b66446aee404e + v1.24.2: cacf9b4a539853158b885c39fa714710767aa6c12804fccb7de6b037228b811f + v1.24.1: 8812543e6c34101d37ad9d7a7edb91621db0fe992b16bd9beb8e5ddb4c7792c5 + v1.24.0: 153a1ca1593ef4cb56b16922f8e229986a621d396112f0cfad6fa568ad00fa75 + v1.23.15: ec5488895862a8c0c4a45558f395801ab40e55956831d9e56ade1dd1ba3968ec + v1.23.14: 291127abe519e4a1c0193960d361ba5a58c21cddb4cfff8ae4e67c001671849d + v1.23.13: 785d620dc77d10ce49218894225e935e55d08bb3842ae75c11cb41a814aca9ea + v1.23.12: f9a8efede8872c23c54c44f09657fa522e99786f3dc73ba7d6d928e9b3c7dc1a + v1.23.11: 52556d4e8ba19e8b0a65e4ac70203922b42b054647ec59a0177a2c4f61b903e7 + v1.23.10: fc0867d7412d7698029413a8307d8e74748d47e402c075e8d6cc79ed772fb232 + v1.23.9: 141532b62ce75860975d5913bfbf784a09b0abc83ca7d31a6b1eddf28866ce67 + v1.23.8: 599ed10fc7e8fcb5884485cecf690c7645947d1f144b66d717a3f064f11c0b8f + v1.23.7: dab46d2ede0a930f1530ebf857da538ca0879bdb72fc71070d518849c45b9fae + v1.23.6: 3fdba4f852046b0ee782048cad9c1fe4db9c98cb882ff78b5bca4632984c7700 + v1.23.5: d625dbea2879d12ca1c61b1c00084405a34514abaea1096110c8c8661cfac84f + v1.23.4: 1648768124315c5cbcfa6c24a31a34037558c09b91ead60267e13d6c7f3b597b + v1.23.3: 7297e595ed549bac93decda41c9830a3e032fd374467d679c98ef35dcdd1d2aa + v1.23.2: 97d50dc4ff0a6c70bbfcbd45f6959e6201c6317392b2894008017380669f6015 + v1.23.1: 514e50afdb5b8953adfffe4941e903748348830bdd82805fd4489c3334a02a4a + v1.23.0: e96f2b16d8a10fe6531dfac9143efa4960432cf2ae8b26ffd174fa00eb28a851 +kubeadm_checksums: + arm: + v1.25.5: c1753bffff88e3f192acc46f2ea4b7058a920c593f475cfb0ea015e6d9667ee1 + v1.25.4: a20379513e5d91073a52a0a3e7a9201e2d7b23daa55d68456465d8c9ef69427c + v1.25.3: 3f357e1e57936ec7812d35681be249b079bbdc1c7f13a75e6159379398e37d5e + v1.25.2: 2f794569c3322bb66309c7f67126b7f88155dfb1f70eea789bec0edf4e10015e + v1.25.1: ecb7a459ca23dfe527f4eedf33fdb0df3d55519481a8be3f04a5c3a4d41fa588 + v1.25.0: 67b6b58cb6abd5a4c9024aeaca103f999077ce6ec8e2ca13ced737f5139ad2f0 + v1.24.9: ba58cb05a6bcb7b974223df7de6b0af38e4eb78b944b775de5166337288cf172 + v1.24.8: 5117a0f3b652950bee328ee9583504fe50c012290436e56f6f4b9d7219ad2591 + v1.24.7: c0a9e6c08cad0b727f06bb3b539d55c65ea977be68fe471f6a9f73af3fbcb275 + v1.24.6: 760f0fc195f00ca3d1612e0974461ab937c25aa1e7a2f8d2357cd1336b2ecf3a + v1.24.5: 973f1ad7da9216fe3e0319a0c4fcb519a21a773cd39a0a445e689bea3d4a27c7 + v1.24.4: e0c1510ab2ed1cd555abad6f226454a3206aaaf20474da7dcf976ddc86a065d4 + v1.24.3: dc90c93e2305a7babafc41185a43435a9f3af2ef5d546bbd06e6553898e43d9e + v1.24.2: d4bead61c1ba03113281ab96b21530b32e96eea24220bd2aebe1abdec739c266 + v1.24.1: 1c0b22c941badb40f4fb93e619b4a1c5e4bba7c1c7313f7c7e87d77150f35153 + v1.24.0: c463bf24981dea705f4ee6e547abd5cc3b3e499843f836aae1a04f5b80abf4c2 + v1.23.15: 2e2a09d8e76202007b9bf97010b62de0867dfe6df83f155c3b703ea8ee0bc68f + v1.23.14: de222c7f05e90ae263b988e191a1b907c593c4ddc363277dae24d91ba694c731 + v1.23.13: 54d0f4d7a65abf610606b0538005ab5f177566587a81af6b0bc24ded2f8e305c + v1.23.12: 6da38118a7a1570ad76389f0492c11f8ae8e2068395773b89a2b0442d02e604c + v1.23.11: 
4ea0f63d245d01eccc5c3f2c849e2c799392d5e37c9bc4c0ec7a06a5d3722622 + v1.23.10: e0db03e8c4c06c3c3e5e29558fa316b0b56ac9d2801751c4a36b2e3f84455b1f + v1.23.9: fa265d592d4f85b083919baa80b232deae20acaf2a20095a9c417c4d5324e002 + v1.23.8: 24d159ac19b519453050a977d2f238873c328e3a9dd3dfe524a32f421b64dadb + v1.23.7: 18da04d52a05f2b1b8cd7163bc0f0515a4ee793bc0019d2cada4bbf3323d4044 + v1.23.6: da2221f593e63195736659e96103a20e4b7f2060c3030e8111a4134af0d37cfb + v1.23.5: 9ea3e52cb236f446a33cf69e4ed6ac28a76103c1e351b2675cb9bfcb77222a61 + v1.23.4: 9ca72cf1e6bbbe91bf634a18571c84f3fc36ba5fcd0526b14432e87b7262a5ee + v1.23.3: cb2513531111241bfb0f343cff18f7b504326252ae080bb69ad1ccf3e31a2753 + v1.23.2: 63a6ca7dca76475ddef84e4ff84ef058ee2003d0e453b85a52729094025d158e + v1.23.1: 77baac1659f7f474ba066ef8ca67a86accc4e40d117e73c6c76a2e62689d8369 + v1.23.0: b59790cdce297ac0937cc9ce0599979c40bc03601642b467707014686998dbda + arm64: + v1.25.5: 426dddad1c60b7617f4095507cef524d76ec268a0201c1df154c108287a0b98e + v1.25.4: 3f5b273e8852d13fa39892a30cf64928465c32d0eb741118ba89714b51f03cd5 + v1.25.3: 61bb61eceff78b44be62a12bce7c62fb232ce1338928e4207deeb144f82f1d06 + v1.25.2: 437dc97b0ca25b3fa8d74b39e4059a77397b55c1a6d16bddfd5a889d91490ce0 + v1.25.1: f4d57d89c53b7fb3fe347c9272ed40ec55eab120f4f09cd6b684e97cb9cbf1f0 + v1.25.0: 07d9c6ffd3676502acd323c0ca92f44328a1f0e89a7d42a664099fd3016cf16b + v1.24.9: 57c61562a9de4cc78f276f665d7f04666607b17e3ad0fa6c14be64ad85c80951 + v1.24.8: 6f35562001e859f2a76a89c0da61f09433cc6628ccbc3992e82a977e0e348870 + v1.24.7: ee946d82173b63f69be9075e218250d4ab1deec39d17d600b16b6743e5dca289 + v1.24.6: 211b8d1881468bb673b26036dbcfa4b12877587b0a6260ffd55fd87c2aee6e41 + v1.24.5: a68c6dd24ef47825bb34a2ad430d76e6b4d3cbe92187363676993d0538013ac2 + v1.24.4: 18de228f6087a2e5243bffcd2cc88c40180a4fa83e4de310ad071b4620bdd8b6 + v1.24.3: ea0fb451b69d78e39548698b32fb8623fad61a1a95483fe0add63e3ffb6e31b5 + v1.24.2: bd823b934d1445a020f8df5fe544722175024af62adbf6eb27dc7250d5db0548 + v1.24.1: 04f18fe097351cd16dc91cd3bde979201916686c6f4e1b87bae69ab4479fda04 + v1.24.0: 3e0fa21b8ebce04ca919fdfea7cc756e5f645166b95d6e4b5d9912d7721f9004 + v1.23.15: 8bb17c69ad71bb1230dbe1e598c6ae07390b57e3ba32928f28e83742105424d0 + v1.23.14: 7c21c1fa6a852b10ddea7bd1797ce8b4498d6898014d17d20748307e510a0826 + v1.23.13: 462971d5822c91598754dfaa9c4c8d46a8c74aefef0f4dbbc8be31c4f0d18855 + v1.23.12: d05f6765a65f7541d07aad989ee80cd730c395f042afbe0526f667ea1a0b2947 + v1.23.11: 329d9aa9461baf4a7b7225e664ec1ecd61512b937e1f160f9a303bc0f0d44bbb + v1.23.10: 42e957eebef78f6462644d9debc096616054ebd2832e95a176c07c28ebed645c + v1.23.9: a0a007023db78e5f78d3d4cf3268b83f093201847c1c107ffb3dc695f988c113 + v1.23.8: 9b3d8863ea4ab0438881ccfbe285568529462bc77ef4512b515397a002d81b22 + v1.23.7: 65fd71aa138166039b7f4f3695308064abe7f41d2f157175e6527e60fb461eae + v1.23.6: a4db7458e224c3a2a7b468fc2704b31fec437614914b26a9e3d9efb6eecf61ee + v1.23.5: 22a8468abc5d45b3415d694ad52cc8099114248c3d1fcf4297ec2b336f5cc274 + v1.23.4: 90fd5101e321053cdb66d165879a9cde18f19ba9bb8eae152fd4f4fcbe497be1 + v1.23.3: 5eceefa3ca737ff1532f91bdb9ef7162882029a2a0300b4348a0980249698398 + v1.23.2: a29fcde7f92e1abfe992e99f415d3aee0fa381478b4a3987e333438b5380ddff + v1.23.1: eb865da197f4595dec21e6fb1fa1751ef25ac66b64fa77fd4411bbee33352a40 + v1.23.0: 989d117128dcaa923b2c7a917a03f4836c1b023fe1ee723541e0e39b068b93a6 + amd64: + v1.25.5: af0b25c7a995c2d208ef0b9d24b70fe6f390ebb1e3987f4e0f548854ba9a3b87 + v1.25.4: b8a6119d2a3a7c6add43dcf8f920436bf7fe71a77a086e96e40aa9d6f70be826 + v1.25.3: 
01b59ce429263c62b85d2db18f0ccdef076b866962ed63971ff2bd2864deea7b + v1.25.2: 63ee3de0c386c6f3c155874b46b07707cc72ce5b9e23f336befd0b829c1bd2ad + v1.25.1: adaa1e65c1cf9267a01e889d4c13884f883cf27948f00abb823f10486f1a8420 + v1.25.0: 10b30b87af2cdc865983d742891eba467d038f94f3926bf5d0174f1abf6628f8 + v1.24.9: 20406971ae71886f7f8ee7b9a33c885391ae64da561fb679d5819f2ccc19ac9f + v1.24.8: 9fea42b4fb5eb2da638d20710ebb791dde221e6477793d3de70134ac058c4cc7 + v1.24.7: 8b67319d28bf37e8e7c224954dc778cbe946f2bb0ed86975d8caa83d51c955ee + v1.24.6: 7f4443fd42e0e03f6fd0c7218ca7e2634c9255d5f9d7c581fe362e19098aec4c + v1.24.5: 3b9c1844ec0fc3c94015d63470b073a7b219082b6a6424c6b0da9cf97e234aeb + v1.24.4: 9ec08e0905c0a29a68676ba9f6dd7de73bef13cfa2b846a45e1c2189572dc57c + v1.24.3: 406d5a80712c45d21cdbcc51aab298f0a43170df9477259443d48eac116998ff + v1.24.2: 028f73b8e7c2ae389817d34e0cb829a814ce2fac0a535a3aa0708f3133e3e712 + v1.24.1: 15e3193eecbc69330ada3f340c5a47999959bc227c735fa95e4aa79470c085d0 + v1.24.0: 5e58a29eaaf69ea80e90d9780d2a2d5f189fd74f94ec3bec9e3823d472277318 + v1.23.15: 63329e21be8367628f71978cfc140c74ce9cb0336abd9c4802ca7d20d5dec3c3 + v1.23.14: 46c847e2699839b9ccf6673f0b946c4778a3a2e8e463d15854ba30d3f0cbd87a + v1.23.13: ff86af2b5fa979234dd3f9e7b04ec7d3017239a58417397153726d8077c4ac89 + v1.23.12: bf45d00062688d21ff479bf126e1259d0ce3dee1c5c2fcd803f57497cd5e9e83 + v1.23.11: 2f10bd298a694d3133ea19192b796a106c282441e4148c114c39376042097692 + v1.23.10: 43d186c3c58e3f8858c6a22bc71b5441282ac0ccbff6f1d0c2a66ee045986b64 + v1.23.9: 947571c50ab840796fdd4ffb129154c005dfcb0fe83c6eff392d46cf187fd296 + v1.23.8: edbd60fd6a7e11c71f848b3a6e5d1b5a2bb8ebd703e5490caa8db267361a7b89 + v1.23.7: d7d863213eeb4791cdbd7c5fd398cf0cc2ef1547b3a74de8285786040f75efd2 + v1.23.6: 9213c7d738e86c9a562874021df832735236fcfd5599fd4474bab3283d34bfd7 + v1.23.5: 8eebded187ee84c97003074eaa347e34131fef3acdf3e589a9b0200f94687667 + v1.23.4: c91912c9fd34a50492f889e08ff94c447fdceff150b588016fecc9051a1e56b8 + v1.23.3: 57ec7f2921568dcf4cda0699b877cc830d49ddd2709e035c339a5afc3b83586f + v1.23.2: 58487391ec37489bb32fe532e367995e9ecaeafdb65c2113ff3675e7a8407219 + v1.23.1: 4d5766cb90050ee84e15df5e09148072da2829492fdb324521c4fa6d74d3aa34 + v1.23.0: e21269a058d4ad421cf5818d4c7825991b8ba51cd06286932a33b21293b071b0 + ppc64le: + v1.25.5: d69b73af9e327cba5c771daf8320821ccda703f38506ee4ec5b1ff3776a6eb8f + v1.25.4: 9703e40cb0df48052c3cfb0afc85dc582e600558ab687d6409f40c382f147976 + v1.25.3: 8fe9a69db91c779a8f29b216134508ba49f999fa1e36b295b99444f31266da17 + v1.25.2: a53101ed297299bcf1c4f44ec67ff1cb489ab2d75526d8be10c3068f161601a7 + v1.25.1: c7e2c8d2b852e1b30894b64875191ce388a3a416d41311b21f2d8594872fe944 + v1.25.0: 31bc72e892f3a6eb5db78003d6b6200ba56da46a746455991cb422877afc153d + v1.24.9: abf04047a45f602e455ab7df92ae5500b543fe5ef13fb67d050f3d28dfd1906c + v1.24.8: eccd3fd892b253a8632f3c4a917c19fff4982dd436f8f7de94868a0062c0bf2b + v1.24.7: 29a53be9a74dcb01ea68b0a385bdd9b510f9792955f9f7c93ed608c851b5dc32 + v1.24.6: 9d73bfde24ee9781fcca712658f297a041408b534f875f5e093222ed64c91c15 + v1.24.5: f416c45ca5826ea3ff13be393911424a0fba3aa30b5557d3d32541551566142a + v1.24.4: 00fe93a291ddca28188056e597fc812b798706ea19b2da6f8aaf688f6ea95c0e + v1.24.3: 1cb40441d8982362c6d4ffdd9a980a4563dcc5cccc1bb1d7370f0bd7340484d2 + v1.24.2: 452922d2ec9bfa5e085a879174d1d99adb6212598f3c8ffe15b5e7c3a4e128bb + v1.24.1: 74e84b4e6f2c328a169dab33956bc076a2c1670c638764b9163b1080dcb68137 + v1.24.0: 286de74330365bf660d480297a7aba165a956f6fbb98acd11df2f672e21d7b5c + v1.23.15: 
18eaf8177720fbed8c09d3e83e6066891ca9fc629986b35a2012cafe9febd5d0 + v1.23.14: 529811ef359095fe33a1d94d20fca312c25a1513baf799513c47711d34bd73ad + v1.23.13: 3dbf72fdfc108bf41cab151ac340b336ba17b14fa008b15d84ce223b30391914 + v1.23.12: ccae0a4c81a60e50219954393432c5f4d4692847c866ca497a48a1118f417d0d + v1.23.11: 9930cfb4ae7663f145c1d08e06c49ab60e28a6613ac5c7b19d047f15c1e24c22 + v1.23.10: c9f484bd8806f50ce051a28776ef92e3634a1cdc0a47c9483ee77c34cde845c1 + v1.23.9: 03643613aa6afc6251270adc7681029d4fc10e8a75d553a1d8e63cf5b5a2a8fe + v1.23.8: dcfb69f564b34942136cc4cc340b1c800e3e610292e517e68ab5e0157b9510af + v1.23.7: 525d43db6d24ac048606cb63ff0f737d87473deff66d4c43ed5ae716ed4fb263 + v1.23.6: 0b975ac27fa794134a5a25dfbf6df598e2b62e483134326788443131f6d8e5e4 + v1.23.5: bec93d18fd5e5ef6d5da3d18edb282e58a64ff34ec3544d82dc31a3255d9ed1d + v1.23.4: 9c681254bf7cfce8b94326364d677f1944c0afb070f666f7fd438bd37133f7cc + v1.23.3: fd87d972db45dd6f623dd4ca06075e7e697f1bdaa7936c5c06924d1189ba7ff8 + v1.23.2: 2d76c4d9795e25867b9b6fe7853f94efb8c2f2b3052adab4073fddca93eedc01 + v1.23.1: 6b645c868834197bcb25104f468c601477967341aba6326bdf5d0957dcaa9edc + v1.23.0: 895c84055bca698f50ecdf1fc01d2f368563f77384b1dd00bdacbf6d0c825cc1 + +etcd_binary_checksums: + # Etcd does not have arm32 builds at the moment, having some dummy value is + # required to avoid "no attribute" error + arm: + v3.5.6: 0 + arm64: + v3.5.6: 888e25c9c94702ac1254c7655709b44bb3711ebaabd3cb05439f3dd1f2b51a87 + amd64: + v3.5.6: 4db32e3bc06dd0999e2171f76a87c1cffed8369475ec7aa7abee9023635670fb + ppc64le: + v3.5.6: e235cb885996b8aac133975e0077eaf0a2f8dc7062ad052fa7395668a365906b + +cni_binary_checksums: + arm: + v1.0.1: d35e3e9fd71687fc7e165f7dc7b1e35654b8012995bbfd937946b0681926d62d + v1.1.1: 84f97baf80f9670a8cd0308dedcc8405d2bbc65166d670b48795e0d1262b4248 + arm64: + v1.0.1: 2d4528c45bdd0a8875f849a75082bc4eafe95cb61f9bcc10a6db38a031f67226 + v1.1.1: 16484966a46b4692028ba32d16afd994e079dc2cc63fbc2191d7bfaf5e11f3dd + amd64: + v1.0.1: 5238fbb2767cbf6aae736ad97a7aa29167525dcd405196dfbc064672a730d3cf + v1.1.1: b275772da4026d2161bf8a8b41ed4786754c8a93ebfb6564006d5da7f23831e5 + ppc64le: + v1.0.1: f078e33067e6daaef3a3a5010d6440f2464b7973dec3ca0b5d5be22fdcb1fd96 + v1.1.1: 1551259fbfe861d942846bee028d5a85f492393e04bcd6609ac8aaa7a3d71431 + +calicoctl_binary_checksums: + arm: + v3.24.5: 0 + v3.23.3: 0 + v3.22.4: 0 + v3.21.6: 0 + amd64: + v3.24.5: 01e6c8a2371050f9edd0ade9dcde89da054e84d8e96bd4ba8cf82806c8d3e8e7 + v3.23.3: d9c04ab15bad9d8037192abd2aa4733a01b0b64a461c7b788118a0d6747c1737 + v3.22.4: cc412783992abeba6dc01d7bc67bdb2e3a0cf2f27fc3334bdfc02d326c3c9e15 + v3.21.6: 20335301841ba1dd0795e834ecce0d8e6b89f0b01d781dcc95339419462b3b67 + arm64: + v3.24.5: 2d56b768ed346129b0249261db27d97458cfb35f98bd028a0c817a23180ab2d2 + v3.23.3: 741b222f9bb10b7b5e268e5362796061c8862d4f785bb6b9c4f623ea143f4682 + v3.22.4: e84ba529091818282012fd460e7509995156e50854781c031c81e4f6c715a39a + v3.21.6: 8f4ca86e21364eb23fb4676a0a1ed9e751c8a044360b22eae9ee6af7e81c3d59 + ppc64le: + v3.24.5: 4c40d1703a31eb1d1786287fbf295d614eb9594a4748e505a03a2fbb6eda85b4 + v3.23.3: f83efcd8d3d7c96dfe8e596dc9739eb5d9616626a6afba29b0af97e5c222575a + v3.22.4: f8672ac27ab72c1b05b0f9ae5694881ef8e061bfbcf551f964e7f0a37090a243 + v3.21.6: f7aad0409de2838ba691708943a2aeeef6fb9c02a0475293106e179dc48a4632 + +ciliumcli_binary_checksums: + arm: + v0.12.4: 8e0596d321c97a55449942c2ebd8bb0102dc6a9381919287e383b679cee8f524 + v0.12.5: 1c9a8cf8df62eb814d6c90f6ad6a1c074f991fde5b5573059d27729f12619496 + amd64: + v0.12.4: 
6b4f899fa09b6558a89a32ace3be4dedca08b7f4b76f04931ed1ffb2de8965e2 + v0.12.5: 6b2c9031e4264482b18873ad337394442b8787d6ac26e16e865d36f320c650f0 + arm64: + v0.12.4: e037f34fded56e4199e9e7ff1ce623d2516be7116a6490e02377f786acec5bda + v0.12.5: b779d4b04b23fcae30cc158ce9d29e2cad0c98bd88582c0a2c8d457c71d5c4b3 + ppc64le: + v0.12.4: 0 + v0.12.5: 0 + +calico_crds_archive_checksums: + v3.24.5: 10320b45ebcf4335703d692adacc96cdd3a27de62b4599238604bd7b0bedccc3 + v3.23.3: d25f5c9a3adeba63219f3c8425a8475ebfbca485376a78193ec1e4c74e7a6115 + v3.22.4: e72e7b8b26256950c1ce0042ac85fa83700154dae9723c8d007de88343f6a7e5 + v3.21.6: db4fa80b79b39853f0b1a04d875c110b637dd8754bf7b4cec06ae510fb8a2acd + +krew_archive_checksums: + linux: + arm: + v0.4.3: 68eb9e9f5bba29c7c19fb52bfc43a31300f92282a4e81f0c51ad26ed2c73eb03 + arm64: + v0.4.3: 0994923848882ad0d4825d5af1dc227687a10a02688f785709b03549dd34d71d + amd64: + v0.4.3: 5df32eaa0e888a2566439c4ccb2ef3a3e6e89522f2f2126030171e2585585e4f + ppc64le: + v0.4.3: 0 + + darwin: + arm: + v0.4.3: 0 + arm64: + v0.4.3: 22f29ce3c3c9c030e2eaf3939d2b00f0187dfdbbfaee37fba8ffaadc46e51372 + amd64: + v0.4.3: 6f6a774f03ad4190a709d7d4dcbb4af956ca0eb308cb0d0a44abc90777b0b21a + ppc64le: + v0.4.3: 0 + + windows: + arm: + v0.4.3: 0 + arm64: + v0.4.3: 0 + amd64: + v0.4.3: d1343a366a867e9de60b23cc3d8ee935ee185af25ff8f717a5e696ba3cae7c85 + ppc64le: + v0.4.3: 0 + + +helm_archive_checksums: + arm: + v3.10.3: dca718eb68c72c51fc7157c4c2ebc8ce7ac79b95fc9355c5427ded99e913ec4c + arm64: + v3.10.3: 260cda5ff2ed5d01dd0fd6e7e09bc80126e00d8bdc55f3269d05129e32f6f99d + amd64: + v3.10.3: 950439759ece902157cf915b209b8d694e6f675eaab5099fb7894f30eeaee9a2 + ppc64le: + v3.10.3: 93cdf398abc68e388d1b46d49d8e1197544930ecd3e81cc58d0a87a4579d60ed + +cri_dockerd_archive_checksums: + arm: + 0.2.2: 0 + arm64: + 0.2.2: 30e5fb2f06bd1e9fff6eddc185356cf3636d36c6c310bbd5892141e2b8e86ee3 + amd64: + 0.2.2: fbf0fe66805e0104841d0093c6ad74a5e39264616855d902a97c1ba7830855e1 + ppc64le: + 0.2.2: 0 + +runc_checksums: + arm: + v1.1.0: 0 + v1.1.1: 0 + v1.1.2: 0 + v1.1.3: 0 + v1.1.4: 0 + arm64: + v1.1.0: 9ec8e68feabc4e7083a4cfa45ebe4d529467391e0b03ee7de7ddda5770b05e68 + v1.1.1: 20c436a736547309371c7ac2a335f5fe5a42b450120e497d09c8dc3902c28444 + v1.1.2: 6ebd968d46d00a3886e9a0cae2e0a7b399e110cf5d7b26e63ce23c1d81ea10ef + v1.1.3: 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f + v1.1.4: dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223 + amd64: + v1.1.0: ab1c67fbcbdddbe481e48a55cf0ef9a86b38b166b5079e0010737fd87d7454bb + v1.1.1: 5798c85d2c8b6942247ab8d6830ef362924cd72a8e236e77430c3ab1be15f080 + v1.1.2: e0436dfc5d26ca88f00e84cbdab5801dd9829b1e5ded05dcfc162ce5718c32ce + v1.1.3: 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01 + v1.1.4: db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce + ppc64le: + v1.1.0: 4a6b2f43c0f2371b1948b2eceb906fd8b9d8f5e9f6bab7d21bc037f5b300f43e + v1.1.1: 5f14bca6e35177134251dfd3c44bccb81136d9043508e7a37494ad9485f5f0e4 + v1.1.2: 545ac8165646ed2b157fae677dd6509baf10e370ebe67c23b2f800163fa97150 + v1.1.3: 3b1b7f953fc8402dec53dcf2de05b6b72d86850737efa9766f8ffefc7cae3c0a + v1.1.4: 0f7fb3d2426b6012d9b33c354c778c0ffbce02c329c4c16c1189433a958fd60d + +crun_checksums: + arm: + 1.4.3: 0 + 1.4.4: 0 + 1.4.5: 0 + amd64: + 1.4.3: 6255325b641be6a3cfb33b5bd7790c035337ad18b9c012d0fbe0e4173a2dbd29 + 1.4.4: 73f7f89a98f69c0bf0e9fe1e0129201d5b72529785b4b1bcb4d43c31d0c3a8ea + 1.4.5: 84cf20a6060cd53ac21a0590367d1ab65f74baae005c42f2d5bc1af918470455 + arm64: + 1.4.3: 
f4f328c99f879273ed475f6f7766904948808de0268a48d92e6e0f2038a1989d + 1.4.4: 2ad2c02ec0b1566f1c5e85223b726b704904cc75c2eb4af298e95b98fe5c166d + 1.4.5: 64a01114060ec12e66b1520c6ee6967410022d1ec73cdc7d14f952343c0769f2 + ppc64le: + 1.4.3: 0 + 1.4.4: 0 + 1.4.5: 0 + +youki_checksums: + arm: + 0.0.1: 0 + amd64: + 0.0.1: 8bd712fe95c8a81194bfbc54c70516350f95153d67044579af95788fbafd943b + arm64: + 0.0.1: 0 + ppc64le: + 0.0.1: 0 + +kata_containers_binary_checksums: + arm: + 2.0.4: 0 + 2.1.1: 0 + 2.2.2: 0 + 2.2.3: 0 + 2.3.0: 0 + 2.4.0: 0 + 2.4.1: 0 + amd64: + 2.0.4: 022a60c2d92a5ab9a5eb83d5a95154a2d06fdc2206b2a473d902ccc86766371a + 2.1.1: a83591d968cd0f1adfb5025d7aa33ca1385d4b1165ff10d74602302fc3c0373f + 2.2.2: 2e3ac77b8abd4d839cf16780b57aee8f3d6e1f19489edd7d6d8069ea3cc3c18a + 2.2.3: e207ab5c8128b50fe61f4f6f98fd34af0fa5ebc0793862be6d13a2674321774f + 2.3.0: 430fa55b387b3bafbbabb7e59aa8c809927a22f8d836732a0719fd2e1d131b31 + 2.4.0: fca40fa4e91efc79c75367ffe09ca32ad795d302aacb91992874f40bfc00348f + 2.4.1: e234ffce779d451dc2a170b394b91d35b96e44ea50dc4a3256defa603efdf607 + arm64: + 2.0.4: 0 + 2.1.1: 0 + 2.2.2: 0 + 2.2.3: 0 + 2.3.0: 0 + 2.4.0: 0 + 2.4.1: 0 + ppc64le: + 2.0.4: 0 + 2.1.1: 0 + 2.2.2: 0 + 2.2.3: 0 + 2.3.0: 0 + 2.4.0: 0 + 2.4.1: 0 + +gvisor_runsc_binary_checksums: + arm: + 20210921: 0 + arm64: + 20210921: 74a916dcd64a7a8347d91c882701363cf2721d53f0db237f0c0b2d708d661e55 + amd64: + 20210921: af5b4527b2d63eea6d0cc2c5522b1e76163df695e9484475e378ec29f7baa661 + ppc64le: + 20210921: 0 + +gvisor_containerd_shim_binary_checksums: + arm: + 20210921: 0 + arm64: + 20210921: 51e466a05256eb2d40fe3cc987ec486212df4af6f79e53630dfd822b9bc1fb2f + amd64: + 20210921: 9ed085fcdbf6f300474e10f2f32b323038568342ce8130298f56e13d14484daa + ppc64le: + 20210921: 0 + +nerdctl_archive_checksums: + arm: + 1.0.0: 8fd283a2f2272b15f3df43cd79642c25f19f62c3c56ad58bb68afb7ed92904c2 + arm64: + 1.0.0: 27622c9d95efe6d807d5f3770d24ddd71719c6ae18f76b5fc89663a51bcd6208 + amd64: + 1.0.0: 3e993d714e6b88d1803a58d9ff5a00d121f0544c35efed3a3789e19d6ab36964 + ppc64le: + 1.0.0: 2fb02e629a4be16b194bbfc64819132a72ede1f52596bd8e1ec2beaf7c28c117 + +containerd_archive_checksums: + arm: + 1.5.5: 0 + 1.5.7: 0 + 1.5.8: 0 + 1.5.9: 0 + 1.5.10: 0 + 1.5.11: 0 + 1.5.12: 0 + 1.5.13: 0 + 1.6.0: 0 + 1.6.1: 0 + 1.6.2: 0 + 1.6.3: 0 + 1.6.4: 0 + 1.6.5: 0 + 1.6.6: 0 + 1.6.7: 0 + 1.6.8: 0 + 1.6.9: 0 + 1.6.10: 0 + 1.6.11: 0 + 1.6.12: 0 + 1.6.13: 0 + 1.6.14: 0 + arm64: + 1.5.5: 0 + 1.5.7: 0 + 1.5.8: 0 + 1.5.9: 0 + 1.5.10: 0 + 1.5.11: 0 + 1.5.12: 0 + 1.5.13: 0 + 1.6.0: 6eff3e16d44c89e1e8480a9ca078f79bab82af602818455cc162be344f64686a + 1.6.1: fbeec71f2d37e0e4ceaaac2bdf081295add940a7a5c7a6bcc125e5bbae067791 + 1.6.2: a4b24b3c38a67852daa80f03ec2bc94e31a0f4393477cd7dc1c1a7c2d3eb2a95 + 1.6.3: 354e30d52ff94bd6cd7ceb8259bdf28419296b46cf5585e9492a87fdefcfe8b2 + 1.6.4: 0205bd1907154388dc85b1afeeb550cbb44c470ef4a290cb1daf91501c85cae6 + 1.6.5: 2833e2f0e8f3cb5044566d64121fdd92bbdfe523e9fe912259e936af280da62a + 1.6.6: 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb + 1.6.7: 4167bf688a0ed08b76b3ac264b90aad7d9dd1424ad9c3911e9416b45e37b0be5 + 1.6.8: b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd + 1.6.9: 140197aee930a8bd8a69ff8e0161e56305751be66e899dccd833c27d139f4f47 + 1.6.10: 6d655e80a843f480e1c1cead18479185251581ff2d4a2e2e5eb88ad5b5e3d937 + 1.6.11: 1b34d8ff067da482af021dac325dc4e993d7356c0bd9dc8e5a3bb8271c1532de + 1.6.12: 0a0133336596b2d1dcafe3587eb91ab302afc28f273614e0e02300694b5457a0 + 1.6.13: 
8c7892ae7c2e96a4a9358b1064fb5519a5c0528b715beee67b72e74d7a644064 + 1.6.14: 3ccb61218e60cbba0e1bbe1e5e2bf809ac1ead8eafbbff36c3195d3edd0e4809 + amd64: + 1.5.5: 8efc527ffb772a82021800f0151374a3113ed2439922497ff08f2596a70f10f1 + 1.5.7: 109fc95b86382065ea668005c376360ddcd8c4ec413e7abe220ae9f461e0e173 + 1.5.8: feeda3f563edf0294e33b6c4b89bd7dbe0ee182ca61a2f9b8c3de2766bcbc99b + 1.5.9: a457793a1643657588baf46d3ffbf44fae0139b65076064e237ddf29cd838ba4 + 1.5.10: 44f809e02233a510bb9d136906849e9ed058aa1d3d714244376001ab77464db7 + 1.5.11: f2a2476ca44a24067488cd6d0b064b2128e01f6f53e5f29c5acfaf1520927ee2 + 1.5.12: 301833f6377e9471a2cf1a1088ba98826db7e8fe9d3ffdc9f570b0638bcd3a1f + 1.5.13: 7b5b34f30a144985e849bdeb0921cfd3fe65f9508b5707fd237fd2c308d9abae + 1.6.0: f77725e4f757523bf1472ec3b9e02b09303a5d99529173be0f11a6d39f5676e9 + 1.6.1: c1df0a12af2be019ca2d6c157f94e8ce7430484ab29948c9805882df40ec458b + 1.6.2: 3d94f887de5f284b0d6ee61fa17ba413a7d60b4bb27d756a402b713a53685c6a + 1.6.3: 306b3c77f0b5e28ed10d527edf3d73f56bf0a1fb296075af4483d8516b6975ed + 1.6.4: f23c8ac914d748f85df94d3e82d11ca89ca9fe19a220ce61b99a05b070044de0 + 1.6.5: cf02a2da998bfcf61727c65ede6f53e89052a68190563a1799a7298b0cea86b4 + 1.6.6: 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef + 1.6.7: 52e817b712d521b193773529ff33626f47507973040c02474a2db95a37da1c37 + 1.6.8: 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e + 1.6.9: 9ee2644bfb95b23123f96b564df2035ec94a46f64060ae12322e09a8ec3c2b53 + 1.6.10: dd1f4730daf728822aea3ba35a440e14b1dfa8f1db97288a59a8666676a13637 + 1.6.11: 21870d7022c52f5f74336d440deffb208ba747b332a88e6369e2aecb69382e48 + 1.6.12: a56c39795fd0d0ee356b4099a4dfa34689779f61afc858ef84c765c63e983a7d + 1.6.13: 97f00411587512e62ec762828e581047b23199f8744754706d09976ec24a2736 + 1.6.14: 7da626d46c4edcae1eefe6d48dc6521db3e594a402715afcddc6ac9e67e1bfcd + ppc64le: + 1.5.5: 0 + 1.5.7: 0 + 1.5.8: 0 + 1.5.9: 0 + 1.5.10: 0 + 1.5.11: 0 + 1.5.12: 0 + 1.5.13: 0 + 1.6.0: 0 + 1.6.1: 0 + 1.6.2: 0 + 1.6.3: 0 + 1.6.4: 0 + 1.6.5: 0 + 1.6.6: 0 + 1.6.7: 0db5cb6d5dd4f3b7369c6945d2ec29a9c10b106643948e3224e53885f56863a9 + 1.6.8: f18769721f614828f6b778030c72dc6969ce2108f2363ddc85f6c7a147df0fb8 + 1.6.9: fe0046437cfe971ef0b3101ee69fcef5cf52e8868de708d35f8b82f998044f6e + 1.6.10: 704b1affd306b807fe6b4701d778129283635c576ecedc6d0a9da5370a07d56a + 1.6.11: e600a5714ffb29937b3710f9ae81bb7aa15b7b6661192f5e8d0b9b58ac6d5e66 + 1.6.12: 088e4d1fe1787fc4a173de24a58da01880d1ead5a13f1ab55e1ade972d3907d4 + 1.6.13: f2508ada0c8bd7d3cb09b0e7f10416aba3d643c0da7adc27efe4e76d444322ae + 1.6.14: 73025da0666079fc3bbd48cf185da320955d323c7dc42d8a4ade0e7926d62bb0 +skopeo_binary_checksums: + arm: + v1.10.0: 0 + arm64: + v1.10.0: 3bfc344d4940df29358f8056de7b8dd488b88a5d777b3106748ba66851fa2c58 + amd64: + v1.10.0: 20fbd1bac1d33768c3671e4fe9d90c5233d7e13a40e4935b4b24ebc083390604 + ppc64le: + v1.10.0: 0 + +etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch][etcd_version] }}" +cni_binary_checksum: "{{ cni_binary_checksums[image_arch][cni_version] }}" +kubelet_binary_checksum: "{{ kubelet_checksums[image_arch][kube_version] }}" +kubectl_binary_checksum: "{{ kubectl_checksums[image_arch][kube_version] }}" +kubeadm_binary_checksum: "{{ kubeadm_checksums[image_arch][kubeadm_version] }}" +calicoctl_binary_checksum: "{{ calicoctl_binary_checksums[image_arch][calico_ctl_version] }}" +calico_crds_archive_checksum: "{{ calico_crds_archive_checksums[calico_version] }}" +ciliumcli_binary_checksum: "{{ ciliumcli_binary_checksums[image_arch][cilium_cli_version] }}" 
+crictl_binary_checksum: "{{ crictl_checksums[image_arch][crictl_version] }}" +crio_archive_checksum: "{{ crio_archive_checksums[image_arch][crio_version] }}" +cri_dockerd_archive_checksum: "{{ cri_dockerd_archive_checksums[image_arch][cri_dockerd_version] }}" +helm_archive_checksum: "{{ helm_archive_checksums[image_arch][helm_version] }}" +runc_binary_checksum: "{{ runc_checksums[image_arch][runc_version] }}" +crun_binary_checksum: "{{ crun_checksums[image_arch][crun_version] }}" +youki_archive_checksum: "{{ youki_checksums[image_arch][youki_version] }}" +kata_containers_binary_checksum: "{{ kata_containers_binary_checksums[image_arch][kata_containers_version] }}" +gvisor_runsc_binary_checksum: "{{ gvisor_runsc_binary_checksums[image_arch][gvisor_version] }}" +gvisor_containerd_shim_binary_checksum: "{{ gvisor_containerd_shim_binary_checksums[image_arch][gvisor_version] }}" +nerdctl_archive_checksum: "{{ nerdctl_archive_checksums[image_arch][nerdctl_version] }}" +krew_archive_checksum: "{{ krew_archive_checksums[host_os][image_arch][krew_version] }}" +containerd_archive_checksum: "{{ containerd_archive_checksums[image_arch][containerd_version] }}" +skopeo_binary_checksum: "{{ skopeo_binary_checksums[image_arch][skopeo_version] }}" + +# Containers
# In some cases, we need a way to set --registry-mirror or --insecure-registry for docker; +# it helps a lot for local private development or bare-metal environments. +# To do so, define --registry-mirror or --insecure-registry and modify the following URL addresses. +# Example: +# You need to deploy a kubernetes cluster in a local private environment. +# Also provide the address of your own private registry +# and use the --insecure-registry option for docker. +kube_proxy_image_repo: "{{ kube_image_repo }}/kube-proxy" +etcd_image_repo: "{{ quay_image_repo }}/coreos/etcd" +etcd_image_tag: "{{ etcd_version }}" +flannel_image_repo: "{{ docker_image_repo }}/flannelcni/flannel" +flannel_image_tag: "{{ flannel_version }}-{{ image_arch }}" +flannel_init_image_repo: "{{ docker_image_repo }}/flannelcni/flannel-cni-plugin" +flannel_init_image_tag: "{{ flannel_cni_version }}-{{ image_arch }}" +calico_node_image_repo: "{{ quay_image_repo }}/calico/node" +calico_node_image_tag: "{{ calico_version }}" +calico_cni_image_repo: "{{ quay_image_repo }}/calico/cni" +calico_cni_image_tag: "{{ calico_cni_version }}" +calico_flexvol_image_repo: "{{ quay_image_repo }}/calico/pod2daemon-flexvol" +calico_flexvol_image_tag: "{{ calico_flexvol_version }}" +calico_policy_image_repo: "{{ quay_image_repo }}/calico/kube-controllers" +calico_policy_image_tag: "{{ calico_policy_version }}" +calico_typha_image_repo: "{{ quay_image_repo }}/calico/typha" +calico_typha_image_tag: "{{ calico_typha_version }}" +calico_apiserver_image_repo: "{{ quay_image_repo }}/calico/apiserver" +calico_apiserver_image_tag: "{{ calico_apiserver_version }}" +pod_infra_image_repo: "{{ kube_image_repo }}/pause" +pod_infra_image_tag: "{{ pod_infra_version }}" +netcheck_version: "v1.2.2" +netcheck_agent_image_repo: "{{ docker_image_repo }}/mirantis/k8s-netchecker-agent" +netcheck_agent_image_tag: "{{ netcheck_version }}" +netcheck_server_image_repo: "{{ docker_image_repo }}/mirantis/k8s-netchecker-server" +netcheck_server_image_tag: "{{ netcheck_version }}" +netcheck_etcd_image_tag: "v3.4.17" +weave_kube_image_repo: "{{ docker_image_repo }}/weaveworks/weave-kube" +weave_kube_image_tag: "{{ weave_version }}" +weave_npc_image_repo: "{{ docker_image_repo }}/weaveworks/weave-npc" +weave_npc_image_tag: "{{ 
weave_version }}" +cilium_image_repo: "{{ quay_image_repo }}/cilium/cilium" +cilium_image_tag: "{{ cilium_version }}" +cilium_operator_image_repo: "{{ quay_image_repo }}/cilium/operator" +cilium_operator_image_tag: "{{ cilium_version }}" +cilium_hubble_relay_image_repo: "{{ quay_image_repo }}/cilium/hubble-relay" +cilium_hubble_relay_image_tag: "{{ cilium_version }}" +cilium_hubble_certgen_image_repo: "{{ quay_image_repo }}/cilium/certgen" +cilium_hubble_certgen_image_tag: "v0.1.8" +cilium_hubble_ui_image_repo: "{{ quay_image_repo }}/cilium/hubble-ui" +cilium_hubble_ui_image_tag: "v0.9.2" +cilium_hubble_ui_backend_image_repo: "{{ quay_image_repo }}/cilium/hubble-ui-backend" +cilium_hubble_ui_backend_image_tag: "v0.9.2" +cilium_hubble_envoy_image_repo: "{{ docker_image_repo }}/envoyproxy/envoy" +cilium_hubble_envoy_image_tag: "v1.22.5" +kube_ovn_container_image_repo: "{{ docker_image_repo }}/kubeovn/kube-ovn" +kube_ovn_container_image_tag: "{{ kube_ovn_version }}" +kube_ovn_dpdk_container_image_repo: "{{ docker_image_repo }}/kubeovn/kube-ovn-dpdk" +kube_ovn_dpdk_container_image_tag: "{{ kube_ovn_dpdk_version }}" +kube_router_image_repo: "{{ docker_image_repo }}/cloudnativelabs/kube-router" +kube_router_image_tag: "{{ kube_router_version }}" +multus_image_repo: "{{ github_image_repo }}/k8snetworkplumbingwg/multus-cni" +multus_image_tag: "{{ multus_version }}" + +kube_vip_image_repo: "{{ github_image_repo }}/kube-vip/kube-vip" +kube_vip_image_tag: v0.5.5 +nginx_image_repo: "{{ docker_image_repo }}/library/nginx" +nginx_image_tag: 1.23.2-alpine +haproxy_image_repo: "{{ docker_image_repo }}/library/haproxy" +haproxy_image_tag: 2.6.6-alpine + +# The Coredns version should be supported by (or at least work with) the corefile-migration tool +# bundled with kubeadm; if not, a 'basic' upgrade can sometimes fail + +coredns_version: "v1.9.3" +coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1','>=')) }}" + +coredns_image_repo: "{{ kube_image_repo }}{{'/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}" +coredns_image_tag: "{{ coredns_version if (coredns_image_is_namespaced | bool) else (coredns_version | regex_replace('^v', '')) }}" + +nodelocaldns_version: "1.21.1" +nodelocaldns_image_repo: "{{ kube_image_repo }}/dns/k8s-dns-node-cache" +nodelocaldns_image_tag: "{{ nodelocaldns_version }}" + +dnsautoscaler_version: 1.8.5 +dnsautoscaler_image_repo: "{{ kube_image_repo }}/cpa/cluster-proportional-autoscaler-{{ image_arch }}" +dnsautoscaler_image_tag: "{{ dnsautoscaler_version }}" + +registry_version: "2.8.1" +registry_image_repo: "{{ docker_image_repo }}/library/registry" +registry_image_tag: "{{ registry_version }}" +metrics_server_version: "v0.6.2" +metrics_server_image_repo: "{{ kube_image_repo }}/metrics-server/metrics-server" +metrics_server_image_tag: "{{ metrics_server_version }}" +local_volume_provisioner_version: "v2.5.0" +local_volume_provisioner_image_repo: "{{ kube_image_repo }}/sig-storage/local-volume-provisioner" +local_volume_provisioner_image_tag: "{{ local_volume_provisioner_version }}" +cephfs_provisioner_version: "v2.1.0-k8s1.11" +cephfs_provisioner_image_repo: "{{ quay_image_repo }}/external_storage/cephfs-provisioner" +cephfs_provisioner_image_tag: "{{ cephfs_provisioner_version }}" +rbd_provisioner_version: "v2.1.1-k8s1.11" +rbd_provisioner_image_repo: "{{ quay_image_repo }}/external_storage/rbd-provisioner" +rbd_provisioner_image_tag: "{{ rbd_provisioner_version }}" +local_path_provisioner_version: "v0.0.22" +local_path_provisioner_image_repo: "{{ 
docker_image_repo }}/rancher/local-path-provisioner" +local_path_provisioner_image_tag: "{{ local_path_provisioner_version }}" +ingress_nginx_version: "v1.5.1" +ingress_nginx_controller_image_repo: "{{ kube_image_repo }}/ingress-nginx/controller" +ingress_nginx_controller_image_tag: "{{ ingress_nginx_version }}" +ingress_nginx_kube_webhook_certgen_imae_repo: "{{ kube_image_repo }}/ingress-nginx/kube-webhook-certgen" +ingress_nginx_kube_webhook_certgen_imae_tag: "v1.3.0" +alb_ingress_image_repo: "{{ docker_image_repo }}/amazon/aws-alb-ingress-controller" +alb_ingress_image_tag: "v1.1.9" +cert_manager_version: "v1.10.1" +cert_manager_controller_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-controller" +cert_manager_controller_image_tag: "{{ cert_manager_version }}" +cert_manager_cainjector_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-cainjector" +cert_manager_cainjector_image_tag: "{{ cert_manager_version }}" +cert_manager_webhook_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-webhook" +cert_manager_webhook_image_tag: "{{ cert_manager_version }}" + +csi_attacher_image_repo: "{{ kube_image_repo }}/sig-storage/csi-attacher" +csi_attacher_image_tag: "v3.3.0" +csi_provisioner_image_repo: "{{ kube_image_repo }}/sig-storage/csi-provisioner" +csi_provisioner_image_tag: "v3.0.0" +csi_snapshotter_image_repo: "{{ kube_image_repo }}/sig-storage/csi-snapshotter" +csi_snapshotter_image_tag: "v5.0.0" +csi_resizer_image_repo: "{{ kube_image_repo }}/sig-storage/csi-resizer" +csi_resizer_image_tag: "v1.3.0" +csi_node_driver_registrar_image_repo: "{{ kube_image_repo }}/sig-storage/csi-node-driver-registrar" +csi_node_driver_registrar_image_tag: "v2.4.0" +csi_livenessprobe_image_repo: "{{ kube_image_repo }}/sig-storage/livenessprobe" +csi_livenessprobe_image_tag: "v2.5.0" + +snapshot_controller_supported_versions: + v1.25: "v4.2.1" + v1.24: "v4.2.1" + v1.23: "v4.2.1" +snapshot_controller_image_repo: "{{ kube_image_repo }}/sig-storage/snapshot-controller" +snapshot_controller_image_tag: "{{ snapshot_controller_supported_versions[kube_major_version] }}" + +cinder_csi_plugin_version: "v1.22.0" +cinder_csi_plugin_image_repo: "{{ docker_image_repo }}/k8scloudprovider/cinder-csi-plugin" +cinder_csi_plugin_image_tag: "{{ cinder_csi_plugin_version }}" + +aws_ebs_csi_plugin_version: "v0.5.0" +aws_ebs_csi_plugin_image_repo: "{{ docker_image_repo }}/amazon/aws-ebs-csi-driver" +aws_ebs_csi_plugin_image_tag: "{{ aws_ebs_csi_plugin_version }}" + +gcp_pd_csi_plugin_version: "v1.4.0" +gcp_pd_csi_plugin_image_repo: "{{ kube_image_repo }}/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver" +gcp_pd_csi_plugin_image_tag: "{{ gcp_pd_csi_plugin_version }}" + +azure_csi_image_repo: "mcr.microsoft.com/oss/kubernetes-csi" +azure_csi_provisioner_image_tag: "v2.2.2" +azure_csi_attacher_image_tag: "v3.3.0" +azure_csi_resizer_image_tag: "v1.3.0" +azure_csi_livenessprobe_image_tag: "v2.5.0" +azure_csi_node_registrar_image_tag: "v2.4.0" +azure_csi_snapshotter_image_tag: "v3.0.3" +azure_csi_plugin_version: "v1.10.0" +azure_csi_plugin_image_repo: "mcr.microsoft.com/k8s/csi" +azure_csi_plugin_image_tag: "{{ azure_csi_plugin_version }}" + +gcp_pd_csi_image_repo: "gke.gcr.io" +gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" +gcp_pd_csi_provisioner_image_tag: "v1.5.0-gke.0" +gcp_pd_csi_attacher_image_tag: "v2.1.1-gke.0" +gcp_pd_csi_resizer_image_tag: "v0.4.0-gke.0" +gcp_pd_csi_registrar_image_tag: "v1.2.0-gke.0" + +dashboard_image_repo: "{{ docker_image_repo }}/kubernetesui/dashboard" +dashboard_image_tag: 
"v2.7.0" +dashboard_metrics_scraper_repo: "{{ docker_image_repo }}/kubernetesui/metrics-scraper" +dashboard_metrics_scraper_tag: "v1.0.8" + +metallb_speaker_image_repo: "{{ quay_image_repo }}/metallb/speaker" +metallb_controller_image_repo: "{{ quay_image_repo }}/metallb/controller" +metallb_version: v0.12.1 + +downloads: + netcheck_server: + enabled: "{{ deploy_netchecker }}" + container: true + repo: "{{ netcheck_server_image_repo }}" + tag: "{{ netcheck_server_image_tag }}" + sha256: "{{ netcheck_server_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + netcheck_agent: + enabled: "{{ deploy_netchecker }}" + container: true + repo: "{{ netcheck_agent_image_repo }}" + tag: "{{ netcheck_agent_image_tag }}" + sha256: "{{ netcheck_agent_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + etcd: + container: "{{ etcd_deployment_type != 'host' }}" + file: "{{ etcd_deployment_type == 'host' }}" + enabled: true + version: "{{ etcd_version }}" + dest: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + repo: "{{ etcd_image_repo }}" + tag: "{{ etcd_image_tag }}" + sha256: >- + {{ etcd_binary_checksum if (etcd_deployment_type == 'host') + else etcd_digest_checksum|d(None) }} + url: "{{ etcd_download_url }}" + unarchive: "{{ etcd_deployment_type == 'host' }}" + owner: "root" + mode: "0755" + groups: + - etcd + + cni: + enabled: true + file: true + version: "{{ cni_version }}" + dest: "{{ local_release_dir }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + sha256: "{{ cni_binary_checksum }}" + url: "{{ cni_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + kubeadm: + enabled: true + file: true + version: "{{ kubeadm_version }}" + dest: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}" + sha256: "{{ kubeadm_binary_checksum }}" + url: "{{ kubeadm_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + kubelet: + enabled: true + file: true + version: "{{ kube_version }}" + dest: "{{ local_release_dir }}/kubelet-{{ kube_version }}-{{ image_arch }}" + sha256: "{{ kubelet_binary_checksum }}" + url: "{{ kubelet_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + kubectl: + enabled: true + file: true + version: "{{ kube_version }}" + dest: "{{ local_release_dir }}/kubectl-{{ kube_version }}-{{ image_arch }}" + sha256: "{{ kubectl_binary_checksum }}" + url: "{{ kubectl_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - kube_control_plane + + crictl: + file: true + enabled: true + version: "{{ crictl_version }}" + dest: "{{ local_release_dir }}/crictl-{{ crictl_version }}-linux-{{ image_arch }}.tar.gz" + sha256: "{{ crictl_binary_checksum }}" + url: "{{ crictl_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + crio: + file: true + enabled: "{{ container_manager == 'crio' }}" + version: "{{ crio_version }}" + dest: "{{ local_release_dir }}/cri-o.{{ image_arch }}.{{ crio_version }}tar.gz" + sha256: "{{ crio_archive_checksum }}" + url: "{{ crio_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + cri_dockerd: + file: true + enabled: "{{ container_manager == 'docker' }}" + version: "{{ cri_dockerd_version }}" + dest: "{{ local_release_dir }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tar.gz" + sha256: "{{ cri_dockerd_archive_checksum }}" + url: "{{ 
cri_dockerd_download_url }}" + unarchive: true + unarchive_extra_opts: + - --strip=1 + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + crun: + file: true + enabled: "{{ crun_enabled }}" + version: "{{ crun_version }}" + dest: "{{ local_release_dir }}/crun" + sha256: "{{ crun_binary_checksum }}" + url: "{{ crun_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + youki: + file: true + enabled: "{{ youki_enabled }}" + version: "{{ youki_version }}" + dest: "{{ local_release_dir }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux.tar.gz" + sha256: "{{ youki_archive_checksum }}" + url: "{{ youki_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + runc: + file: true + enabled: "{{ container_manager == 'containerd' }}" + version: "{{ runc_version }}" + dest: "{{ local_release_dir }}/runc" + sha256: "{{ runc_binary_checksum }}" + url: "{{ runc_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + kata_containers: + enabled: "{{ kata_containers_enabled }}" + file: true + version: "{{ kata_containers_version }}" + dest: "{{ local_release_dir }}/kata-static-{{ kata_containers_version }}-{{ image_arch }}.tar.xz" + sha256: "{{ kata_containers_binary_checksum }}" + url: "{{ kata_containers_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + containerd: + enabled: "{{ container_manager == 'containerd' }}" + file: true + version: "{{ containerd_version }}" + dest: "{{ local_release_dir }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" + sha256: "{{ containerd_archive_checksum }}" + url: "{{ containerd_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + gvisor_runsc: + enabled: "{{ gvisor_enabled }}" + file: true + version: "{{ gvisor_version }}" + dest: "{{ local_release_dir }}/gvisor-runsc" + sha256: "{{ gvisor_runsc_binary_checksum }}" + url: "{{ gvisor_runsc_download_url }}" + unarchive: false + owner: "root" + mode: 755 + groups: + - k8s_cluster + + gvisor_containerd_shim: + enabled: "{{ gvisor_enabled }}" + file: true + version: "{{ gvisor_version }}" + dest: "{{ local_release_dir }}/gvisor-containerd-shim-runsc-v1" + sha256: "{{ gvisor_containerd_shim_binary_checksum }}" + url: "{{ gvisor_containerd_shim_runsc_download_url }}" + unarchive: false + owner: "root" + mode: 755 + groups: + - k8s_cluster + + nerdctl: + file: true + enabled: "{{ container_manager == 'containerd' }}" + version: "{{ nerdctl_version }}" + dest: "{{ local_release_dir }}/nerdctl-{{ nerdctl_version }}-linux-{{ image_arch }}.tar.gz" + sha256: "{{ nerdctl_archive_checksum }}" + url: "{{ nerdctl_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + skopeo: + file: true + enabled: "{{ container_manager == 'crio' }}" + version: "{{ skopeo_version }}" + dest: "{{ local_release_dir }}/skopeo" + sha256: "{{ skopeo_binary_checksum }}" + url: "{{ skopeo_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - kube_control_plane + + cilium: + enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" + container: true + repo: "{{ cilium_image_repo }}" + tag: "{{ cilium_image_tag }}" + sha256: "{{ cilium_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_operator: + enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | 
default(false) | bool }}" + container: true + repo: "{{ cilium_operator_image_repo }}" + tag: "{{ cilium_operator_image_tag }}" + sha256: "{{ cilium_operator_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_hubble_relay: + enabled: "{{ cilium_enable_hubble }}" + container: true + repo: "{{ cilium_hubble_relay_image_repo }}" + tag: "{{ cilium_hubble_relay_image_tag }}" + sha256: "{{ cilium_hubble_relay_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_hubble_certgen: + enabled: "{{ cilium_enable_hubble }}" + container: true + repo: "{{ cilium_hubble_certgen_image_repo }}" + tag: "{{ cilium_hubble_certgen_image_tag }}" + sha256: "{{ cilium_hubble_certgen_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_hubble_ui: + enabled: "{{ cilium_enable_hubble }}" + container: true + repo: "{{ cilium_hubble_ui_image_repo }}" + tag: "{{ cilium_hubble_ui_image_tag }}" + sha256: "{{ cilium_hubble_ui_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_hubble_ui_backend: + enabled: "{{ cilium_enable_hubble }}" + container: true + repo: "{{ cilium_hubble_ui_backend_image_repo }}" + tag: "{{ cilium_hubble_ui_backend_image_tag }}" + sha256: "{{ cilium_hubble_ui_backend_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_hubble_envoy: + enabled: "{{ cilium_enable_hubble }}" + container: true + repo: "{{ cilium_hubble_envoy_image_repo }}" + tag: "{{ cilium_hubble_envoy_image_tag }}" + sha256: "{{ cilium_hubble_envoy_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + ciliumcli: + enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" + file: true + version: "{{ cilium_cli_version }}" + dest: "{{ local_release_dir }}/cilium" + sha256: "{{ ciliumcli_binary_checksum }}" + url: "{{ ciliumcli_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + multus: + enabled: "{{ kube_network_plugin_multus }}" + container: true + repo: "{{ multus_image_repo }}" + tag: "{{ multus_image_tag }}" + sha256: "{{ multus_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + flannel: + enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" + container: true + repo: "{{ flannel_image_repo }}" + tag: "{{ flannel_image_tag }}" + sha256: "{{ flannel_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + flannel_init: + enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" + container: true + repo: "{{ flannel_init_image_repo }}" + tag: "{{ flannel_init_image_tag }}" + sha256: "{{ flannel_init_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calicoctl: + enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" + file: true + version: "{{ calico_ctl_version }}" + dest: "{{ local_release_dir }}/calicoctl" + sha256: "{{ calicoctl_binary_checksum }}" + url: "{{ calicoctl_download_url }}" + mirrors: + - "{{ calicoctl_alternate_download_url }}" + - "{{ calicoctl_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + calico_node: + enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" + container: true + repo: "{{ calico_node_image_repo }}" + tag: "{{ calico_node_image_tag }}" + sha256: "{{ calico_node_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_cni: + enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" + 
container: true + repo: "{{ calico_cni_image_repo }}" + tag: "{{ calico_cni_image_tag }}" + sha256: "{{ calico_cni_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_flexvol: + enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" + container: true + repo: "{{ calico_flexvol_image_repo }}" + tag: "{{ calico_flexvol_image_tag }}" + sha256: "{{ calico_flexvol_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_policy: + enabled: "{{ enable_network_policy and kube_network_plugin in ['calico', 'canal'] }}" + container: true + repo: "{{ calico_policy_image_repo }}" + tag: "{{ calico_policy_image_tag }}" + sha256: "{{ calico_policy_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_typha: + enabled: "{{ typha_enabled }}" + container: true + repo: "{{ calico_typha_image_repo }}" + tag: "{{ calico_typha_image_tag }}" + sha256: "{{ calico_typha_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_apiserver: + enabled: "{{ calico_apiserver_enabled }}" + container: true + repo: "{{ calico_apiserver_image_repo }}" + tag: "{{ calico_apiserver_image_tag }}" + sha256: "{{ calico_apiserver_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_crds: + file: true + enabled: "{{ kube_network_plugin == 'calico' and calico_datastore == 'kdd' }}" + version: "{{ calico_version }}" + dest: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ calico_version }}.tar.gz" + sha256: "{{ calico_crds_archive_checksum }}" + url: "{{ calico_crds_download_url }}" + unarchive: true + unarchive_extra_opts: + - "{{ '--strip=6' if (calico_version is version('v3.22.3','<')) else '--strip=3' }}" + - "--wildcards" + - "{{ '*/_includes/charts/calico/crds/kdd/' if (calico_version is version('v3.22.3','<')) else '*/libcalico-go/config/crd/' }}" + owner: "root" + mode: "0755" + groups: + - kube_control_plane + + weave_kube: + enabled: "{{ kube_network_plugin == 'weave' }}" + container: true + repo: "{{ weave_kube_image_repo }}" + tag: "{{ weave_kube_image_tag }}" + sha256: "{{ weave_kube_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + weave_npc: + enabled: "{{ kube_network_plugin == 'weave' }}" + container: true + repo: "{{ weave_npc_image_repo }}" + tag: "{{ weave_npc_image_tag }}" + sha256: "{{ weave_npc_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + kube_ovn: + enabled: "{{ kube_network_plugin == 'kube-ovn' }}" + container: true + repo: "{{ kube_ovn_container_image_repo }}" + tag: "{{ kube_ovn_container_image_tag }}" + sha256: "{{ kube_ovn_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + kube_router: + enabled: "{{ kube_network_plugin == 'kube-router' }}" + container: true + repo: "{{ kube_router_image_repo }}" + tag: "{{ kube_router_image_tag }}" + sha256: "{{ kube_router_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + pod_infra: + enabled: true + container: true + repo: "{{ pod_infra_image_repo }}" + tag: "{{ pod_infra_image_tag }}" + sha256: "{{ pod_infra_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + kube-vip: + enabled: "{{ kube_vip_enabled }}" + container: true + repo: "{{ kube_vip_image_repo }}" + tag: "{{ kube_vip_image_tag }}" + sha256: "{{ kube_vip_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + nginx: + enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}" + container: true + repo: "{{ nginx_image_repo }}" + tag: "{{ nginx_image_tag }}" + sha256: 
"{{ nginx_digest_checksum|default(None) }}" + groups: + - kube_node + + haproxy: + enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' }}" + container: true + repo: "{{ haproxy_image_repo }}" + tag: "{{ haproxy_image_tag }}" + sha256: "{{ haproxy_digest_checksum|default(None) }}" + groups: + - kube_node + + coredns: + enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}" + container: true + repo: "{{ coredns_image_repo }}" + tag: "{{ coredns_image_tag }}" + sha256: "{{ coredns_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + nodelocaldns: + enabled: "{{ enable_nodelocaldns }}" + container: true + repo: "{{ nodelocaldns_image_repo }}" + tag: "{{ nodelocaldns_image_tag }}" + sha256: "{{ nodelocaldns_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + dnsautoscaler: + enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}" + container: true + repo: "{{ dnsautoscaler_image_repo }}" + tag: "{{ dnsautoscaler_image_tag }}" + sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + helm: + enabled: "{{ helm_enabled }}" + file: true + version: "{{ helm_version }}" + dest: "{{ local_release_dir }}/helm-{{ helm_version }}/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + sha256: "{{ helm_archive_checksum }}" + url: "{{ helm_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - kube_control_plane + + krew: + enabled: "{{ krew_enabled }}" + file: true + version: "{{ krew_version }}" + dest: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz" + sha256: "{{ krew_archive_checksum }}" + url: "{{ krew_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - kube_control_plane + + registry: + enabled: "{{ registry_enabled }}" + container: true + repo: "{{ registry_image_repo }}" + tag: "{{ registry_image_tag }}" + sha256: "{{ registry_digest_checksum|default(None) }}" + groups: + - kube_node + + metrics_server: + enabled: "{{ metrics_server_enabled }}" + container: true + repo: "{{ metrics_server_image_repo }}" + tag: "{{ metrics_server_image_tag }}" + sha256: "{{ metrics_server_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + local_volume_provisioner: + enabled: "{{ local_volume_provisioner_enabled }}" + container: true + repo: "{{ local_volume_provisioner_image_repo }}" + tag: "{{ local_volume_provisioner_image_tag }}" + sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}" + groups: + - kube_node + + cephfs_provisioner: + enabled: "{{ cephfs_provisioner_enabled }}" + container: true + repo: "{{ cephfs_provisioner_image_repo }}" + tag: "{{ cephfs_provisioner_image_tag }}" + sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}" + groups: + - kube_node + + rbd_provisioner: + enabled: "{{ rbd_provisioner_enabled }}" + container: true + repo: "{{ rbd_provisioner_image_repo }}" + tag: "{{ rbd_provisioner_image_tag }}" + sha256: "{{ rbd_provisioner_digest_checksum|default(None) }}" + groups: + - kube_node + + local_path_provisioner: + enabled: "{{ local_path_provisioner_enabled }}" + container: true + repo: "{{ local_path_provisioner_image_repo }}" + tag: "{{ local_path_provisioner_image_tag }}" + sha256: "{{ local_path_provisioner_digest_checksum|default(None) }}" + groups: + - kube_node + + ingress_nginx_controller: + enabled: "{{ ingress_nginx_enabled }}" + container: true + repo: "{{ ingress_nginx_controller_image_repo }}" + tag: "{{ ingress_nginx_controller_image_tag }}" + 
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}" + groups: + - kube_node + + ingress_alb_controller: + enabled: "{{ ingress_alb_enabled }}" + container: true + repo: "{{ alb_ingress_image_repo }}" + tag: "{{ alb_ingress_image_tag }}" + sha256: "{{ ingress_alb_controller_digest_checksum|default(None) }}" + groups: + - kube_node + + cert_manager_controller: + enabled: "{{ cert_manager_enabled }}" + container: true + repo: "{{ cert_manager_controller_image_repo }}" + tag: "{{ cert_manager_controller_image_tag }}" + sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}" + groups: + - kube_node + + cert_manager_cainjector: + enabled: "{{ cert_manager_enabled }}" + container: true + repo: "{{ cert_manager_cainjector_image_repo }}" + tag: "{{ cert_manager_cainjector_image_tag }}" + sha256: "{{ cert_manager_cainjector_digest_checksum|default(None) }}" + groups: + - kube_node + + cert_manager_webhook: + enabled: "{{ cert_manager_enabled }}" + container: true + repo: "{{ cert_manager_webhook_image_repo }}" + tag: "{{ cert_manager_webhook_image_tag }}" + sha256: "{{ cert_manager_webhook_digest_checksum|default(None) }}" + groups: + - kube_node + + csi_attacher: + enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" + container: true + repo: "{{ csi_attacher_image_repo }}" + tag: "{{ csi_attacher_image_tag }}" + sha256: "{{ csi_attacher_digest_checksum|default(None) }}" + groups: + - kube_node + + csi_provisioner: + enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" + container: true + repo: "{{ csi_provisioner_image_repo }}" + tag: "{{ csi_provisioner_image_tag }}" + sha256: "{{ csi_provisioner_digest_checksum|default(None) }}" + groups: + - kube_node + + csi_snapshotter: + enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" + container: true + repo: "{{ csi_snapshotter_image_repo }}" + tag: "{{ csi_snapshotter_image_tag }}" + sha256: "{{ csi_snapshotter_digest_checksum|default(None) }}" + groups: + - kube_node + + snapshot_controller: + enabled: "{{ csi_snapshot_controller_enabled }}" + container: true + repo: "{{ snapshot_controller_image_repo }}" + tag: "{{ snapshot_controller_image_tag }}" + sha256: "{{ snapshot_controller_digest_checksum|default(None) }}" + groups: + - kube_node + + csi_resizer: + enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" + container: true + repo: "{{ csi_resizer_image_repo }}" + tag: "{{ csi_resizer_image_tag }}" + sha256: "{{ csi_resizer_digest_checksum|default(None) }}" + groups: + - kube_node + + csi_node_driver_registrar: + enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" + container: true + repo: "{{ csi_node_driver_registrar_image_repo }}" + tag: "{{ csi_node_driver_registrar_image_tag }}" + sha256: "{{ csi_node_driver_registrar_digest_checksum|default(None) }}" + groups: + - kube_node + + cinder_csi_plugin: + enabled: "{{ cinder_csi_enabled }}" + container: true + repo: "{{ cinder_csi_plugin_image_repo }}" + tag: "{{ cinder_csi_plugin_image_tag }}" + sha256: "{{ cinder_csi_plugin_digest_checksum|default(None) }}" + groups: + - kube_node + + aws_ebs_csi_plugin: + enabled: "{{ aws_ebs_csi_enabled }}" + container: true + repo: "{{ aws_ebs_csi_plugin_image_repo }}" + tag: "{{ aws_ebs_csi_plugin_image_tag }}" + sha256: "{{ aws_ebs_csi_plugin_digest_checksum|default(None) }}" + groups: + - kube_node + + dashboard: + enabled: "{{ dashboard_enabled }}" + container: true + repo: "{{ dashboard_image_repo }}" + tag: "{{ dashboard_image_tag }}" + sha256: "{{ dashboard_digest_checksum|default(None) 
}}" + groups: + - kube_control_plane + + dashboard_metrics_scrapper: + enabled: "{{ dashboard_enabled }}" + container: true + repo: "{{ dashboard_metrics_scraper_repo }}" + tag: "{{ dashboard_metrics_scraper_tag }}" + sha256: "{{ dashboard_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + metallb_speaker: + enabled: "{{ metallb_speaker_enabled }}" + container: true + repo: "{{ metallb_speaker_image_repo }}" + tag: "{{ metallb_version }}" + sha256: "{{ metallb_speaker_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + metallb_controller: + enabled: "{{ metallb_enabled }}" + container: true + repo: "{{ metallb_controller_image_repo }}" + tag: "{{ metallb_version }}" + sha256: "{{ metallb_controller_digest_checksum|default(None) }}" + groups: + - kube_control_plane + +download_defaults: + container: false + file: false + repo: None + tag: None + enabled: false + dest: None + version: None + url: None + unarchive: false + owner: "{{ kube_owner }}" + mode: None diff --git a/kubespray/extra_playbooks/roles/download/meta/main.yml b/kubespray/extra_playbooks/roles/download/meta/main.yml new file mode 100644 index 0000000..61d3ffe --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/meta/main.yml @@ -0,0 +1,2 @@ +--- +allow_duplicates: true diff --git a/kubespray/extra_playbooks/roles/download/tasks/check_pull_required.yml b/kubespray/extra_playbooks/roles/download/tasks/check_pull_required.yml new file mode 100644 index 0000000..c2f9ead --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/tasks/check_pull_required.yml @@ -0,0 +1,25 @@ +--- +# The image_info_command depends on the Container Runtime and will output something like the following: +# nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc... 
+- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell + shell: "{{ image_info_command }}" + register: docker_images + changed_when: false + check_mode: no + when: not download_always_pull + +- name: check_pull_required | Set pull_required if the desired image is not yet loaded + set_fact: + pull_required: >- + {%- if image_reponame | regex_replace('^docker\.io/(library/)?','') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%} + when: not download_always_pull + +- name: check_pull_required | Check that the local digest sha256 corresponds to the given image tag + assert: + that: "{{ download.repo }}:{{ download.tag }} in docker_images.stdout.split(',')" + when: + - not download_always_pull + - not pull_required + - pull_by_digest + tags: + - asserts diff --git a/kubespray/extra_playbooks/roles/download/tasks/download_container.yml b/kubespray/extra_playbooks/roles/download/tasks/download_container.yml new file mode 100644 index 0000000..41790fe --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/tasks/download_container.yml @@ -0,0 +1,125 @@ +--- +- block: + - name: set default values for flag variables + set_fact: + image_is_cached: false + image_changed: false + pull_required: "{{ download_always_pull }}" + tags: + - facts + + - name: download_container | Set a few facts + import_tasks: set_container_facts.yml + tags: + - facts + + - name: download_container | Prepare container download + include_tasks: check_pull_required.yml + when: + - not download_always_pull + + - debug: # noqa unnamed-task + msg: "Pull {{ image_reponame }} required is: {{ pull_required }}" + + - name: download_container | Determine if image is in cache + stat: + path: "{{ image_path_cached }}" + get_attributes: no + get_checksum: no + get_mime: no + delegate_to: localhost + connection: local + delegate_facts: no + register: cache_image + changed_when: false + become: false + when: + - download_force_cache + + - name: download_container | Set fact indicating if image is in cache + set_fact: + image_is_cached: "{{ cache_image.stat.exists }}" + tags: + - facts + when: + - download_force_cache + + - name: Stop if image not in cache on ansible host when download_force_cache=true + assert: + that: image_is_cached + msg: "Image cache file {{ image_path_cached }} not found for {{ image_reponame }} on localhost" + when: + - download_force_cache + - not download_run_once + + - name: download_container | Download image if required + command: "{{ image_pull_command_on_localhost if download_localhost else image_pull_command }} {{ image_reponame }}" + delegate_to: "{{ download_delegate if download_run_once else inventory_hostname }}" + delegate_facts: yes + run_once: "{{ download_run_once }}" + register: pull_task_result + until: pull_task_result is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 4 + become: "{{ user_can_become_root | default(false) or not download_localhost }}" + environment: "{{ proxy_env if container_manager == 'containerd' else omit }}" + when: + - pull_required or download_run_once + - not image_is_cached + + - name: download_container | Save and compress image + shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell + delegate_to: "{{ download_delegate }}" + delegate_facts: no + register: container_save_status + failed_when: 
container_save_status.stderr + run_once: true + become: "{{ user_can_become_root | default(false) or not download_localhost }}" + when: + - not image_is_cached + - download_run_once + + - name: download_container | Copy image to ansible host cache + synchronize: + src: "{{ image_path_final }}" + dest: "{{ image_path_cached }}" + use_ssh_args: true + mode: pull + when: + - not image_is_cached + - download_run_once + - not download_localhost + - download_delegate == inventory_hostname + + - name: download_container | Upload image to node if it is cached + synchronize: + src: "{{ image_path_cached }}" + dest: "{{ image_path_final }}" + use_ssh_args: true + mode: push + delegate_facts: no + register: upload_image + failed_when: not upload_image + until: upload_image is succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + when: + - pull_required + - download_force_cache + + - name: download_container | Load image into the local container registry + shell: "{{ image_load_command }}" # noqa 305 image_load_command uses pipes, therefore requires shell + register: container_load_status + failed_when: container_load_status is failed + when: + - pull_required + - download_force_cache + + - name: download_container | Remove container image from cache + file: + state: absent + path: "{{ image_path_final }}" + when: + - not download_keep_remote_cache + tags: + - download diff --git a/kubespray/extra_playbooks/roles/download/tasks/download_file.yml b/kubespray/extra_playbooks/roles/download/tasks/download_file.yml new file mode 100644 index 0000000..376a15e --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/tasks/download_file.yml @@ -0,0 +1,141 @@ +--- +- block: + - name: prep_download | Set a few facts + set_fact: + download_force_cache: "{{ true if download_run_once else download_force_cache }}" + + - name: download_file | Starting download of file + debug: + msg: "{{ download.url }}" + run_once: "{{ download_run_once }}" + + - name: download_file | Set pathname of cached file + set_fact: + file_path_cached: "{{ download_cache_dir }}/{{ download.dest | basename }}" + tags: + - facts + + - name: download_file | Create dest directory on node + file: + path: "{{ download.dest | dirname }}" + owner: "{{ download.owner | default(omit) }}" + mode: 0755 + state: directory + recurse: yes + + - name: download_file | Create local cache directory + file: + path: "{{ file_path_cached | dirname }}" + state: directory + recurse: yes + delegate_to: localhost + connection: local + delegate_facts: false + run_once: true + become: false + when: + - download_force_cache + tags: + - localhost + + - name: download_file | Create cache directory on download_delegate host + file: + path: "{{ file_path_cached | dirname }}" + state: directory + recurse: yes + delegate_to: "{{ download_delegate }}" + delegate_facts: false + run_once: true + when: + - download_force_cache + - not download_localhost + + # We check a number of mirrors that may hold the file and pick a working one at random + # This task will avoid logging it's parameters to not leak environment passwords in the log + - name: download_file | Validate mirrors + uri: + url: "{{ mirror }}" + method: HEAD + validate_certs: "{{ download_validate_certs }}" + url_username: "{{ download.username | default(omit) }}" + url_password: "{{ download.password | default(omit) }}" + force_basic_auth: "{{ download.force_basic_auth | default(omit) }}" + delegate_to: "{{ download_delegate if download_force_cache else inventory_hostname }}" + run_once: "{{ 
download_force_cache }}" + register: uri_result + until: uri_result is success + retries: 4 + delay: "{{ retry_stagger | default(5) }}" + environment: "{{ proxy_env }}" + no_log: "{{ not (unsafe_show_logs|bool) }}" + loop: "{{ download.mirrors | default([download.url]) }}" + loop_control: + loop_var: mirror + ignore_errors: true + + # Ansible 2.9 requires we convert a generator to a list + - name: download_file | Get the list of working mirrors + set_fact: + valid_mirror_urls: "{{ uri_result.results | selectattr('failed', 'eq', False) | map(attribute='mirror') | list }}" + delegate_to: "{{ download_delegate if download_force_cache else inventory_hostname }}" + + # This must always be called, to check if the checksum matches. On no-match the file is re-downloaded. + # This task will avoid logging it's parameters to not leak environment passwords in the log + - name: download_file | Download item + get_url: + url: "{{ valid_mirror_urls | random }}" + dest: "{{ file_path_cached if download_force_cache else download.dest }}" + owner: "{{ omit if download_localhost else (download.owner | default(omit)) }}" + mode: "{{ omit if download_localhost else (download.mode | default(omit)) }}" + checksum: "{{ 'sha256:' + download.sha256 if download.sha256 else omit }}" + validate_certs: "{{ download_validate_certs }}" + url_username: "{{ download.username | default(omit) }}" + url_password: "{{ download.password | default(omit) }}" + force_basic_auth: "{{ download.force_basic_auth | default(omit) }}" + delegate_to: "{{ download_delegate if download_force_cache else inventory_hostname }}" + run_once: "{{ download_force_cache }}" + register: get_url_result + become: "{{ not download_localhost }}" + until: "'OK' in get_url_result.msg or 'file already exists' in get_url_result.msg" + retries: 4 + delay: "{{ retry_stagger | default(5) }}" + environment: "{{ proxy_env }}" + no_log: "{{ not (unsafe_show_logs|bool) }}" + + - name: download_file | Copy file back to ansible host file cache + synchronize: + src: "{{ file_path_cached }}" + dest: "{{ file_path_cached }}" + use_ssh_args: true + mode: pull + when: + - download_force_cache + - not download_localhost + - download_delegate == inventory_hostname + + - name: download_file | Copy file from cache to nodes, if it is available + synchronize: + src: "{{ file_path_cached }}" + dest: "{{ download.dest }}" + use_ssh_args: true + mode: push + register: get_task + until: get_task is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 4 + when: + - download_force_cache + + - name: download_file | Set mode and owner + file: + path: "{{ download.dest }}" + mode: "{{ download.mode | default(omit) }}" + owner: "{{ download.owner | default(omit) }}" + when: + - download_force_cache + + - name: "download_file | Extract file archives" + include_tasks: "extract_file.yml" + + tags: + - download diff --git a/kubespray/extra_playbooks/roles/download/tasks/extract_file.yml b/kubespray/extra_playbooks/roles/download/tasks/extract_file.yml new file mode 100644 index 0000000..81858dd --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/tasks/extract_file.yml @@ -0,0 +1,11 @@ +--- +- name: extract_file | Unpacking archive + unarchive: + src: "{{ download.dest }}" + dest: "{{ download.dest | dirname }}" + owner: "{{ download.owner | default(omit) }}" + mode: "{{ download.mode | default(omit) }}" + copy: no + extra_opts: "{{ download.unarchive_extra_opts|default(omit) }}" + when: + - download.unarchive | default(false) diff --git 
a/kubespray/extra_playbooks/roles/download/tasks/main.yml b/kubespray/extra_playbooks/roles/download/tasks/main.yml new file mode 100644 index 0000000..536c293 --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: download | Prepare working directories and variables + import_tasks: prep_download.yml + when: + - not skip_downloads|default(false) + tags: + - download + - upload + +- name: download | Get kubeadm binary and list of required images + include_tasks: prep_kubeadm_images.yml + when: + - not skip_downloads|default(false) + - inventory_hostname in groups['kube_control_plane'] + tags: + - download + - upload + +- name: download | Download files / images + include_tasks: "{{ include_file }}" + loop: "{{ downloads | combine(kubeadm_images) | dict2items }}" + vars: + download: "{{ download_defaults | combine(item.value) }}" + include_file: "download_{% if download.container %}container{% else %}file{% endif %}.yml" + when: + - not skip_downloads | default(false) + - download.enabled + - item.value.enabled + - (not (item.value.container | default(false))) or (item.value.container and download_container) + - (download_run_once and inventory_hostname == download_delegate) or (group_names | intersect(download.groups) | length) diff --git a/kubespray/extra_playbooks/roles/download/tasks/prep_download.yml b/kubespray/extra_playbooks/roles/download/tasks/prep_download.yml new file mode 100644 index 0000000..9419f24 --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/tasks/prep_download.yml @@ -0,0 +1,92 @@ +--- +- name: prep_download | Set a few facts + set_fact: + download_force_cache: "{{ true if download_run_once else download_force_cache }}" + tags: + - facts + +- name: prep_download | On localhost, check if passwordless root is possible + command: "true" + delegate_to: localhost + connection: local + run_once: true + register: test_become + changed_when: false + ignore_errors: true # noqa ignore-errors + become: true + when: + - download_localhost + tags: + - localhost + - asserts + +- name: prep_download | On localhost, check if user has access to the container runtime without using sudo + shell: "{{ image_info_command_on_localhost }}" # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell + delegate_to: localhost + connection: local + run_once: true + register: test_docker + changed_when: false + ignore_errors: true # noqa ignore-errors + become: false + when: + - download_localhost + tags: + - localhost + - asserts + +- name: prep_download | Parse the outputs of the previous commands + set_fact: + user_in_docker_group: "{{ not test_docker.failed }}" + user_can_become_root: "{{ not test_become.failed }}" + when: + - download_localhost + tags: + - localhost + - asserts + +- name: prep_download | Check that local user is in group or can become root + assert: + that: "user_in_docker_group or user_can_become_root" + msg: >- + Error: User is not in docker group and cannot become root. When download_localhost is true, at least one of these two conditions must be met. 
+ when: + - download_localhost + tags: + - localhost + - asserts + +- name: prep_download | Register docker images info + shell: "{{ image_info_command }}" # noqa 305 image_info_command contains pipe therefore requires shell + no_log: "{{ not (unsafe_show_logs|bool) }}" + register: docker_images + failed_when: false + changed_when: false + check_mode: no + when: download_container + +- name: prep_download | Create staging directory on remote node + file: + path: "{{ local_release_dir }}/images" + state: directory + recurse: yes + mode: 0755 + owner: "{{ ansible_ssh_user | default(ansible_user_id) }}" + when: + - ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: prep_download | Create local cache for files and images on control node + file: + path: "{{ download_cache_dir }}/images" + state: directory + recurse: yes + mode: 0755 + delegate_to: localhost + connection: local + delegate_facts: no + run_once: true + become: false + when: + - download_force_cache + tags: + - localhost diff --git a/kubespray/extra_playbooks/roles/download/tasks/prep_kubeadm_images.yml b/kubespray/extra_playbooks/roles/download/tasks/prep_kubeadm_images.yml new file mode 100644 index 0000000..aa21849 --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/tasks/prep_kubeadm_images.yml @@ -0,0 +1,71 @@ +--- +- name: prep_kubeadm_images | Check kubeadm version matches kubernetes version + fail: + msg: "Kubeadm version {{ kubeadm_version }} does not match kubernetes version {{ kube_version }}" + when: + - not skip_downloads | default(false) + - not kubeadm_version == downloads.kubeadm.version + +- name: prep_kubeadm_images | Download kubeadm binary + include_tasks: "download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.kubeadm) }}" + when: + - not skip_downloads | default(false) + - downloads.kubeadm.enabled + +- name: prep_kubeadm_images | Create kubeadm config + template: + src: "kubeadm-images.yaml.j2" + dest: "{{ kube_config_dir }}/kubeadm-images.yaml" + mode: 0644 + when: + - not skip_kubeadm_images|default(false) + +- name: prep_kubeadm_images | Copy kubeadm binary from download dir to system path + copy: + src: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}" + dest: "{{ bin_dir }}/kubeadm" + mode: 0755 + remote_src: true + +- name: prep_kubeadm_images | Set kubeadm binary permissions + file: + path: "{{ bin_dir }}/kubeadm" + mode: "0755" + state: file + +- name: prep_kubeadm_images | Generate list of required images + shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'" + args: + executable: /bin/bash + register: kubeadm_images_raw + run_once: true + changed_when: false + when: + - not skip_kubeadm_images|default(false) + +- name: prep_kubeadm_images | Parse list of images + vars: + kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}" + set_fact: + kubeadm_image: + key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*','')).split(':')[0] }}" + value: + enabled: true + container: true + repo: "{{ item | regex_replace('^(.*):.*$','\\1') }}" + tag: "{{ item | regex_replace('^.*:(.*)$','\\1') }}" + groups: k8s_cluster + loop: "{{ kubeadm_images_list | flatten(levels=1) }}" + register: kubeadm_images_cooked + run_once: true + when: + - not skip_kubeadm_images|default(false) + +- name: prep_kubeadm_images | Convert list of images to dict for later use + set_fact: + kubeadm_images: "{{ kubeadm_images_cooked.results | 
map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}" + run_once: true + when: + - not skip_kubeadm_images|default(false) diff --git a/kubespray/extra_playbooks/roles/download/tasks/set_container_facts.yml b/kubespray/extra_playbooks/roles/download/tasks/set_container_facts.yml new file mode 100644 index 0000000..9d36c24 --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/tasks/set_container_facts.yml @@ -0,0 +1,55 @@ +--- +- name: set_container_facts | Display the name of the image being processed + debug: + msg: "{{ download.repo }}" + +- name: set_container_facts | Set if containers should be pulled by digest + set_fact: + pull_by_digest: "{{ download.sha256 is defined and download.sha256 }}" + +- name: set_container_facts | Define by what name to pull the image + set_fact: + image_reponame: >- + {%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%} + +- name: set_container_facts | Define file name of image + set_fact: + image_filename: "{{ image_reponame | regex_replace('/|\0|:', '_') }}.tar" + +- name: set_container_facts | Define path of image + set_fact: + image_path_cached: "{{ download_cache_dir }}/images/{{ image_filename }}" + image_path_final: "{{ local_release_dir }}/images/{{ image_filename }}" + +- name: Set image save/load command for docker + set_fact: + image_save_command: "{{ docker_bin_dir }}/docker save {{ image_reponame }} | gzip -{{ download_compress }} > {{ image_path_final }}" + image_load_command: "{{ docker_bin_dir }}/docker load < {{ image_path_final }}" + when: container_manager == 'docker' + +- name: Set image save/load command for containerd + set_fact: + image_save_command: "{{ bin_dir }}/nerdctl -n k8s.io image save -o {{ image_path_final }} {{ image_reponame }}" + image_load_command: "{{ bin_dir }}/nerdctl -n k8s.io image load < {{ image_path_final }}" + when: container_manager == 'containerd' + +- name: Set image save/load command for crio + set_fact: + image_save_command: "{{ bin_dir }}/skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }} 2>/dev/null" + image_load_command: "{{ bin_dir }}/skopeo copy docker-archive:{{ image_path_final }} containers-storage:{{ image_reponame }} 2>/dev/null" + when: container_manager == 'crio' + +- name: Set image save/load command for docker on localhost + set_fact: + image_save_command_on_localhost: "{{ docker_bin_dir }}/docker save {{ image_reponame }} | gzip -{{ download_compress }} > {{ image_path_cached }}" + when: container_manager_on_localhost == 'docker' + +- name: Set image save/load command for containerd on localhost + set_fact: + image_save_command_on_localhost: "{{ containerd_bin_dir }}/ctr -n k8s.io image export --platform linux/{{ image_arch }} {{ image_path_cached }} {{ image_reponame }}" + when: container_manager_on_localhost == 'containerd' + +- name: Set image save/load command for crio on localhost + set_fact: + image_save_command_on_localhost: "{{ bin_dir }}/skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }} 2>/dev/null" + when: container_manager_on_localhost == 'crio' diff --git a/kubespray/extra_playbooks/roles/download/templates/kubeadm-images.yaml.j2 b/kubespray/extra_playbooks/roles/download/templates/kubeadm-images.yaml.j2 new file mode 100644 index 0000000..3a9121d --- /dev/null +++ b/kubespray/extra_playbooks/roles/download/templates/kubeadm-images.yaml.j2 @@ -0,0 +1,25 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +kind: 
InitConfiguration +nodeRegistration: + criSocket: {{ cri_socket }} +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +imageRepository: {{ kube_image_repo }} +kubernetesVersion: {{ kube_version }} +etcd: +{% if etcd_deployment_type == "kubeadm" %} + local: + imageRepository: "{{ etcd_image_repo | regex_replace("/etcd$","") }}" + imageTag: "{{ etcd_image_tag }}" +{% else %} + external: + endpoints: +{% for endpoint in etcd_access_addresses.split(',') %} + - {{ endpoint }} +{% endfor %} +{% endif %} +dns: + type: CoreDNS + imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }} + imageTag: {{ coredns_image_tag }} diff --git a/kubespray/extra_playbooks/roles/etcd/defaults/main.yml b/kubespray/extra_playbooks/roles/etcd/defaults/main.yml new file mode 100644 index 0000000..bf38ace --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/defaults/main.yml @@ -0,0 +1,122 @@ +--- +# Set etcd user +etcd_owner: etcd + +# Set to false to only do certificate management +etcd_cluster_setup: true +etcd_events_cluster_setup: false + +# Set to true to separate k8s events to a different etcd cluster +etcd_events_cluster_enabled: false + +etcd_backup_prefix: "/var/backups" +etcd_data_dir: "/var/lib/etcd" + +# Number of etcd backups to retain. Set to a value < 0 to retain all backups +etcd_backup_retention_count: -1 + +force_etcd_cert_refresh: true +etcd_config_dir: /etc/ssl/etcd +etcd_cert_dir: "{{ etcd_config_dir }}/ssl" +etcd_cert_dir_mode: "0700" +etcd_cert_group: root +# Note: This does not set up DNS entries. It simply adds the following DNS +# entries to the certificate +etcd_cert_alt_names: + - "etcd.kube-system.svc.{{ dns_domain }}" + - "etcd.kube-system.svc" + - "etcd.kube-system" + - "etcd" +etcd_cert_alt_ips: [] + +etcd_script_dir: "{{ bin_dir }}/etcd-scripts" + +etcd_heartbeat_interval: "250" +etcd_election_timeout: "5000" + +# etcd_snapshot_count: "10000" + +etcd_metrics: "basic" + +# Define in inventory to set a separate port for etcd to expose metrics on +# etcd_metrics_port: 2381 + +## A dictionary of extra environment variables to add to etcd.env, formatted like: +## etcd_extra_vars: +## ETCD_VAR1: "value1" +## ETCD_VAR2: "value2" +etcd_extra_vars: {} + +# Limits +# Limit memory only if <4GB memory on host. 0=unlimited +# This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %}" + +# The default storage size limit is 2G. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +# Uncomment to set CPU share for etcd +# etcd_cpu_limit: 300m + +etcd_blkio_weight: 1000 + +etcd_node_cert_hosts: "{{ groups['k8s_cluster'] }}" + +etcd_compaction_retention: "8" + +# Force clients like etcdctl to use TLS certs (different than peer security) +etcd_secure_client: true + +# Enable peer client cert authentication +etcd_peer_client_auth: true + +# Maximum number of snapshot files to retain (0 is unlimited) +# etcd_max_snapshots: 5 + +# Maximum number of wal files to retain (0 is unlimited) +# etcd_max_wals: 5 + +# Number of loop retries +etcd_retries: 4 + +## Support tls cipher suites. 
+# etcd_tls_cipher_suites: {} +# - TLS_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + +# ETCD 3.5.x issue +# https://groups.google.com/a/kubernetes.io/g/dev/c/B7gJs88XtQc/m/rSgNOzV2BwAJ?utm_medium=email&utm_source=footer +etcd_experimental_initial_corrupt_check: true + +# If this is true, debug information will be displayed but +# may contain some private data, so it is recommended to set it to false +# in the production environment. +unsafe_show_logs: false diff --git a/kubespray/extra_playbooks/roles/etcd/handlers/backup.yml b/kubespray/extra_playbooks/roles/etcd/handlers/backup.yml new file mode 100644 index 0000000..d848cdb --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/handlers/backup.yml @@ -0,0 +1,62 @@ +--- +- name: Backup etcd data + command: /bin/true + notify: + - Refresh Time Fact + - Set Backup Directory + - Create Backup Directory + - Stat etcd v2 data directory + - Backup etcd v2 data + - Backup etcd v3 data + when: etcd_cluster_is_healthy.rc == 0 + +- name: Refresh Time Fact + setup: filter=ansible_date_time + +- name: Set Backup Directory + set_fact: + etcd_backup_directory: "{{ etcd_backup_prefix }}/etcd-{{ ansible_date_time.date }}_{{ ansible_date_time.time }}" + +- name: Create Backup Directory + file: + path: "{{ etcd_backup_directory }}" + state: directory + owner: root + group: root + mode: 0600 + +- name: Stat etcd v2 data directory + stat: + path: "{{ etcd_data_dir }}/member" + get_attributes: no + get_checksum: no + get_mime: no + register: etcd_data_dir_member + +- name: Backup etcd v2 data + when: etcd_data_dir_member.stat.exists + command: >- + {{ bin_dir }}/etcdctl backup + --data-dir {{ etcd_data_dir }} + --backup-dir {{ etcd_backup_directory }} + environment: + ETCDCTL_API: 2 + retries: 3 + register: backup_v2_command + until: backup_v2_command.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + +- name: Backup etcd v3 data + command: >- + {{ bin_dir }}/etcdctl + snapshot save {{ etcd_backup_directory }}/snapshot.db + environment: + ETCDCTL_API: 3 + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}" + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + retries: 3 + register: etcd_backup_v3_command + until: etcd_backup_v3_command.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" diff --git a/kubespray/extra_playbooks/roles/etcd/handlers/backup_cleanup.yml b/kubespray/extra_playbooks/roles/etcd/handlers/backup_cleanup.yml new file mode 100644 index 0000000..e670f46 --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/etcd/handlers/backup_cleanup.yml @@ -0,0 +1,11 @@ +--- +- name: Cleanup etcd backups + command: /bin/true + notify: + - Remove old etcd backups + +- name: Remove old etcd backups + shell: + chdir: "{{ etcd_backup_prefix }}" + cmd: "find . -name 'etcd-*' -type d | sort -n | head -n -{{ etcd_backup_retention_count }} | xargs rm -rf" + when: etcd_backup_retention_count >= 0 diff --git a/kubespray/extra_playbooks/roles/etcd/handlers/main.yml b/kubespray/extra_playbooks/roles/etcd/handlers/main.yml new file mode 100644 index 0000000..ccf8f8f --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/handlers/main.yml @@ -0,0 +1,62 @@ +--- +- name: restart etcd + command: /bin/true + notify: + - Backup etcd data + - etcd | reload systemd + - reload etcd + - wait for etcd up + - Cleanup etcd backups + +- name: restart etcd-events + command: /bin/true + notify: + - etcd | reload systemd + - reload etcd-events + - wait for etcd-events up + +- import_tasks: backup.yml + +- name: etcd | reload systemd + systemd: + daemon_reload: true + +- name: reload etcd + service: + name: etcd + state: restarted + when: is_etcd_master + +- name: reload etcd-events + service: + name: etcd-events + state: restarted + when: is_etcd_master + +- name: wait for etcd up + uri: + url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" + validate_certs: no + client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem" + client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem" + register: result + until: result.status is defined and result.status == 200 + retries: 60 + delay: 1 + +- import_tasks: backup_cleanup.yml + +- name: wait for etcd-events up + uri: + url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health" + validate_certs: no + client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem" + client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem" + register: result + until: result.status is defined and result.status == 200 + retries: 60 + delay: 1 + +- name: set etcd_secret_changed + set_fact: + etcd_secret_changed: true diff --git a/kubespray/extra_playbooks/roles/etcd/meta/main.yml b/kubespray/extra_playbooks/roles/etcd/meta/main.yml new file mode 100644 index 0000000..e996646 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/meta/main.yml @@ -0,0 +1,8 @@ +--- +dependencies: + - role: adduser + user: "{{ addusers.etcd }}" + when: not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) + - role: adduser + user: "{{ addusers.kube }}" + when: not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/check_certs.yml b/kubespray/extra_playbooks/roles/etcd/tasks/check_certs.yml new file mode 100644 index 0000000..c688c16 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/check_certs.yml @@ -0,0 +1,169 @@ +--- +- name: "Check_certs | Register certs that have already been generated on first etcd node" + find: + paths: "{{ etcd_cert_dir }}" + patterns: "ca.pem,node*.pem,member*.pem,admin*.pem" + get_checksum: true + delegate_to: "{{ groups['etcd'][0] }}" + register: etcdcert_master + run_once: true + +- name: "Check_certs | Set default value for 'sync_certs', 'gen_certs' and 'etcd_secret_changed' to false" + set_fact: + sync_certs: false + gen_certs: false + 
etcd_secret_changed: false + +- name: "Check certs | Register ca and etcd admin/member certs on etcd hosts" + stat: + path: "{{ etcd_cert_dir }}/{{ item }}" + get_attributes: no + get_checksum: yes + get_mime: no + register: etcd_member_certs + when: inventory_hostname in groups['etcd'] + with_items: + - ca.pem + - member-{{ inventory_hostname }}.pem + - member-{{ inventory_hostname }}-key.pem + - admin-{{ inventory_hostname }}.pem + - admin-{{ inventory_hostname }}-key.pem + +- name: "Check certs | Register ca and etcd node certs on kubernetes hosts" + stat: + path: "{{ etcd_cert_dir }}/{{ item }}" + register: etcd_node_certs + when: inventory_hostname in groups['k8s_cluster'] + with_items: + - ca.pem + - node-{{ inventory_hostname }}.pem + - node-{{ inventory_hostname }}-key.pem + +- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(1/2)" + set_fact: + gen_certs: true + when: force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list + run_once: true + with_items: "{{ expected_files }}" + vars: + expected_files: >- + ['{{ etcd_cert_dir }}/ca.pem', + {% set etcd_members = groups['etcd'] %} + {% for host in etcd_members %} + '{{ etcd_cert_dir }}/admin-{{ host }}.pem', + '{{ etcd_cert_dir }}/admin-{{ host }}-key.pem', + '{{ etcd_cert_dir }}/member-{{ host }}.pem', + '{{ etcd_cert_dir }}/member-{{ host }}-key.pem', + {% endfor %} + {% set k8s_nodes = groups['kube_control_plane'] %} + {% for host in k8s_nodes %} + '{{ etcd_cert_dir }}/node-{{ host }}.pem', + '{{ etcd_cert_dir }}/node-{{ host }}-key.pem' + {% if not loop.last %}{{','}}{% endif %} + {% endfor %}] + +- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(2/2)" + set_fact: + gen_certs: true + run_once: true + with_items: "{{ expected_files }}" + vars: + expected_files: >- + ['{{ etcd_cert_dir }}/ca.pem', + {% set etcd_members = groups['etcd'] %} + {% for host in etcd_members %} + '{{ etcd_cert_dir }}/admin-{{ host }}.pem', + '{{ etcd_cert_dir }}/admin-{{ host }}-key.pem', + '{{ etcd_cert_dir }}/member-{{ host }}.pem', + '{{ etcd_cert_dir }}/member-{{ host }}-key.pem', + {% endfor %} + {% set k8s_nodes = groups['k8s_cluster']|unique|sort %} + {% for host in k8s_nodes %} + '{{ etcd_cert_dir }}/node-{{ host }}.pem', + '{{ etcd_cert_dir }}/node-{{ host }}-key.pem' + {% if not loop.last %}{{','}}{% endif %} + {% endfor %}] + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list + +- name: "Check_certs | Set 'gen_master_certs' object to track whether member and admin certs exist on first etcd node" + set_fact: + gen_master_certs: |- + { + {% set etcd_members = groups['etcd'] -%} + {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %} + {% for host in etcd_members -%} + {% set member_cert = "%s/member-%s.pem"|format(etcd_cert_dir, host) %} + {% set member_key = "%s/member-%s-key.pem"|format(etcd_cert_dir, host) %} + {% set admin_cert = "%s/admin-%s.pem"|format(etcd_cert_dir, host) %} + {% set admin_key = "%s/admin-%s-key.pem"|format(etcd_cert_dir, host) %} + {% if force_etcd_cert_refresh -%} + "{{ host }}": True, + {% elif member_cert in existing_certs and member_key in existing_certs and admin_cert in existing_certs and admin_key in existing_certs -%} 
+ "{{ host }}": False, + {% else -%} + "{{ host }}": True, + {% endif -%} + {% endfor %} + } + run_once: true + +- name: "Check_certs | Set 'gen_node_certs' object to track whether node certs exist on first etcd node" + set_fact: + gen_node_certs: |- + { + {% set k8s_nodes = groups['k8s_cluster'] -%} + {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %} + {% for host in k8s_nodes -%} + {% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %} + {% set host_key = "%s/node-%s-key.pem"|format(etcd_cert_dir, host) %} + {% if force_etcd_cert_refresh -%} + "{{ host }}": True, + {% elif host_cert in existing_certs and host_key in existing_certs -%} + "{{ host }}": False, + {% else -%} + "{{ host }}": True, + {% endif -%} + {% endfor %} + } + run_once: true + +- name: "Check_certs | Set 'etcd_member_requires_sync' to true if ca or member/admin cert and key don't exist on etcd member or checksum doesn't match" + set_fact: + etcd_member_requires_sync: true + when: + - inventory_hostname in groups['etcd'] + - (not etcd_member_certs.results[0].stat.exists|default(false)) or + (not etcd_member_certs.results[1].stat.exists|default(false)) or + (not etcd_member_certs.results[2].stat.exists|default(false)) or + (not etcd_member_certs.results[3].stat.exists|default(false)) or + (not etcd_member_certs.results[4].stat.exists|default(false)) or + (etcd_member_certs.results[0].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[0].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_member_certs.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[1].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_member_certs.results[2].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[2].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_member_certs.results[3].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[3].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_member_certs.results[4].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[4].stat.path)|map(attribute="checksum")|first|default('')) + +- name: "Check_certs | Set 'kubernetes_host_requires_sync' to true if ca or node cert and key don't exist on kubernetes host or checksum doesn't match" + set_fact: + kubernetes_host_requires_sync: true + when: + - inventory_hostname in groups['k8s_cluster'] and + inventory_hostname not in groups['etcd'] + - (not etcd_node_certs.results[0].stat.exists|default(false)) or + (not etcd_node_certs.results[1].stat.exists|default(false)) or + (not etcd_node_certs.results[2].stat.exists|default(false)) or + (etcd_node_certs.results[0].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[0].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_node_certs.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[1].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_node_certs.results[2].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[2].stat.path)|map(attribute="checksum")|first|default('')) + +- name: "Check_certs | Set 'sync_certs' to true" + set_fact: + 
sync_certs: true + when: + - etcd_member_requires_sync|default(false) or + kubernetes_host_requires_sync|default(false) or + (inventory_hostname in gen_master_certs and gen_master_certs[inventory_hostname]) or + (inventory_hostname in gen_node_certs and gen_node_certs[inventory_hostname]) diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/configure.yml b/kubespray/extra_playbooks/roles/etcd/tasks/configure.yml new file mode 100644 index 0000000..7534e41 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/configure.yml @@ -0,0 +1,168 @@ +--- +- name: Configure | Check if etcd cluster is healthy + shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null" + args: + executable: /bin/bash + register: etcd_cluster_is_healthy + failed_when: false + changed_when: false + check_mode: no + run_once: yes + when: is_etcd_master and etcd_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- name: Configure | Check if etcd-events cluster is healthy + shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null" + args: + executable: /bin/bash + register: etcd_events_cluster_is_healthy + failed_when: false + changed_when: false + check_mode: no + run_once: yes + when: is_etcd_master and etcd_events_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}" + +- include_tasks: refresh_config.yml + when: is_etcd_master + +- name: Configure | Copy etcd.service systemd file + template: + src: "etcd-{{ etcd_deployment_type }}.service.j2" + dest: /etc/systemd/system/etcd.service + backup: yes + mode: 0644 + when: is_etcd_master and etcd_cluster_setup + +- name: Configure | Copy etcd-events.service systemd file + template: + src: "etcd-events-{{ etcd_deployment_type }}.service.j2" + dest: /etc/systemd/system/etcd-events.service + backup: yes + mode: 0644 + when: is_etcd_master and etcd_events_cluster_setup + +- name: Configure | reload systemd + systemd: + daemon_reload: true + when: is_etcd_master + +# when scaling new etcd will fail to start +- name: Configure | Ensure etcd is running + service: + name: etcd + state: started + enabled: yes + ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}" # noqa ignore-errors + when: is_etcd_master and etcd_cluster_setup + +# when scaling new etcd will fail to start +- name: Configure | Ensure etcd-events is running + service: + name: etcd-events + state: started + enabled: yes + ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}" # noqa ignore-errors + when: is_etcd_master and etcd_events_cluster_setup + +- name: Configure | Wait for etcd cluster to be healthy + shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null" + args: + executable: /bin/bash + register: 
etcd_cluster_is_healthy + until: etcd_cluster_is_healthy.rc == 0 + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + check_mode: no + run_once: yes + when: + - is_etcd_master + - etcd_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- name: Configure | Wait for etcd-events cluster to be healthy + shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null" + args: + executable: /bin/bash + register: etcd_events_cluster_is_healthy + until: etcd_events_cluster_is_healthy.rc == 0 + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + check_mode: no + run_once: yes + when: + - is_etcd_master + - etcd_events_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}" + +- name: Configure | Check if member is in etcd cluster + shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}" + register: etcd_member_in_cluster + ignore_errors: true # noqa ignore-errors + changed_when: false + check_mode: no + when: is_etcd_master and etcd_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- name: Configure | Check if member is in etcd-events cluster + shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}" + register: etcd_events_member_in_cluster + ignore_errors: true # noqa ignore-errors + changed_when: false + check_mode: no + when: is_etcd_master and etcd_events_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}" + +- name: Configure | Join member(s) to etcd cluster one at a time + include_tasks: join_etcd_member.yml + with_items: "{{ groups['etcd'] }}" + when: inventory_hostname == item and etcd_cluster_setup and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0 + +- name: Configure | Join member(s) to etcd-events cluster one at a time + include_tasks: join_etcd-events_member.yml + with_items: "{{ groups['etcd'] }}" + when: inventory_hostname == item and etcd_events_cluster_setup and etcd_events_member_in_cluster.rc != 0 and etcd_events_cluster_is_healthy.rc == 0 diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/gen_certs_script.yml b/kubespray/extra_playbooks/roles/etcd/tasks/gen_certs_script.yml new file mode 100644 index 0000000..eb97a82 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/gen_certs_script.yml @@ -0,0 +1,166 @@ +--- +- name: Gen_certs | create etcd cert dir + file: 
+ path: "{{ etcd_cert_dir }}" + group: "{{ etcd_cert_group }}" + state: directory + owner: "{{ etcd_owner }}" + mode: "{{ etcd_cert_dir_mode }}" + recurse: yes + +- name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})" + file: + path: "{{ etcd_script_dir }}" + state: directory + owner: root + mode: 0700 + run_once: yes + when: inventory_hostname == groups['etcd'][0] + +- name: Gen_certs | write openssl config + template: + src: "openssl.conf.j2" + dest: "{{ etcd_config_dir }}/openssl.conf" + mode: 0640 + run_once: yes + delegate_to: "{{ groups['etcd'][0] }}" + when: + - gen_certs|default(false) + - inventory_hostname == groups['etcd'][0] + +- name: Gen_certs | copy certs generation script + template: + src: "make-ssl-etcd.sh.j2" + dest: "{{ etcd_script_dir }}/make-ssl-etcd.sh" + mode: 0700 + run_once: yes + when: + - gen_certs|default(false) + - inventory_hostname == groups['etcd'][0] + +- name: Gen_certs | run cert generation script for etcd and kube control plane nodes + command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}" + environment: + - MASTERS: "{% for m in groups['etcd'] %} + {% if gen_master_certs[m] %} + {{ m }} + {% endif %} + {% endfor %}" + - HOSTS: "{% for h in groups['kube_control_plane'] %} + {% if gen_node_certs[h] %} + {{ h }} + {% endif %} + {% endfor %}" + run_once: yes + delegate_to: "{{ groups['etcd'][0] }}" + when: gen_certs|default(false) + notify: set etcd_secret_changed + +- name: Gen_certs | run cert generation script for all clients + command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}" + environment: + - HOSTS: "{% for h in groups['k8s_cluster'] %} + {% if gen_node_certs[h] %} + {{ h }} + {% endif %} + {% endfor %}" + run_once: yes + delegate_to: "{{ groups['etcd'][0] }}" + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - gen_certs|default(false) + notify: set etcd_secret_changed + +- name: Gen_certs | Gather etcd member/admin and kube_control_plane clinet certs from first etcd node + slurp: + src: "{{ item }}" + register: etcd_master_certs + with_items: + - "{{ etcd_cert_dir }}/ca.pem" + - "{{ etcd_cert_dir }}/ca-key.pem" + - "[{% for node in groups['etcd'] %} + '{{ etcd_cert_dir }}/admin-{{ node }}.pem', + '{{ etcd_cert_dir }}/admin-{{ node }}-key.pem', + '{{ etcd_cert_dir }}/member-{{ node }}.pem', + '{{ etcd_cert_dir }}/member-{{ node }}-key.pem', + {% endfor %}]" + - "[{% for node in (groups['kube_control_plane']) %} + '{{ etcd_cert_dir }}/node-{{ node }}.pem', + '{{ etcd_cert_dir }}/node-{{ node }}-key.pem', + {% endfor %}]" + delegate_to: "{{ groups['etcd'][0] }}" + when: + - inventory_hostname in groups['etcd'] + - sync_certs|default(false) + - inventory_hostname != groups['etcd'][0] + notify: set etcd_secret_changed + +- name: Gen_certs | Write etcd member/admin and kube_control_plane clinet certs to other etcd nodes + copy: + dest: "{{ item.item }}" + content: "{{ item.content | b64decode }}" + group: "{{ etcd_cert_group }}" + owner: "{{ etcd_owner }}" + mode: 0640 + with_items: "{{ etcd_master_certs.results }}" + when: + - inventory_hostname in groups['etcd'] + - sync_certs|default(false) + - inventory_hostname != groups['etcd'][0] + loop_control: + label: "{{ item.item }}" + +- name: Gen_certs | Gather node certs from first etcd node + slurp: + src: "{{ 
item }}" + register: etcd_master_node_certs + with_items: + - "[{% for node in groups['k8s_cluster'] %} + '{{ etcd_cert_dir }}/node-{{ node }}.pem', + '{{ etcd_cert_dir }}/node-{{ node }}-key.pem', + {% endfor %}]" + delegate_to: "{{ groups['etcd'][0] }}" + when: + - inventory_hostname in groups['etcd'] + - inventory_hostname != groups['etcd'][0] + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + notify: set etcd_secret_changed + +- name: Gen_certs | Write node certs to other etcd nodes + copy: + dest: "{{ item.item }}" + content: "{{ item.content | b64decode }}" + group: "{{ etcd_cert_group }}" + owner: "{{ etcd_owner }}" + mode: 0640 + with_items: "{{ etcd_master_node_certs.results }}" + when: + - inventory_hostname in groups['etcd'] + - inventory_hostname != groups['etcd'][0] + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + loop_control: + label: "{{ item.item }}" + +- include_tasks: gen_nodes_certs_script.yml + when: + - inventory_hostname in groups['kube_control_plane'] and + sync_certs|default(false) and inventory_hostname not in groups['etcd'] + +- include_tasks: gen_nodes_certs_script.yml + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - inventory_hostname in groups['k8s_cluster'] and + sync_certs|default(false) and inventory_hostname not in groups['etcd'] + +- name: Gen_certs | check certificate permissions + file: + path: "{{ etcd_cert_dir }}" + group: "{{ etcd_cert_group }}" + state: directory + owner: "{{ etcd_owner }}" + mode: "{{ etcd_cert_dir_mode }}" + recurse: yes diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/gen_nodes_certs_script.yml b/kubespray/extra_playbooks/roles/etcd/tasks/gen_nodes_certs_script.yml new file mode 100644 index 0000000..d176e01 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/gen_nodes_certs_script.yml @@ -0,0 +1,32 @@ +--- +- name: Gen_certs | Set cert names per node + set_fact: + my_etcd_node_certs: [ 'ca.pem', + 'node-{{ inventory_hostname }}.pem', + 'node-{{ inventory_hostname }}-key.pem'] + tags: + - facts + +- name: "Check_certs | Set 'sync_certs' to true on nodes" + set_fact: + sync_certs: true + with_items: + - "{{ my_etcd_node_certs }}" + +- name: Gen_certs | Gather node certs + shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0" + args: + executable: /bin/bash + warn: false + no_log: "{{ not (unsafe_show_logs|bool) }}" + register: etcd_node_certs + check_mode: no + delegate_to: "{{ groups['etcd'][0] }}" + changed_when: false + +- name: Gen_certs | Copy certs on nodes + shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}" + args: + executable: /bin/bash + no_log: "{{ not (unsafe_show_logs|bool) }}" + changed_when: false diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/install_docker.yml b/kubespray/extra_playbooks/roles/etcd/tasks/install_docker.yml new file mode 100644 index 0000000..025a0ba --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/install_docker.yml @@ -0,0 +1,45 @@ +--- +- import_tasks: install_etcdctl_docker.yml + when: etcd_cluster_setup + +- 
name: Get currently-deployed etcd version + shell: "{{ docker_bin_dir }}/docker ps --filter='name={{ etcd_member_name }}' --format='{{ '{{ .Image }}' }}'" + register: etcd_current_docker_image + when: etcd_cluster_setup + +- name: Get currently-deployed etcd-events version + shell: "{{ docker_bin_dir }}/docker ps --filter='name={{ etcd_member_name }}-events' --format='{{ '{{ .Image }}' }}'" + register: etcd_events_current_docker_image + when: etcd_events_cluster_setup + +- name: Restart etcd if necessary + command: /bin/true + notify: restart etcd + when: + - etcd_cluster_setup + - etcd_image_tag not in etcd_current_docker_image.stdout|default('') + +- name: Restart etcd-events if necessary + command: /bin/true + notify: restart etcd-events + when: + - etcd_events_cluster_setup + - etcd_image_tag not in etcd_events_current_docker_image.stdout|default('') + +- name: Install etcd launch script + template: + src: etcd.j2 + dest: "{{ bin_dir }}/etcd" + owner: 'root' + mode: 0750 + backup: yes + when: etcd_cluster_setup + +- name: Install etcd-events launch script + template: + src: etcd-events.j2 + dest: "{{ bin_dir }}/etcd-events" + owner: 'root' + mode: 0750 + backup: yes + when: etcd_events_cluster_setup diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/install_etcdctl_docker.yml b/kubespray/extra_playbooks/roles/etcd/tasks/install_etcdctl_docker.yml new file mode 100644 index 0000000..74ae07f --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/install_etcdctl_docker.yml @@ -0,0 +1,11 @@ +--- +- name: Install | Copy etcdctl binary from docker container + command: sh -c "{{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy; + {{ docker_bin_dir }}/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} && + {{ docker_bin_dir }}/docker cp etcdctl-binarycopy:/usr/local/bin/etcdctl {{ bin_dir }}/etcdctl && + {{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy" + register: etcdctl_install_result + until: etcdctl_install_result.rc == 0 + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/install_host.yml b/kubespray/extra_playbooks/roles/etcd/tasks/install_host.yml new file mode 100644 index 0000000..14a75b4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/install_host.yml @@ -0,0 +1,41 @@ +--- +- name: Get currently-deployed etcd version + command: "{{ bin_dir }}/etcd --version" + register: etcd_current_host_version + # There's a chance this play could run before etcd is installed at all + ignore_errors: true + when: etcd_cluster_setup + +- name: Restart etcd if necessary + command: /bin/true + notify: restart etcd + when: + - etcd_cluster_setup + - etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('') + +- name: Restart etcd-events if necessary + command: /bin/true + notify: restart etcd-events + when: + - etcd_events_cluster_setup + - etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('') + +- name: install | Download etcd and etcdctl + include_tasks: "../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.etcd) }}" + when: etcd_cluster_setup + tags: + - never + - etcd + +- name: install | Copy etcd and etcdctl binary from download dir + copy: + src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}" + dest: "{{ bin_dir }}/{{ item }}" + mode: 0755 + remote_src: yes + with_items: + - etcd + - etcdctl + when: 
etcd_cluster_setup diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/join_etcd-events_member.yml b/kubespray/extra_playbooks/roles/etcd/tasks/join_etcd-events_member.yml new file mode 100644 index 0000000..8336f1a --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/join_etcd-events_member.yml @@ -0,0 +1,47 @@ +--- +- name: Join Member | Add member to etcd-events cluster # noqa 301 305 + shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}" + register: member_add_result + until: member_add_result.rc == 0 + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}" + +- include_tasks: refresh_config.yml + vars: + etcd_events_peer_addresses: >- + {% for host in groups['etcd'] -%} + {%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%} + {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2382, + {%- endif -%} + {%- if loop.last -%} + {{ etcd_member_name }}={{ etcd_events_peer_url }} + {%- endif -%} + {%- endfor -%} + +- name: Join Member | Ensure member is in etcd-events cluster + shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep {{ etcd_events_access_address }} >/dev/null" + args: + executable: /bin/bash + register: etcd_events_member_in_cluster + changed_when: false + check_mode: no + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}" + +- name: Configure | Ensure etcd-events is running + service: + name: etcd-events + state: started + enabled: yes diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/join_etcd_member.yml b/kubespray/extra_playbooks/roles/etcd/tasks/join_etcd_member.yml new file mode 100644 index 0000000..2244039 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/join_etcd_member.yml @@ -0,0 +1,51 @@ +--- +- name: Join Member | Add member to etcd cluster # noqa 301 305 + shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}" + register: member_add_result + until: member_add_result.rc == 0 or 'Peer URLs already exists' in member_add_result.stderr + failed_when: member_add_result.rc != 0 and 'Peer URLs already exists' not in member_add_result.stderr + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- include_tasks: refresh_config.yml + vars: + etcd_peer_addresses: >- + {% for host in groups['etcd'] -%} + {%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%} + {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2380, + {%- endif -%} + {%- if loop.last -%} + {{ etcd_member_name }}={{ etcd_peer_url }} 
+ {%- endif -%} + {%- endfor -%} + +- name: Join Member | Ensure member is in etcd cluster + shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep {{ etcd_access_address }} >/dev/null" + args: + executable: /bin/bash + register: etcd_member_in_cluster + changed_when: false + check_mode: no + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + until: etcd_member_in_cluster.rc == 0 + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- name: Configure | Ensure etcd is running + service: + name: etcd + state: started + enabled: yes diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/main.yml b/kubespray/extra_playbooks/roles/etcd/tasks/main.yml new file mode 100644 index 0000000..fb593db --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/main.yml @@ -0,0 +1,77 @@ +--- +- include_tasks: check_certs.yml + when: cert_management == "script" + tags: + - etcd-secrets + - facts + +- include_tasks: "gen_certs_script.yml" + when: + - cert_management |d('script') == "script" + tags: + - etcd-secrets + +- include_tasks: upd_ca_trust.yml + when: + - inventory_hostname in groups['etcd']|union(groups['kube_control_plane'])|unique|sort + tags: + - etcd-secrets + +- include_tasks: upd_ca_trust.yml + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - inventory_hostname in groups['k8s_cluster'] + tags: + - etcd-secrets + +- name: "Gen_certs | Get etcd certificate serials" + command: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial" + register: "etcd_client_cert_serial_result" + changed_when: false + check_mode: no + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - inventory_hostname in groups['k8s_cluster'] + tags: + - master + - network + +- name: Set etcd_client_cert_serial + set_fact: + etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}" + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - inventory_hostname in groups['k8s_cluster'] + tags: + - master + - network + +- include_tasks: "install_{{ etcd_deployment_type }}.yml" + when: is_etcd_master + tags: + - upgrade + +- include_tasks: configure.yml + when: is_etcd_master + +- include_tasks: refresh_config.yml + when: is_etcd_master + +- name: Restart etcd if certs changed + command: /bin/true + notify: restart etcd + when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed|default(false) + +- name: Restart etcd-events if certs changed + command: /bin/true + notify: restart etcd-events + when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false) + +# After etcd cluster is assembled, make sure that +# initial state of the cluster is in `existing` +# state instead of `new`.
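+# (refresh_config.yml re-renders /etc/etcd.env, whose template sets ETCD_INITIAL_CLUSTER_STATE to "existing" once the earlier cluster health check has passed; see etcd.env.j2.)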
+- include_tasks: refresh_config.yml + when: is_etcd_master diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/refresh_config.yml b/kubespray/extra_playbooks/roles/etcd/tasks/refresh_config.yml new file mode 100644 index 0000000..57010fe --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/refresh_config.yml @@ -0,0 +1,16 @@ +--- +- name: Refresh config | Create etcd config file + template: + src: etcd.env.j2 + dest: /etc/etcd.env + mode: 0640 + notify: restart etcd + when: is_etcd_master and etcd_cluster_setup + +- name: Refresh config | Create etcd-events config file + template: + src: etcd-events.env.j2 + dest: /etc/etcd-events.env + mode: 0640 + notify: restart etcd-events + when: is_etcd_master and etcd_events_cluster_setup diff --git a/kubespray/extra_playbooks/roles/etcd/tasks/upd_ca_trust.yml b/kubespray/extra_playbooks/roles/etcd/tasks/upd_ca_trust.yml new file mode 100644 index 0000000..f806d39 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/tasks/upd_ca_trust.yml @@ -0,0 +1,37 @@ +--- +- name: Gen_certs | target ca-certificate store file + set_fact: + ca_cert_path: |- + {% if ansible_os_family == "Debian" -%} + /usr/local/share/ca-certificates/etcd-ca.crt + {%- elif ansible_os_family == "RedHat" -%} + /etc/pki/ca-trust/source/anchors/etcd-ca.crt + {%- elif ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] -%} + /etc/ssl/certs/etcd-ca.pem + {%- elif ansible_os_family == "Suse" -%} + /etc/pki/trust/anchors/etcd-ca.pem + {%- elif ansible_os_family == "ClearLinux" -%} + /usr/share/ca-certs/etcd-ca.pem + {%- endif %} + tags: + - facts + +- name: Gen_certs | add CA to trusted CA dir + copy: + src: "{{ etcd_cert_dir }}/ca.pem" + dest: "{{ ca_cert_path }}" + remote_src: true + mode: 0640 + register: etcd_ca_cert + +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa 503 + command: update-ca-certificates + when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"] + +- name: Gen_certs | update ca-certificates (RedHat) # noqa 503 + command: update-ca-trust extract + when: etcd_ca_cert.changed and ansible_os_family == "RedHat" + +- name: Gen_certs | update ca-certificates (ClearLinux) # noqa 503 + command: clrtrust add "{{ ca_cert_path }}" + when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux" diff --git a/kubespray/extra_playbooks/roles/etcd/templates/etcd-docker.service.j2 b/kubespray/extra_playbooks/roles/etcd/templates/etcd-docker.service.j2 new file mode 100644 index 0000000..4dfbd72 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/templates/etcd-docker.service.j2 @@ -0,0 +1,18 @@ +[Unit] +Description=etcd docker wrapper +Wants=docker.socket +After=docker.service + +[Service] +User=root +PermissionsStartOnly=true +EnvironmentFile=-/etc/etcd.env +ExecStart={{ bin_dir }}/etcd +ExecStartPre=-{{ docker_bin_dir }}/docker rm -f {{ etcd_member_name | default("etcd") }} +ExecStop={{ docker_bin_dir }}/docker stop {{ etcd_member_name | default("etcd") }} +Restart=always +RestartSec=15s +TimeoutStartSec=30s + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/extra_playbooks/roles/etcd/templates/etcd-events-docker.service.j2 b/kubespray/extra_playbooks/roles/etcd/templates/etcd-events-docker.service.j2 new file mode 100644 index 0000000..271980a --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/templates/etcd-events-docker.service.j2 @@ -0,0 +1,18 @@ +[Unit] +Description=etcd docker wrapper +Wants=docker.socket 
+After=docker.service + +[Service] +User=root +PermissionsStartOnly=true +EnvironmentFile=-/etc/etcd-events.env +ExecStart={{ bin_dir }}/etcd-events +ExecStartPre=-{{ docker_bin_dir }}/docker rm -f {{ etcd_member_name }}-events +ExecStop={{ docker_bin_dir }}/docker stop {{ etcd_member_name }}-events +Restart=always +RestartSec=15s +TimeoutStartSec=30s + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/extra_playbooks/roles/etcd/templates/etcd-events-host.service.j2 b/kubespray/extra_playbooks/roles/etcd/templates/etcd-events-host.service.j2 new file mode 100644 index 0000000..6e0167a --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/templates/etcd-events-host.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=etcd +After=network.target + +[Service] +Type=notify +User=root +EnvironmentFile=/etc/etcd-events.env +ExecStart={{ bin_dir }}/etcd +NotifyAccess=all +Restart=always +RestartSec=10s +LimitNOFILE=40000 + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/extra_playbooks/roles/etcd/templates/etcd-events.env.j2 b/kubespray/extra_playbooks/roles/etcd/templates/etcd-events.env.j2 new file mode 100644 index 0000000..3abefd6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/templates/etcd-events.env.j2 @@ -0,0 +1,43 @@ +ETCD_DATA_DIR={{ etcd_events_data_dir }} +ETCD_ADVERTISE_CLIENT_URLS={{ etcd_events_client_url }} +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_events_peer_url }} +ETCD_INITIAL_CLUSTER_STATE={% if etcd_events_cluster_is_healthy.rc == 0 | bool %}existing{% else %}new{% endif %} + +ETCD_METRICS={{ etcd_metrics }} +ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2383,https://127.0.0.1:2383 +ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }} +ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }} +ETCD_INITIAL_CLUSTER_TOKEN=k8s_events_etcd +ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2382 +ETCD_NAME={{ etcd_member_name }}-events +ETCD_PROXY=off +ETCD_INITIAL_CLUSTER={{ etcd_events_peer_addresses }} +ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }} +{% if etcd_snapshot_count is defined %} +ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }} +{% endif %} +{% if etcd_quota_backend_bytes is defined %} +ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }} +{% endif %} +{% if etcd_max_request_bytes is defined %} +ETCD_MAX_REQUEST_BYTES={{ etcd_max_request_bytes }} +{% endif %} + +# TLS settings +ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem +ETCD_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem +ETCD_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem +ETCD_CLIENT_CERT_AUTH={{ etcd_secure_client | lower}} + +ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem +ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem +ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem +ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }} + +{% if etcd_tls_cipher_suites is defined %} +ETCD_CIPHER_SUITES={% for tls in etcd_tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %} +{% endif %} + +{% for key, value in etcd_extra_vars.items() %} +{{ key }}={{ value }} +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/etcd/templates/etcd-events.j2 b/kubespray/extra_playbooks/roles/etcd/templates/etcd-events.j2 new file mode 100644 index 0000000..b268479 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/templates/etcd-events.j2 @@ -0,0 +1,21 @@ +#!/bin/bash +{{ docker_bin_dir }}/docker run \ + --restart=on-failure:5 \ + 
--env-file=/etc/etcd-events.env \ + --net=host \ + -v /etc/ssl/certs:/etc/ssl/certs:ro \ + -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \ + -v {{ etcd_events_data_dir }}:{{ etcd_events_data_dir }}:rw \ + {% if etcd_memory_limit is defined %} + --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \ + {% endif %} + {% if etcd_cpu_limit is defined %} + --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \ + {% endif %} + {% if etcd_blkio_weight is defined %} + --blkio-weight={{ etcd_blkio_weight }} \ + {% endif %} + --name={{ etcd_member_name }}-events \ + {{ etcd_image_repo }}:{{ etcd_image_tag }} \ + /usr/local/bin/etcd \ + "$@" diff --git a/kubespray/extra_playbooks/roles/etcd/templates/etcd-host.service.j2 b/kubespray/extra_playbooks/roles/etcd/templates/etcd-host.service.j2 new file mode 100644 index 0000000..6bba805 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/templates/etcd-host.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=etcd +After=network.target + +[Service] +Type=notify +User=root +EnvironmentFile=/etc/etcd.env +ExecStart={{ bin_dir }}/etcd +NotifyAccess=all +Restart=always +RestartSec=10s +LimitNOFILE=40000 + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/extra_playbooks/roles/etcd/templates/etcd.env.j2 b/kubespray/extra_playbooks/roles/etcd/templates/etcd.env.j2 new file mode 100644 index 0000000..18395c9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/templates/etcd.env.j2 @@ -0,0 +1,68 @@ +# Environment file for etcd {{ etcd_version }} +ETCD_DATA_DIR={{ etcd_data_dir }} +ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_url }} +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_url }} +ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc == 0 | bool %}existing{% else %}new{% endif %} + +ETCD_METRICS={{ etcd_metrics }} +{% if etcd_metrics_port is defined %} +ETCD_LISTEN_METRICS_URLS=http://{{ etcd_address }}:{{ etcd_metrics_port }},http://127.0.0.1:{{ etcd_metrics_port }} +{% endif %} +ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2379,https://127.0.0.1:2379 +ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }} +ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }} +ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd +ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2380 +ETCD_NAME={{ etcd_member_name }} +ETCD_PROXY=off +ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }} +ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }} +{% if etcd_snapshot_count is defined %} +ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }} +{% endif %} +{% if etcd_quota_backend_bytes is defined %} +ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }} +{% endif %} +{% if etcd_max_request_bytes is defined %} +ETCD_MAX_REQUEST_BYTES={{ etcd_max_request_bytes }} +{% endif %} +{% if etcd_log_level is defined %} +ETCD_LOG_LEVEL={{ etcd_log_level }} +{% endif %} +{% if etcd_max_snapshots is defined %} +ETCD_MAX_SNAPSHOTS={{ etcd_max_snapshots }} +{% endif %} +{% if etcd_max_wals is defined %} +ETCD_MAX_WALS={{ etcd_max_wals }} +{% endif %} +# Flannel need etcd v2 API +ETCD_ENABLE_V2=true + +# TLS settings +ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem +ETCD_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem +ETCD_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem +ETCD_CLIENT_CERT_AUTH={{ etcd_secure_client | lower}} + +ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem +ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem +ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem 
+ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }} + +{% if etcd_tls_cipher_suites is defined %} +ETCD_CIPHER_SUITES={% for tls in etcd_tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %} +{% endif %} + +{% for key, value in etcd_extra_vars.items() %} +{{ key }}={{ value }} +{% endfor %} + +# CLI settings +ETCDCTL_ENDPOINTS=https://127.0.0.1:2379 +ETCDCTL_CACERT={{ etcd_cert_dir }}/ca.pem +ETCDCTL_KEY={{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem +ETCDCTL_CERT={{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem + +# ETCD 3.5.x issue +# https://groups.google.com/a/kubernetes.io/g/dev/c/B7gJs88XtQc/m/rSgNOzV2BwAJ?utm_medium=email&utm_source=footer +ETCD_EXPERIMENTAL_INITIAL_CORRUPT_CHECK={{ etcd_experimental_initial_corrupt_check }} \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/etcd/templates/etcd.j2 b/kubespray/extra_playbooks/roles/etcd/templates/etcd.j2 new file mode 100644 index 0000000..5374c70 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/templates/etcd.j2 @@ -0,0 +1,21 @@ +#!/bin/bash +{{ docker_bin_dir }}/docker run \ + --restart=on-failure:5 \ + --env-file=/etc/etcd.env \ + --net=host \ + -v /etc/ssl/certs:/etc/ssl/certs:ro \ + -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \ + -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:rw \ +{% if etcd_memory_limit is defined %} + --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \ +{% endif %} +{% if etcd_cpu_limit is defined %} + --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \ +{% endif %} +{% if etcd_blkio_weight is defined %} + --blkio-weight={{ etcd_blkio_weight }} \ +{% endif %} + --name={{ etcd_member_name | default("etcd") }} \ + {{ etcd_image_repo }}:{{ etcd_image_tag }} \ + /usr/local/bin/etcd \ + "$@" diff --git a/kubespray/extra_playbooks/roles/etcd/templates/make-ssl-etcd.sh.j2 b/kubespray/extra_playbooks/roles/etcd/templates/make-ssl-etcd.sh.j2 new file mode 100644 index 0000000..d727cff --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/templates/make-ssl-etcd.sh.j2 @@ -0,0 +1,103 @@ +#!/bin/bash + +# Author: Smana smainklh@gmail.com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o pipefail +usage() +{ + cat << EOF +Create self signed certificates + +Usage : $(basename $0) -f [-d ] + -h | --help : Show this message + -f | --config : Openssl configuration file + -d | --ssldir : Directory where the certificates will be installed + + ex : + $(basename $0) -f openssl.conf -d /srv/ssl +EOF +} + +# Options parsing +while (($#)); do + case "$1" in + -h | --help) usage; exit 0;; + -f | --config) CONFIG=${2}; shift 2;; + -d | --ssldir) SSLDIR="${2}"; shift 2;; + *) + usage + echo "ERROR : Unknown option" + exit 3 + ;; + esac +done + +if [ -z ${CONFIG} ]; then + echo "ERROR: the openssl configuration file is missing. 
option -f" + exit 1 +fi +if [ -z ${SSLDIR} ]; then + SSLDIR="/etc/ssl/etcd" +fi + +tmpdir=$(mktemp -d /tmp/etcd_cacert.XXXXXX) +trap 'rm -rf "${tmpdir}"' EXIT +cd "${tmpdir}" + +mkdir -p "${SSLDIR}" + +# Root CA +if [ -e "$SSLDIR/ca-key.pem" ]; then + # Reuse existing CA + cp $SSLDIR/{ca.pem,ca-key.pem} . +else + openssl genrsa -out ca-key.pem {{certificates_key_size}} > /dev/null 2>&1 + openssl req -x509 -new -nodes -key ca-key.pem -days {{certificates_duration}} -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1 +fi + +# ETCD member +if [ -n "$MASTERS" ]; then + for host in $MASTERS; do + cn="${host%%.*}" + # Member key + openssl genrsa -out member-${host}-key.pem {{certificates_key_size}} > /dev/null 2>&1 + openssl req -new -key member-${host}-key.pem -out member-${host}.csr -subj "/CN=etcd-member-${cn}" -config ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + + # Admin key + openssl genrsa -out admin-${host}-key.pem {{certificates_key_size}} > /dev/null 2>&1 + openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=etcd-admin-${cn}" > /dev/null 2>&1 + openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + done +fi + +# Node keys +if [ -n "$HOSTS" ]; then + for host in $HOSTS; do + cn="${host%%.*}" + openssl genrsa -out node-${host}-key.pem {{certificates_key_size}} > /dev/null 2>&1 + openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=etcd-node-${cn}" > /dev/null 2>&1 + openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + done +fi + +# Install certs +if [ -e "$SSLDIR/ca-key.pem" ]; then + # No pass existing CA + rm -f ca.pem ca-key.pem +fi + +mv *.pem ${SSLDIR}/ diff --git a/kubespray/extra_playbooks/roles/etcd/templates/openssl.conf.j2 b/kubespray/extra_playbooks/roles/etcd/templates/openssl.conf.j2 new file mode 100644 index 0000000..f6681a1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcd/templates/openssl.conf.j2 @@ -0,0 +1,45 @@ +{% set counter = {'dns': 2,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name + +[req_distinguished_name] + +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +subjectAltName = @alt_names + +[ ssl_client ] +extendedKeyUsage = clientAuth, serverAuth +basicConstraints = CA:FALSE +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer +subjectAltName = @alt_names + +[ v3_ca ] +basicConstraints = CA:TRUE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +subjectAltName = @alt_names +authorityKeyIdentifier=keyid:always,issuer + +[alt_names] +DNS.1 = localhost +{% for host in groups['etcd'] %} +DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }} +{% endfor %} +{% if apiserver_loadbalancer_domain_name is defined %} +DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }} +{% endif %} +{% for etcd_alt_name in etcd_cert_alt_names %} +DNS.{{ counter["dns"] }} = {{ etcd_alt_name 
}}{{ increment(counter, 'dns') }} +{% endfor %} +{% for host in groups['etcd'] %} +{% if hostvars[host]['access_ip'] is defined %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }} +{% endif %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(fallback_ips[host]) }}{{ increment(counter, 'ip') }} +{% endfor %} +{% for cert_alt_ip in etcd_cert_alt_ips %} +IP.{{ counter["ip"] }} = {{ cert_alt_ip }}{{ increment(counter, 'ip') }} +{% endfor %} +IP.{{ counter["ip"] }} = 127.0.0.1 diff --git a/kubespray/extra_playbooks/roles/etcdctl/tasks/main.yml b/kubespray/extra_playbooks/roles/etcdctl/tasks/main.yml new file mode 100644 index 0000000..fca078c --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcdctl/tasks/main.yml @@ -0,0 +1,65 @@ +--- +# To get the binary from container to host, use the etcd data directory mounted +# rw from host into the container. + +- name: Check unintentional include of this role + assert: + that: etcd_deployment_type == "kubeadm" + +- name: Check if etcdctl exist + stat: + path: "{{ bin_dir }}/etcdctl" + get_attributes: no + get_checksum: no + get_mime: no + register: stat_etcdctl + +- block: + - name: Check version + command: "{{ bin_dir }}/etcdctl version" + register: etcdctl_version + check_mode: no + changed_when: false + + - name: Remove old binary if version is not OK + file: + path: "{{ bin_dir }}/etcdctl" + state: absent + when: etcd_version.lstrip('v') not in etcdctl_version.stdout + when: stat_etcdctl.stat.exists + +- name: Check if etcdctl still exist after version check + stat: + path: "{{ bin_dir }}/etcdctl" + get_attributes: no + get_checksum: no + get_mime: no + register: stat_etcdctl + +- block: + - name: Copy etcdctl script to host + shell: "{{ docker_bin_dir }}/docker exec \"$({{ docker_bin_dir }}/docker ps -qf ancestor={{ etcd_image_repo }}:{{ etcd_image_tag }})\" cp /usr/local/bin/etcdctl {{ etcd_data_dir }}/etcdctl" + when: container_manager == "docker" + + - name: Copy etcdctl script to host + shell: "{{ bin_dir }}/crictl exec \"$({{ bin_dir }}/crictl ps -q --image {{ etcd_image_repo }}:{{ etcd_image_tag }})\" cp /usr/local/bin/etcdctl {{ etcd_data_dir }}/etcdctl" + when: container_manager in ['crio', 'containerd'] + + - name: Copy etcdctl to {{ bin_dir }} + copy: + src: "{{ etcd_data_dir }}/etcdctl" + dest: "{{ bin_dir }}" + remote_src: true + mode: 0755 + when: not stat_etcdctl.stat.exists + +- name: Remove binary in etcd data dir + file: + path: "{{ etcd_data_dir }}/etcdctl" + state: absent + +- name: Create etcdctl wrapper script + template: + src: etcdctl.sh.j2 + dest: "{{ bin_dir }}/etcdctl.sh" + mode: 0755 diff --git a/kubespray/extra_playbooks/roles/etcdctl/templates/etcdctl.sh.j2 b/kubespray/extra_playbooks/roles/etcdctl/templates/etcdctl.sh.j2 new file mode 100644 index 0000000..266bcfd --- /dev/null +++ b/kubespray/extra_playbooks/roles/etcdctl/templates/etcdctl.sh.j2 @@ -0,0 +1,8 @@ +#!/bin/bash +# {{ ansible_managed }} +# example invocation: etcdctl.sh get --keys-only --from-key "" + +etcdctl \ + --cacert {{ kube_cert_dir }}/etcd/ca.crt \ + --cert {{ kube_cert_dir }}/etcd/server.crt \ + --key {{ kube_cert_dir }}/etcd/server.key "$@" diff --git a/kubespray/extra_playbooks/roles/helm-apps/README.md b/kubespray/extra_playbooks/roles/helm-apps/README.md new file mode 100644 index 0000000..27b480c --- /dev/null +++ b/kubespray/extra_playbooks/roles/helm-apps/README.md @@ -0,0 +1,39 @@ +Role Name +========= + +This role is intended to be used to fetch and deploy Helm Charts as part of 
+cluster installation or upgrading with kubespray. + +Requirements +------------ + +The role needs to be executed on a host with access to the Kubernetes API, and +with the helm binary in place. + +Role Variables +-------------- + +See meta/argument_specs.yml + +Playbook example: + +```yaml +--- +- hosts: kube_control_plane[0] + gather_facts: no + roles: + - name: helm-apps + releases: + - name: app + namespace: app + chart_ref: simple-app/simple-app + - name: app2 + namespace: app + chart_ref: simple-app/simple-app + wait_timeout: "10m" # override the same option in `release_common_opts` + repositories: "{{ repos }}" + - repo_name: simple-app + repo_url: "https://blog.leiwang.info/simple-app" + release_common_opts: "{{ helm_params }}" + wait_timeout: "5m" +``` diff --git a/kubespray/extra_playbooks/roles/helm-apps/meta/argument_specs.yml b/kubespray/extra_playbooks/roles/helm-apps/meta/argument_specs.yml new file mode 100644 index 0000000..d1be9a8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/helm-apps/meta/argument_specs.yml @@ -0,0 +1,93 @@ +--- +argument_specs: + main: + short_description: Install a list of Helm charts. + options: + releases: + type: list + elements: dict + required: true + description: | + List of dictionaries passed as arguments to kubernetes.core.helm. + Arguments passed here will override those in `helm_settings`. For + structure of the dictionary, see the documentation for + kubernetes.core.helm ansible module. + options: + chart_ref: + type: path + required: true + chart_version: + type: str + name: + type: str + required: true + namespace: + type: str + required: true + values: + type: dict + # Possibly general options + create_namespace: + type: bool + chart_repo_url: + type: str + disable_hook: + type: bool + history_max: + type: int + purge: + type: bool + replace: + type: bool + skip_crds: + type: bool + wait: + type: bool + default: true + wait_timeout: + type: str + + repositories: + type: list + elements: dict + description: | + List of dictionaries passed as arguments to + kubernetes.core.helm_repository. + default: [] + options: + name: + type: str + required: true + password: + type: str + username: + type: str + url: + type: str + release_common_opts: + type: dict + description: | + Common arguments for every helm invocation. 
+ default: {} + options: + create_namespace: + type: bool + default: true + chart_repo_url: + type: str + disable_hook: + type: bool + history_max: + type: int + purge: + type: bool + replace: + type: bool + skip_crds: + type: bool + wait: + type: bool + default: true + wait_timeout: + type: str + default: "5m" diff --git a/kubespray/extra_playbooks/roles/helm-apps/tasks/main.yml b/kubespray/extra_playbooks/roles/helm-apps/tasks/main.yml new file mode 100644 index 0000000..ed55c5a --- /dev/null +++ b/kubespray/extra_playbooks/roles/helm-apps/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Add Helm repositories + kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}" + loop: "{{ repositories }}" + +- name: Update Helm repositories + kubernetes.core.helm: + state: absent + binary_path: "{{ bin_dir }}/helm" + release_name: dummy # trick needed to refresh in separate step + release_namespace: kube-system + update_repo_cache: true + when: repositories != [] + +- name: Install Helm Applications + kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}" + loop: "{{ releases }}" diff --git a/kubespray/extra_playbooks/roles/helm-apps/vars/main.yml b/kubespray/extra_playbooks/roles/helm-apps/vars/main.yml new file mode 100644 index 0000000..a7baa66 --- /dev/null +++ b/kubespray/extra_playbooks/roles/helm-apps/vars/main.yml @@ -0,0 +1,7 @@ +--- +helm_defaults: + atomic: true + binary_path: "{{ bin_dir }}/helm" + +helm_repository_defaults: + binary_path: "{{ bin_dir }}/helm" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/defaults/main.yml new file mode 100644 index 0000000..66b7673 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/defaults/main.yml @@ -0,0 +1,93 @@ +--- +# Limits for coredns +dns_memory_limit: 300Mi +dns_cpu_requests: 100m +dns_memory_requests: 70Mi +dns_min_replicas: "{{ [ 2, groups['k8s_cluster'] | length ] | min }}" +dns_nodes_per_replica: 16 +dns_cores_per_replica: 256 +dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas|int > 1 else 'false' }}" +enable_coredns_reverse_dns_lookups: true +coredns_ordinal_suffix: "" +# dns_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}] +coredns_deployment_nodeselector: "kubernetes.io/os: linux" +coredns_default_zone_cache_block: | + cache 30 + +# dns_upstream_forward_extra_opts apply to coredns forward section as well as nodelocaldns upstream target forward section +# dns_upstream_forward_extra_opts: +# policy: sequential + +# nodelocaldns +nodelocaldns_cpu_requests: 100m +nodelocaldns_memory_limit: 200Mi +nodelocaldns_memory_requests: 70Mi +nodelocaldns_ds_nodeselector: "kubernetes.io/os: linux" +nodelocaldns_prometheus_port: 9253 +nodelocaldns_secondary_prometheus_port: 9255 + +# Limits for dns-autoscaler +dns_autoscaler_cpu_requests: 20m +dns_autoscaler_memory_requests: 10Mi +dns_autoscaler_deployment_nodeselector: "kubernetes.io/os: linux" +# dns_autoscaler_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}] + +# etcd metrics +# etcd_metrics_service_labels: +# k8s-app: etcd +# app.kubernetes.io/managed-by: Kubespray +# app: kube-prometheus-stack-kube-etcd +# release: prometheus-stack + +# Netchecker +deploy_netchecker: false +netchecker_port: 31081 +agent_report_interval: 15 +netcheck_namespace: default + +# Limits for netchecker apps +netchecker_agent_cpu_limit: 30m +netchecker_agent_memory_limit: 100M +netchecker_agent_cpu_requests: 15m 
+netchecker_agent_memory_requests: 64M +netchecker_server_cpu_limit: 100m +netchecker_server_memory_limit: 256M +netchecker_server_cpu_requests: 50m +netchecker_server_memory_requests: 64M +netchecker_etcd_cpu_limit: 200m +netchecker_etcd_memory_limit: 256M +netchecker_etcd_cpu_requests: 100m +netchecker_etcd_memory_requests: 128M + +# SecurityContext when PodSecurityPolicy is enabled +netchecker_agent_user: 1000 +netchecker_server_user: 1000 +netchecker_agent_group: 1000 +netchecker_server_group: 1000 + +# Dashboard +dashboard_replicas: 1 + +# Namespace for dashboard +dashboard_namespace: kube-system + +# Limits for dashboard +dashboard_cpu_limit: 100m +dashboard_memory_limit: 256M +dashboard_cpu_requests: 50m +dashboard_memory_requests: 64M + +# Set dashboard_use_custom_certs to true if overriding dashboard_certs_secret_name with a secret that +# contains dashboard_tls_key_file and dashboard_tls_cert_file instead of using the initContainer provisioned certs +dashboard_use_custom_certs: false +dashboard_certs_secret_name: kubernetes-dashboard-certs +dashboard_tls_key_file: dashboard.key +dashboard_tls_cert_file: dashboard.crt +dashboard_master_toleration: true + +# Override dashboard default settings +dashboard_token_ttl: 900 +dashboard_skip_login: false + +# Policy Controllers +# policy_controller_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml new file mode 100644 index 0000000..fef5246 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml @@ -0,0 +1,44 @@ +--- +- name: Kubernetes Apps | Register coredns deployment annotation `createdby` + command: "{{ kubectl }} get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'" + register: createdby_annotation_deploy + changed_when: false + check_mode: false + ignore_errors: true # noqa ignore-errors + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Register coredns service annotation `createdby` + command: "{{ kubectl }} get svc -n kube-system coredns -o jsonpath='{ .metadata.annotations.createdby }'" + register: createdby_annotation_svc + changed_when: false + check_mode: false + ignore_errors: true # noqa ignore-errors + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Delete kubeadm CoreDNS + kube: + name: "coredns" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "deploy" + state: absent + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + - createdby_annotation_deploy.stdout != 'kubespray' + +- name: Kubernetes Apps | Delete kubeadm Kube-DNS service + kube: + name: "kube-dns" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "svc" + state: absent + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + - createdby_annotation_svc.stdout != 'kubespray' diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/coredns.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/coredns.yml new file mode 100644 index 0000000..d8f8547 --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/coredns.yml @@ -0,0 +1,44 @@ +--- +- name: Kubernetes Apps | Lay Down CoreDNS templates + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + loop: + - { name: coredns, file: coredns-clusterrole.yml, type: clusterrole } + - { name: coredns, file: coredns-clusterrolebinding.yml, type: clusterrolebinding } + - { name: coredns, file: coredns-config.yml, type: configmap } + - { name: coredns, file: coredns-deployment.yml, type: deployment } + - { name: coredns, file: coredns-sa.yml, type: sa } + - { name: coredns, file: coredns-svc.yml, type: svc } + - { name: dns-autoscaler, file: dns-autoscaler.yml, type: deployment } + - { name: dns-autoscaler, file: dns-autoscaler-clusterrole.yml, type: clusterrole } + - { name: dns-autoscaler, file: dns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding } + - { name: dns-autoscaler, file: dns-autoscaler-sa.yml, type: sa } + register: coredns_manifests + vars: + clusterIP: "{{ skydns_server }}" + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - coredns + +- name: Kubernetes Apps | Lay Down Secondary CoreDNS Template + template: + src: "{{ item.src }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - { name: coredns, src: coredns-deployment.yml, file: coredns-deployment-secondary.yml, type: deployment } + - { name: coredns, src: coredns-svc.yml, file: coredns-svc-secondary.yml, type: svc } + - { name: dns-autoscaler, src: dns-autoscaler.yml, file: coredns-autoscaler-secondary.yml, type: deployment } + register: coredns_secondary_manifests + vars: + clusterIP: "{{ skydns_server_secondary }}" + coredns_ordinal_suffix: "-secondary" + when: + - dns_mode == 'coredns_dual' + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - coredns diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/dashboard.yml new file mode 100644 index 0000000..480b3db --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/dashboard.yml @@ -0,0 +1,21 @@ +--- +- name: Kubernetes Apps | Lay down dashboard template + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - { file: dashboard.yml, type: deploy, name: kubernetes-dashboard } + register: manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Start dashboard + kube: + name: "{{ item.item.name }}" + namespace: "{{ dashboard_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/etcd_metrics.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/etcd_metrics.yml new file mode 100644 index 0000000..548de89 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/etcd_metrics.yml @@ -0,0 +1,22 @@ +--- +- name: Kubernetes Apps | Lay down etcd_metrics templates + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - { file: etcd_metrics-endpoints.yml, type: endpoints, name: etcd-metrics } + - { 
file: etcd_metrics-service.yml, type: service, name: etcd-metrics } + register: manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Start etcd_metrics + kube: + name: "{{ item.item.name }}" + namespace: kube-system + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/main.yml new file mode 100644 index 0000000..4a0180e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/main.yml @@ -0,0 +1,82 @@ +--- +- name: Kubernetes Apps | Wait for kube-apiserver + uri: + url: "{{ kube_apiserver_endpoint }}/healthz" + validate_certs: no + client_cert: "{{ kube_apiserver_client_cert }}" + client_key: "{{ kube_apiserver_client_key }}" + register: result + until: result.status == 200 + retries: 20 + delay: 1 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Cleanup DNS + import_tasks: cleanup_dns.yml + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + - coredns + - nodelocaldns + +- name: Kubernetes Apps | CoreDNS + import_tasks: "coredns.yml" + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - coredns + +- name: Kubernetes Apps | nodelocalDNS + import_tasks: "nodelocaldns.yml" + when: + - enable_nodelocaldns + - inventory_hostname == groups['kube_control_plane'] | first + tags: + - nodelocaldns + +- name: Kubernetes Apps | Start Resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ coredns_manifests.results | default({}) }}" + - "{{ coredns_secondary_manifests.results | default({}) }}" + - "{{ nodelocaldns_manifests.results | default({}) }}" + - "{{ nodelocaldns_second_manifests.results | default({}) }}" + when: + - dns_mode != 'none' + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + register: resource_result + until: resource_result is succeeded + retries: 4 + delay: 5 + tags: + - coredns + - nodelocaldns + loop_control: + label: "{{ item.item.file }}" + +- name: Kubernetes Apps | Etcd metrics endpoints + import_tasks: etcd_metrics.yml + when: etcd_metrics_port is defined and etcd_metrics_service_labels is defined + tags: + - etcd_metrics + +- name: Kubernetes Apps | Netchecker + import_tasks: netchecker.yml + when: deploy_netchecker + tags: + - netchecker + +- name: Kubernetes Apps | Dashboard + import_tasks: dashboard.yml + when: dashboard_enabled + tags: + - dashboard diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/netchecker.yml new file mode 100644 index 0000000..b83fd33 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -0,0 +1,56 @@ +--- +- name: Kubernetes Apps | Check AppArmor status + command: which apparmor_parser + register: apparmor_status + when: + - inventory_hostname == groups['kube_control_plane'][0] + failed_when: false + +- name: Kubernetes Apps | Set 
apparmor_enabled + set_fact: + apparmor_enabled: "{{ apparmor_status.rc == 0 }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Netchecker Templates list + set_fact: + netchecker_templates: + - {file: netchecker-ns.yml, type: ns, name: netchecker-namespace} + - {file: netchecker-agent-sa.yml, type: sa, name: netchecker-agent} + - {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent} + - {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet} + - {file: netchecker-server-sa.yml, type: sa, name: netchecker-server} + - {file: netchecker-server-clusterrole.yml, type: clusterrole, name: netchecker-server} + - {file: netchecker-server-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-server} + - {file: netchecker-server-deployment.yml, type: deployment, name: netchecker-server} + - {file: netchecker-server-svc.yml, type: svc, name: netchecker-service} + netchecker_templates_for_psp: + - {file: netchecker-agent-hostnet-psp.yml, type: podsecuritypolicy, name: netchecker-agent-hostnet-policy} + - {file: netchecker-agent-hostnet-clusterrole.yml, type: clusterrole, name: netchecker-agent} + - {file: netchecker-agent-hostnet-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-agent} + +- name: Kubernetes Apps | Append extra templates to Netchecker Templates list for PodSecurityPolicy + set_fact: + netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates }}" + when: podsecuritypolicy_enabled + +- name: Kubernetes Apps | Lay Down Netchecker Template + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: "{{ netchecker_templates }}" + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Start Netchecker Resources + kube: + name: "{{ item.item.name }}" + namespace: "{{ netcheck_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml new file mode 100644 index 0000000..b94509f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml @@ -0,0 +1,75 @@ +--- +- name: Kubernetes Apps | set up necessary nodelocaldns parameters + set_fact: + primaryClusterIP: >- + {%- if dns_mode in ['coredns', 'coredns_dual'] -%} + {{ skydns_server }} + {%- elif dns_mode == 'manual' -%} + {{ manual_dns_server }} + {%- endif -%} + secondaryclusterIP: "{{ skydns_server_secondary }}" + when: + - enable_nodelocaldns + - inventory_hostname == groups['kube_control_plane'] | first + tags: + - nodelocaldns + - coredns + +- name: Kubernetes Apps | Lay Down nodelocaldns Template + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - { name: nodelocaldns, file: nodelocaldns-config.yml, type: configmap } + - { name: nodelocaldns, file: nodelocaldns-sa.yml, type: sa } + - { name: nodelocaldns, file: nodelocaldns-daemonset.yml, type: daemonset } + register: nodelocaldns_manifests + vars: + forwardTarget: >- + {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%} + {{ 
primaryClusterIP }} {{ secondaryclusterIP }} + {%- else -%} + {{ primaryClusterIP }} + {%- endif -%} + upstreamForwardTarget: >- + {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%} + {{ upstream_dns_servers|join(' ') }} + {%- else -%} + /etc/resolv.conf + {%- endif -%} + when: + - enable_nodelocaldns + - inventory_hostname == groups['kube_control_plane'] | first + tags: + - nodelocaldns + - coredns + +- name: Kubernetes Apps | Lay Down nodelocaldns-secondary Template + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - { name: nodelocaldns, file: nodelocaldns-second-daemonset.yml, type: daemonset } + register: nodelocaldns_second_manifests + vars: + forwardTarget: >- + {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%} + {{ primaryClusterIP }} {{ secondaryclusterIP }} + {%- else -%} + {{ primaryClusterIP }} + {%- endif -%} + upstreamForwardTarget: >- + {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%} + {{ upstream_dns_servers|join(' ') }} + {%- else -%} + /etc/resolv.conf + {%- endif -%} + when: + - enable_nodelocaldns + - enable_nodelocaldns_secondary + - inventory_hostname == groups['kube_control_plane'] | first + tags: + - nodelocaldns + - coredns diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 new file mode 100644 index 0000000..79c4e77 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 @@ -0,0 +1,32 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + addonmanager.kubernetes.io/mode: Reconcile + name: system:coredns +rules: + - apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..af7f684 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 @@ -0,0 +1,18 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + addonmanager.kubernetes.io/mode: EnsureExists + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: + - kind: ServiceAccount + name: coredns + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 new file mode 100644 index 0000000..44eea93 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 @@ -0,0 +1,74 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +data: + Corefile: | +{% if 
coredns_external_zones is defined and coredns_external_zones|length > 0 %} +{% for block in coredns_external_zones %} + {{ block['zones'] | join(' ') }} { + log + errors +{% if block['rewrite'] is defined and block['rewrite']|length > 0 %} +{% for rewrite_match in block['rewrite'] %} + rewrite {{ rewrite_match }} +{% endfor %} +{% endif %} + forward . {{ block['nameservers'] | join(' ') }} + loadbalance + cache {{ block['cache'] | default(5) }} + reload +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% endfor %} +{% endif %} + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes {{ dns_domain }} {% if enable_coredns_reverse_dns_lookups %}in-addr.arpa ip6.arpa {% endif %}{ + pods insecure +{% if enable_coredns_k8s_endpoint_pod_names %} + endpoint_pod_names +{% endif %} +{% if enable_coredns_reverse_dns_lookups %} + fallthrough in-addr.arpa ip6.arpa +{% endif %} + } + prometheus :9153 + forward . {{ upstream_dns_servers|join(' ') if upstream_dns_servers is defined and upstream_dns_servers|length > 0 else '/etc/resolv.conf' }} { + prefer_udp + max_concurrent 1000 +{% if dns_upstream_forward_extra_opts is defined %} +{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} + {{ optname }} {{ optvalue }} +{% endfor %} +{% endif %} + } +{% if enable_coredns_k8s_external %} + k8s_external {{ coredns_k8s_external_zone }} +{% endif %} + {{ coredns_default_zone_cache_block | indent(width=8, first=False) }} + loop + reload + loadbalance +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% if dns_etchosts | default(None) %} + hosts: | + {{ dns_etchosts | indent(width=4, first=False) }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 new file mode 100644 index 0000000..fa81069 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 @@ -0,0 +1,119 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "coredns{{ coredns_ordinal_suffix }}" + namespace: kube-system + labels: + k8s-app: "kube-dns{{ coredns_ordinal_suffix }}" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "coredns{{ coredns_ordinal_suffix }}" +spec: + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 10% + selector: + matchLabels: + k8s-app: kube-dns{{ coredns_ordinal_suffix }} + template: + metadata: + labels: + k8s-app: kube-dns{{ coredns_ordinal_suffix }} + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + createdby: 'kubespray' + spec: + nodeSelector: + {{ coredns_deployment_nodeselector }} + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% if dns_extra_tolerations is defined %} + {{ dns_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }} +{% endif %} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + k8s-app: kube-dns{{ coredns_ordinal_suffix }} + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + 
operator: In + values: + - "" + containers: + - name: coredns + image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. + limits: + memory: {{ dns_memory_limit }} + requests: + cpu: {{ dns_cpu_requests }} + memory: {{ dns_memory_requests }} + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile +{% if dns_etchosts | default(None) %} + - key: hosts + path: hosts +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 new file mode 100644 index 0000000..daebd6a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 new file mode 100644 index 0000000..0e051c3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 @@ -0,0 +1,28 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: coredns{{ coredns_ordinal_suffix }} + namespace: kube-system + labels: + k8s-app: kube-dns{{ coredns_ordinal_suffix }} + kubernetes.io/name: "coredns{{ coredns_ordinal_suffix }}" + addonmanager.kubernetes.io/mode: Reconcile + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + createdby: 'kubespray' +spec: + selector: + k8s-app: kube-dns{{ coredns_ordinal_suffix }} + clusterIP: {{ clusterIP }} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 new file mode 100644 index 0000000..b0c3419 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 @@ -0,0 +1,339 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration to deploy release version of the Dashboard UI compatible with +# Kubernetes 1.8. +# +# Example usage: kubectl create -f + +{% if dashboard_namespace != "kube-system" %} +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ dashboard_namespace }} + labels: + name: {{ dashboard_namespace }} +{% endif %} + +--- +# ------------------- Dashboard Secrets ------------------- # +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: {{ dashboard_namespace }} +type: Opaque + +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: {{ dashboard_namespace }} +type: Opaque +data: + csrf: "" + +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: {{ dashboard_namespace }} +type: Opaque + +--- +# ------------------- Dashboard ConfigMap ------------------- # +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: {{ dashboard_namespace }} + +--- +# ------------------- Dashboard Service Account ------------------- # + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} + +--- +# ------------------- Dashboard Role & Role Binding ------------------- # +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics. 
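+  # (i.e. proxy/get access to the heapster and dashboard-metrics-scraper services named in the rules below)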
+ - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} + +--- +# ------------------- Dashboard Deployment ------------------- # + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} +spec: + replicas: {{ dashboard_replicas }} + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + priorityClassName: system-cluster-critical + containers: + - name: kubernetes-dashboard + image: {{ dashboard_image_repo }}:{{ dashboard_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ dashboard_cpu_limit }} + memory: {{ dashboard_memory_limit }} + requests: + cpu: {{ dashboard_cpu_requests }} + memory: {{ dashboard_memory_requests }} + ports: + - containerPort: 8443 + protocol: TCP + args: + - --namespace={{ dashboard_namespace }} +{% if dashboard_use_custom_certs %} + - --tls-key-file={{ dashboard_tls_key_file }} + - --tls-cert-file={{ dashboard_tls_cert_file }} +{% else %} + - --auto-generate-certificates +{% endif %} +{% if dashboard_skip_login %} + - --enable-skip-login +{% endif %} + - --authentication-mode=token + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + - --token-ttl={{ dashboard_token_ttl }} + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: {{ dashboard_certs_secret_name }} + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard +{% if dashboard_master_toleration %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% endif %} + +--- +# ------------------- Dashboard Service ------------------- # + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + k8s-app: kubernetes-dashboard + +--- +# ------------------- Metrics Scrapper Service Account ------------------- # + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + +--- + +# ------------------- Metrics Scrapper Service ------------------- # +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-metrics-scraper + name: dashboard-metrics-scraper + namespace: {{ dashboard_namespace }} +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: kubernetes-metrics-scraper + +--- + +# ------------------- Metrics Scrapper Deployment ------------------- # +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-metrics-scraper + name: kubernetes-metrics-scraper + namespace: {{ dashboard_namespace }} +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-metrics-scraper + template: + metadata: + labels: + k8s-app: kubernetes-metrics-scraper + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + priorityClassName: system-cluster-critical + containers: + - name: kubernetes-metrics-scraper + image: {{ dashboard_metrics_scraper_repo }}:{{ dashboard_metrics_scraper_tag }} + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + serviceAccountName: kubernetes-dashboard + volumes: + - name: tmp-volume + emptyDir: {} +{% if dashboard_master_toleration %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrole.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrole.yml.j2 new file mode 100644 index 0000000..ef642ce --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrole.yml.j2 @@ -0,0 +1,34 @@ +--- +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:dns-autoscaler + labels: + addonmanager.kubernetes.io/mode: Reconcile +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrolebinding.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..da1a0a9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrolebinding.yml.j2 @@ -0,0 +1,29 @@ +--- +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:dns-autoscaler + labels: + addonmanager.kubernetes.io/mode: Reconcile +subjects: + - kind: ServiceAccount + name: dns-autoscaler + namespace: kube-system +roleRef: + kind: ClusterRole + name: system:dns-autoscaler + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler-sa.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler-sa.yml.j2 new file mode 100644 index 0000000..3ce9b51 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler-sa.yml.j2 @@ -0,0 +1,22 @@ +--- +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: dns-autoscaler + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 new file mode 100644 index 0000000..6ea1651 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 @@ -0,0 +1,87 @@ +--- +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dns-autoscaler{{ coredns_ordinal_suffix }} + namespace: kube-system + labels: + k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }} + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }} + template: + metadata: + labels: + k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }} + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + nodeSelector: + {{ dns_autoscaler_deployment_nodeselector}} + priorityClassName: system-cluster-critical + securityContext: + supplementalGroups: [ 65534 ] + fsGroup: 65534 + nodeSelector: + kubernetes.io/os: linux + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane +{% if dns_autoscaler_extra_tolerations is defined %} + {{ dns_autoscaler_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }} +{% endif %} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }} + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: In + values: + - "" + containers: + - name: autoscaler + image: "{{ dnsautoscaler_image_repo }}:{{ dnsautoscaler_image_tag }}" + resources: + requests: + cpu: {{ dns_autoscaler_cpu_requests }} + memory: {{ dns_autoscaler_memory_requests }} + readinessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --default-params={"linear":{"preventSinglePointFailure":{{ dns_prevent_single_point_failure }},"coresPerReplica":{{ dns_cores_per_replica }},"nodesPerReplica":{{ dns_nodes_per_replica }},"min":{{ dns_min_replicas }}}} + - --logtostderr=true + - --v=2 + - --configmap=dns-autoscaler{{ coredns_ordinal_suffix }} + - --target=Deployment/coredns{{ coredns_ordinal_suffix }} + serviceAccountName: dns-autoscaler diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/etcd_metrics-endpoints.yml.j2 
b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/etcd_metrics-endpoints.yml.j2 new file mode 100644 index 0000000..18f515d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/etcd_metrics-endpoints.yml.j2 @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Endpoints +metadata: + name: etcd-metrics + namespace: kube-system + labels: + k8s-app: etcd + app.kubernetes.io/managed-by: Kubespray +subsets: +{% for etcd_metrics_address, etcd_host in etcd_metrics_addresses.split(',') | zip(etcd_hosts) %} + - addresses: + - ip: {{ etcd_metrics_address | urlsplit('hostname') }} + targetRef: + kind: Node + name: {{ etcd_host }} + ports: + - name: http-metrics + port: {{ etcd_metrics_address | urlsplit('port') }} + protocol: TCP +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/etcd_metrics-service.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/etcd_metrics-service.yml.j2 new file mode 100644 index 0000000..5bd9254 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/etcd_metrics-service.yml.j2 @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: etcd-metrics + namespace: kube-system + labels: + {{ etcd_metrics_service_labels | to_yaml(indent=2, width=1337) | indent(width=4) }} +spec: + ports: + - name: http-metrics + protocol: TCP + port: {{ etcd_metrics_port }} + # targetPort: diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 new file mode 100644 index 0000000..47dbf70 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 @@ -0,0 +1,56 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: netchecker-agent + name: netchecker-agent + namespace: {{ netcheck_namespace }} +spec: + selector: + matchLabels: + app: netchecker-agent + template: + metadata: + name: netchecker-agent + labels: + app: netchecker-agent + spec: + priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + tolerations: + - effect: NoSchedule + operator: Exists + nodeSelector: + kubernetes.io/os: linux + containers: + - name: netchecker-agent + image: "{{ netcheck_agent_image_repo }}:{{ netcheck_agent_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + args: + - "-v=5" + - "-alsologtostderr=true" + - "-serverendpoint=netchecker-service:8081" + - "-reportinterval={{ agent_report_interval }}" + resources: + limits: + cpu: {{ netchecker_agent_cpu_limit }} + memory: {{ netchecker_agent_memory_limit }} + requests: + cpu: {{ netchecker_agent_cpu_requests }} + memory: {{ netchecker_agent_memory_requests }} + securityContext: + runAsUser: {{ netchecker_agent_user | default('0') }} + runAsGroup: {{ netchecker_agent_group | default('0') }} + serviceAccountName: netchecker-agent + updateStrategy: + rollingUpdate: + maxUnavailable: 100% + type: RollingUpdate diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrole.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrole.yml.j2 new file mode 100644 index 0000000..0e23150 
--- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrole.yml.j2 @@ -0,0 +1,14 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp:netchecker-agent-hostnet + namespace: {{ netcheck_namespace }} +rules: + - apiGroups: + - policy + resourceNames: + - netchecker-agent-hostnet + resources: + - podsecuritypolicies + verbs: + - use diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..cf44515 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2 @@ -0,0 +1,13 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp:netchecker-agent-hostnet + namespace: {{ netcheck_namespace }} +subjects: + - kind: ServiceAccount + name: netchecker-agent + namespace: {{ netcheck_namespace }} +roleRef: + kind: ClusterRole + name: psp:netchecker-agent-hostnet + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 new file mode 100644 index 0000000..8b2e51a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: netchecker-agent-hostnet + name: netchecker-agent-hostnet + namespace: {{ netcheck_namespace }} +spec: + selector: + matchLabels: + app: netchecker-agent-hostnet + template: + metadata: + name: netchecker-agent-hostnet + labels: + app: netchecker-agent-hostnet + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + kubernetes.io/os: linux + priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + tolerations: + - effect: NoSchedule + operator: Exists + containers: + - name: netchecker-agent + image: "{{ netcheck_agent_image_repo }}:{{ netcheck_agent_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + args: + - "-v=5" + - "-alsologtostderr=true" + - "-serverendpoint=netchecker-service:8081" + - "-reportinterval={{ agent_report_interval }}" + resources: + limits: + cpu: {{ netchecker_agent_cpu_limit }} + memory: {{ netchecker_agent_memory_limit }} + requests: + cpu: {{ netchecker_agent_cpu_requests }} + memory: {{ netchecker_agent_memory_requests }} + securityContext: + runAsUser: {{ netchecker_agent_user | default('0') }} + runAsGroup: {{ netchecker_agent_group | default('0') }} + serviceAccountName: netchecker-agent + updateStrategy: + rollingUpdate: + maxUnavailable: 100% + type: RollingUpdate diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-psp.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-psp.yml.j2 new file mode 100644 index 0000000..21b397d --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-psp.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: netchecker-agent-hostnet + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-sa.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-sa.yml.j2 new file mode 100644 index 0000000..c544043 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-agent-sa.yml.j2 @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: netchecker-agent + namespace: {{ netcheck_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-ns.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-ns.yml.j2 new file mode 100644 index 0000000..3dd87aa --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-ns.yml.j2 @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: "{{ netcheck_namespace }}" + labels: + name: "{{ netcheck_namespace }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 new file mode 100644 index 0000000..290dec3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 @@ -0,0 +1,9 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: netchecker-server + namespace: {{ netcheck_namespace }} +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["list", "get"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..55301b7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 @@ -0,0 +1,13 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: netchecker-server + namespace: {{ netcheck_namespace }} +subjects: + - kind: ServiceAccount + name: netchecker-server + namespace: {{ netcheck_namespace }} +roleRef: + kind: ClusterRole + name: netchecker-server + apiGroup: rbac.authorization.k8s.io diff --git 
a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 new file mode 100644 index 0000000..edda5c5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 @@ -0,0 +1,83 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: netchecker-server + namespace: {{ netcheck_namespace }} + labels: + app: netchecker-server +spec: + replicas: 1 + selector: + matchLabels: + app: netchecker-server + template: + metadata: + name: netchecker-server + labels: + app: netchecker-server + spec: + priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + volumes: + - name: etcd-data + emptyDir: {} + containers: + - name: netchecker-server + image: "{{ netcheck_server_image_repo }}:{{ netcheck_server_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ netchecker_server_cpu_limit }} + memory: {{ netchecker_server_memory_limit }} + requests: + cpu: {{ netchecker_server_cpu_requests }} + memory: {{ netchecker_server_memory_requests }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + runAsUser: {{ netchecker_server_user | default('0') }} + runAsGroup: {{ netchecker_server_group | default('0') }} + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + ports: + - containerPort: 8081 + args: + - -v=5 + - -logtostderr + - -kubeproxyinit=false + - -endpoint=0.0.0.0:8081 + - -etcd-endpoints=http://127.0.0.1:2379 + - name: etcd + image: "{{ etcd_image_repo }}:{{ netcheck_etcd_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - etcd + - --listen-client-urls=http://127.0.0.1:2379 + - --advertise-client-urls=http://127.0.0.1:2379 + - --data-dir=/var/lib/etcd + - --enable-v2 + - --force-new-cluster + volumeMounts: + - mountPath: /var/lib/etcd + name: etcd-data + resources: + limits: + cpu: {{ netchecker_etcd_cpu_limit }} + memory: {{ netchecker_etcd_memory_limit }} + requests: + cpu: {{ netchecker_etcd_cpu_requests }} + memory: {{ netchecker_etcd_memory_requests }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + runAsUser: {{ netchecker_server_user | default('0') }} + runAsGroup: {{ netchecker_server_group | default('0') }} + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + tolerations: + - effect: NoSchedule + operator: Exists + serviceAccountName: netchecker-server diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2 new file mode 100644 index 0000000..e3ec07f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2 @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: netchecker-server + namespace: {{ netcheck_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-svc.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-svc.yml.j2 new file mode 100644 index 0000000..dc38946 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/netchecker-server-svc.yml.j2 @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + 
name: netchecker-service + namespace: {{ netcheck_namespace }} +spec: + selector: + app: netchecker-server + ports: + - + protocol: TCP + port: 8081 + targetPort: 8081 + nodePort: {{ netchecker_port }} + type: NodePort diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 new file mode 100644 index 0000000..231c8ba --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 @@ -0,0 +1,182 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nodelocaldns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists + +data: + Corefile: | +{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %} +{% for block in nodelocaldns_external_zones %} + {{ block['zones'] | join(' ') }} { + errors + cache {{ block['cache'] | default(30) }} + reload +{% if block['rewrite'] is defined and block['rewrite']|length > 0 %} +{% for rewrite_match in block['rewrite'] %} + rewrite {{ rewrite_match }} +{% endfor %} +{% endif %} + loop + bind {{ nodelocaldns_ip }} + forward . {{ block['nameservers'] | join(' ') }} + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} + log +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% endfor %} +{% endif %} + {{ dns_domain }}:53 { + errors + cache { + success 9984 30 + denial 9984 5 + } + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} + health {{ nodelocaldns_ip }}:{{ nodelocaldns_health_port }} +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } + in-addr.arpa:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} + } + ip6.arpa:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} + } + .:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} { +{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} + {{ optname }} {{ optvalue }} +{% endfor %} + }{% endif %} + + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% if enable_nodelocaldns_secondary %} + Corefile-second: | +{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %} +{% for block in nodelocaldns_external_zones %} + {{ block['zones'] | join(' ') }} { + errors + cache {{ block['cache'] | default(30) }} + reload + loop + bind {{ nodelocaldns_ip }} + forward . 
{{ block['nameservers'] | join(' ') }} + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} + log +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% endfor %} +{% endif %} + {{ dns_domain }}:53 { + errors + cache { + success 9984 30 + denial 9984 5 + } + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} + health {{ nodelocaldns_ip }}:{{ nodelocaldns_second_health_port }} +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } + in-addr.arpa:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} + } + ip6.arpa:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} + } + .:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} { +{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} + {{ optname }} {{ optvalue }} +{% endfor %} + }{% endif %} + + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% endif %} +{% if dns_etchosts | default(None) %} + hosts: | + {{ dns_etchosts | indent(width=4, first=False) }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-daemonset.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-daemonset.yml.j2 new file mode 100644 index 0000000..7c63e28 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-daemonset.yml.j2 @@ -0,0 +1,115 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nodelocaldns + namespace: kube-system + labels: + k8s-app: kube-dns + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: nodelocaldns + template: + metadata: + labels: + k8s-app: nodelocaldns + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '{{ nodelocaldns_prometheus_port }}' + spec: + nodeSelector: + {{ nodelocaldns_ds_nodeselector }} + priorityClassName: system-cluster-critical + serviceAccountName: nodelocaldns + hostNetwork: true + dnsPolicy: Default # Don't use cluster DNS. 
+ tolerations: + - effect: NoSchedule + operator: "Exists" + - effect: NoExecute + operator: "Exists" + containers: + - name: node-cache + image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}" + resources: + limits: + memory: {{ nodelocaldns_memory_limit }} + requests: + cpu: {{ nodelocaldns_cpu_requests }} + memory: {{ nodelocaldns_memory_requests }} + args: + - -localip + - {{ nodelocaldns_ip }} + - -conf + - /etc/coredns/Corefile + - -upstreamsvc + - coredns +{% if enable_nodelocaldns_secondary %} + - -skipteardown +{% else %} + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9253 + name: metrics + protocol: TCP +{% endif %} + securityContext: + privileged: true +{% if nodelocaldns_bind_metrics_host_ip %} + env: + - name: MY_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP +{% endif %} + livenessProbe: + httpGet: + host: {{ nodelocaldns_ip }} + path: /health + port: {{ nodelocaldns_health_port }} + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + readinessProbe: + httpGet: + host: {{ nodelocaldns_ip }} + path: /health + port: {{ nodelocaldns_health_port }} + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + - name: xtables-lock + mountPath: /run/xtables.lock + volumes: + - name: config-volume + configMap: + name: nodelocaldns + items: + - key: Corefile + path: Corefile +{% if dns_etchosts | default(None) %} + - key: hosts + path: hosts +{% endif %} + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 
+ terminationGracePeriodSeconds: 0 + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-sa.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-sa.yml.j2 new file mode 100644 index 0000000..bd962d8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-sa.yml.j2 @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nodelocaldns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-second-daemonset.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-second-daemonset.yml.j2 new file mode 100644 index 0000000..037bf44 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ansible/templates/nodelocaldns-second-daemonset.yml.j2 @@ -0,0 +1,103 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nodelocaldns-second + namespace: kube-system + labels: + k8s-app: kube-dns + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: nodelocaldns-second + template: + metadata: + labels: + k8s-app: nodelocaldns-second + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '{{ nodelocaldns_secondary_prometheus_port }}' + spec: + nodeSelector: + {{ nodelocaldns_ds_nodeselector }} + priorityClassName: system-cluster-critical + serviceAccountName: nodelocaldns + hostNetwork: true + dnsPolicy: Default # Don't use cluster DNS. + tolerations: + - effect: NoSchedule + operator: "Exists" + - effect: NoExecute + operator: "Exists" + containers: + - name: node-cache + image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}" + resources: + limits: + memory: {{ nodelocaldns_memory_limit }} + requests: + cpu: {{ nodelocaldns_cpu_requests }} + memory: {{ nodelocaldns_memory_requests }} + args: [ "-localip", "{{ nodelocaldns_ip }}", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns", "-skipteardown" ] + securityContext: + privileged: true +{% if nodelocaldns_bind_metrics_host_ip %} + env: + - name: MY_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP +{% endif %} + livenessProbe: + httpGet: + host: {{ nodelocaldns_ip }} + path: /health + port: {{ nodelocaldns_health_port }} + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + readinessProbe: + httpGet: + host: {{ nodelocaldns_ip }} + path: /health + port: {{ nodelocaldns_health_port }} + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + - name: xtables-lock + mountPath: /run/xtables.lock + lifecycle: + preStop: + exec: + command: + - sh + - -c + - sleep {{ nodelocaldns_secondary_skew_seconds }} && kill -9 1 + volumes: + - name: config-volume + configMap: + name: nodelocaldns + items: + - key: Corefile-second + path: Corefile +{% if dns_etchosts | default(None) %} + - key: hosts + path: hosts +{% endif %} + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Implement a time skew between the main nodelocaldns and this secondary. 
+ # Since the two nodelocaldns instances share the :53 port, we want to keep + at least one running at any time even if the manifests are replaced simultaneously + terminationGracePeriodSeconds: {{ nodelocaldns_secondary_skew_seconds }} + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/argocd/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/argocd/defaults/main.yml new file mode 100644 index 0000000..64f8a36 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/argocd/defaults/main.yml @@ -0,0 +1,5 @@ +--- +argocd_enabled: false +argocd_version: v2.5.5 +argocd_namespace: argocd +# argocd_admin_password: diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/argocd/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/argocd/tasks/main.yml new file mode 100644 index 0000000..a6a4450 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/argocd/tasks/main.yml @@ -0,0 +1,79 @@ +--- +- name: Kubernetes Apps | Install yq + become: yes + get_url: + url: "https://github.com/mikefarah/yq/releases/download/v4.30.6/yq_linux_{{ host_architecture }}" + dest: "{{ bin_dir }}/yq" + mode: '0755' + +- name: Kubernetes Apps | Set ArgoCD template list + set_fact: + argocd_templates: + - name: namespace + file: argocd-namespace.yml + - name: install + file: argocd-install.yml + namespace: "{{ argocd_namespace }}" + url: "https://raw.githubusercontent.com/argoproj/argo-cd/{{ argocd_version }}/manifests/install.yaml" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +- name: Kubernetes Apps | Download ArgoCD remote manifests + become: yes + get_url: + url: "{{ item.url }}" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}" + loop_control: + label: "{{ item.file }}" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +- name: Kubernetes Apps | Set ArgoCD namespace for remote manifests + become: yes + command: | + {{ bin_dir }}/yq eval-all -i '.metadata.namespace="{{ argocd_namespace }}"' {{ kube_config_dir }}/{{ item.file }} + with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}" + loop_control: + label: "{{ item.file }}" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +- name: Kubernetes Apps | Create ArgoCD manifests from templates + become: yes + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: "{{ argocd_templates | selectattr('url', 'undefined') | list }}" + loop_control: + label: "{{ item.file }}" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +- name: Kubernetes Apps | Install ArgoCD + become: yes + kube: + name: ArgoCD + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.file }}" + state: latest + with_items: "{{ argocd_templates }}" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +# https://github.com/argoproj/argo-cd/blob/master/docs/faq.md#i-forgot-the-admin-password-how-do-i-reset-it +- name: Kubernetes Apps | Set ArgoCD custom admin password + become: yes + shell: | + {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n {{ argocd_namespace }} patch secret argocd-secret -p \ + '{ + "stringData": { + "admin.password": "{{ argocd_admin_password | password_hash('bcrypt') }}", + "admin.passwordMtime": "'$(date 
+%FT%T%Z)'" + } + }' + when: + - argocd_admin_password is defined + - "inventory_hostname == groups['kube_control_plane'][0]" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/argocd/templates/argocd-namespace.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/argocd/templates/argocd-namespace.yml.j2 new file mode 100644 index 0000000..99962f1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/argocd/templates/argocd-namespace.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{argocd_namespace}} + labels: + app: argocd diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/defaults/main.yml new file mode 100644 index 0000000..9d7ddf0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/defaults/main.yml @@ -0,0 +1,6 @@ +--- + +oci_security_list_management: All +oci_use_instance_principals: false +oci_cloud_controller_version: 0.7.0 +oci_cloud_controller_pull_source: iad.ocir.io/oracle/cloud-provider-oci diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/tasks/credentials-check.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/tasks/credentials-check.yml new file mode 100644 index 0000000..9eb8794 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/tasks/credentials-check.yml @@ -0,0 +1,67 @@ +--- + +- name: "OCI Cloud Controller | Credentials Check | oci_private_key" + fail: + msg: "oci_private_key is missing" + when: + - not oci_use_instance_principals + - oci_private_key is not defined or not oci_private_key + +- name: "OCI Cloud Controller | Credentials Check | oci_region_id" + fail: + msg: "oci_region_id is missing" + when: + - not oci_use_instance_principals + - oci_region_id is not defined or not oci_region_id + +- name: "OCI Cloud Controller | Credentials Check | oci_tenancy_id" + fail: + msg: "oci_tenancy_id is missing" + when: + - not oci_use_instance_principals + - oci_tenancy_id is not defined or not oci_tenancy_id + +- name: "OCI Cloud Controller | Credentials Check | oci_user_id" + fail: + msg: "oci_user_id is missing" + when: + - not oci_use_instance_principals + - oci_user_id is not defined or not oci_user_id + +- name: "OCI Cloud Controller | Credentials Check | oci_user_fingerprint" + fail: + msg: "oci_user_fingerprint is missing" + when: + - not oci_use_instance_principals + - oci_user_fingerprint is not defined or not oci_user_fingerprint + +- name: "OCI Cloud Controller | Credentials Check | oci_compartment_id" + fail: + msg: "oci_compartment_id is missing. This is the compartment in which the cluster resides" + when: + - oci_compartment_id is not defined or not oci_compartment_id + +- name: "OCI Cloud Controller | Credentials Check | oci_vnc_id" + fail: + msg: "oci_vnc_id is missing. This is the Virtual Cloud Network in which the cluster resides" + when: + - oci_vnc_id is not defined or not oci_vnc_id + +- name: "OCI Cloud Controller | Credentials Check | oci_subnet1_id" + fail: + msg: "oci_subnet1_id is missing. This is the first subnet to which loadbalancers will be added" + when: + - oci_subnet1_id is not defined or not oci_subnet1_id + +- name: "OCI Cloud Controller | Credentials Check | oci_subnet2_id" + fail: + msg: "oci_subnet2_id is missing. 
Two subnets are required for load balancer high availability" + when: + - oci_cloud_controller_version is version_compare('0.7.0', '<') + - oci_subnet2_id is not defined or not oci_subnet2_id + +- name: "OCI Cloud Controller | Credentials Check | oci_security_list_management" + fail: + msg: "oci_security_list_management is missing, or not defined correctly. Valid options are (All, Frontend, None)." + when: + - oci_security_list_management is not defined or oci_security_list_management not in ["All", "Frontend", "None"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml new file mode 100644 index 0000000..2224ae5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml @@ -0,0 +1,34 @@ +--- + +- include: credentials-check.yml + +- name: "OCI Cloud Controller | Generate Cloud Provider Configuration" + template: + src: controller-manager-config.yml.j2 + dest: "{{ kube_config_dir }}/controller-manager-config.yml" + mode: 0644 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: "OCI Cloud Controller | Slurp Configuration" + slurp: + src: "{{ kube_config_dir }}/controller-manager-config.yml" + register: controller_manager_config + +- name: "OCI Cloud Controller | Encode Configuration" + set_fact: + controller_manager_config_base64: "{{ controller_manager_config.content }}" + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: "OCI Cloud Controller | Generate Manifests" + template: + src: oci-cloud-provider.yml.j2 + dest: "{{ kube_config_dir }}/oci-cloud-provider.yml" + mode: 0644 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: "OCI Cloud Controller | Apply Manifests" + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/oci-cloud-provider.yml" + state: latest + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 new file mode 100644 index 0000000..b8dcc60 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 @@ -0,0 +1,90 @@ +{% macro private_key() %}{{ oci_private_key }}{% endmacro %} + +{% if oci_use_instance_principals %} + # (https://docs.us-phoenix-1.oraclecloud.com/Content/Identity/Tasks/callingservicesfrominstances.htm). 
+ # Ensure you have set up the following OCI policies and your kubernetes nodes are running within them + # allow dynamic-group [your dynamic group name] to read instance-family in compartment [your compartment name] + # allow dynamic-group [your dynamic group name] to use virtual-network-family in compartment [your compartment name] + # allow dynamic-group [your dynamic group name] to manage load-balancers in compartment [your compartment name] +useInstancePrincipals: true +{% else %} +useInstancePrincipals: false +{% endif %} + +auth: + +{% if oci_use_instance_principals %} + # This key is put here too for backwards compatibility + useInstancePrincipals: true +{% else %} + useInstancePrincipals: false + + region: {{ oci_region_id }} + tenancy: {{ oci_tenancy_id }} + user: {{ oci_user_id }} + key: | + {{ oci_private_key }} + + {% if oci_private_key_passphrase is defined %} + passphrase: {{ oci_private_key_passphrase }} + {% endif %} + + + fingerprint: {{ oci_user_fingerprint }} +{% endif %} + +# compartment configures Compartment within which the cluster resides. +compartment: {{ oci_compartment_id }} + +# vcn configures the Virtual Cloud Network (VCN) within which the cluster resides. +vcn: {{ oci_vnc_id }} + +loadBalancer: + # subnet1 configures one of two subnets to which load balancers will be added. + # OCI load balancers require two subnets to ensure high availability. + subnet1: {{ oci_subnet1_id }} +{% if oci_subnet2_id is defined %} + # subnet2 configures the second of two subnets to which load balancers will be + # added. OCI load balancers require two subnets to ensure high availability. + subnet2: {{ oci_subnet2_id }} +{% endif %} + # SecurityListManagementMode configures how security lists are managed by the CCM. + # "All" (default): Manage all required security list rules for load balancer services. + # "Frontend": Manage only security list rules for ingress to the load + # balancer. Requires that the user has set up a rule that + # allows inbound traffic to the appropriate ports for kube + # proxy health port, node port ranges, and health check port ranges. + # E.g. 10.82.0.0/16 30000-32000. + # "None": Disables all security list management. Requires that the + # user has set up a rule that allows inbound traffic to the + # appropriate ports for kube proxy health port, node port + # ranges, and health check port ranges. E.g. 10.82.0.0/16 30000-32000. + # Additionally requires the user to manage rules to allow + # inbound traffic to load balancers. + securityListManagementMode: {{ oci_security_list_management }} + +{% if oci_security_lists is defined and oci_security_lists|length > 0 %} + # Optional specification of which security lists to modify per subnet. This does not apply if security list management is off. 
+ securityLists: +{% for subnet_ocid, list_ocid in oci_security_lists.items() %} + {{ subnet_ocid }}: {{ list_ocid }} +{% endfor %} +{% endif %} + +{% if oci_rate_limit is defined and oci_rate_limit|length > 0 %} +# Optional rate limit controls for accessing OCI API +rateLimiter: +{% if oci_rate_limit.rate_limit_qps_read %} + rateLimitQPSRead: {{ oci_rate_limit.rate_limit_qps_read }} +{% endif %} +{% if oci_rate_limit.rate_limit_qps_write %} + rateLimitQPSWrite: {{ oci_rate_limit.rate_limit_qps_write }} +{% endif %} +{% if oci_rate_limit.rate_limit_bucket_read %} + rateLimitBucketRead: {{ oci_rate_limit.rate_limit_bucket_read }} +{% endif %} +{% if oci_rate_limit.rate_limit_bucket_write %} + rateLimitBucketWrite: {{ oci_rate_limit.rate_limit_bucket_write }} +{% endif %} +{% endif %} + diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 new file mode 100644 index 0000000..bacd1e9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 @@ -0,0 +1,73 @@ +apiVersion: v1 +data: + cloud-provider.yaml: {{ controller_manager_config_base64 }} +kind: Secret +metadata: + name: oci-cloud-controller-manager + namespace: kube-system +type: Opaque + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: oci-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: oci-cloud-controller-manager +spec: + selector: + matchLabels: + component: oci-cloud-controller-manager + tier: control-plane + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + component: oci-cloud-controller-manager + tier: control-plane + spec: +{% if oci_cloud_controller_pull_secret is defined %} + imagePullSecrets: + - name: {{oci_cloud_controller_pull_secret}} +{% endif %} + serviceAccountName: cloud-controller-manager + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + volumes: + - name: cfg + secret: + secretName: oci-cloud-controller-manager + - name: kubernetes + hostPath: + path: /etc/kubernetes + containers: + - name: oci-cloud-controller-manager + image: {{oci_cloud_controller_pull_source}}:{{oci_cloud_controller_version}} + command: ["/usr/local/bin/oci-cloud-controller-manager"] + args: + - --cloud-config=/etc/oci/cloud-provider.yaml + - --cloud-provider=oci + - --leader-elect-resource-lock=configmaps + - -v=2 + volumeMounts: + - name: cfg + mountPath: /etc/oci + readOnly: true + - name: kubernetes + mountPath: /etc/kubernetes + readOnly: true + diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/defaults/main.yml new file mode 100644 index 0000000..f26583d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/defaults/main.yml @@ -0,0 +1,65 @@ +--- + +podsecuritypolicy_restricted_spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 
'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + runAsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + +podsecuritypolicy_privileged_spec: + privileged: true + allowPrivilegeEscalation: true + allowedCapabilities: + - '*' + volumes: + - '*' + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + hostIPC: true + hostPID: true + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + runAsGroup: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false + # This will fail if allowed-unsafe-sysctls is not set accordingly in kubelet flags + allowedUnsafeSysctls: + - '*' diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/files/k8s-cluster-critical-pc.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/files/k8s-cluster-critical-pc.yml new file mode 100644 index 0000000..479fb57 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/files/k8s-cluster-critical-pc.yml @@ -0,0 +1,8 @@ +--- +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: k8s-cluster-critical +value: 1000000000 +globalDefault: false +description: "This priority class should only be used by the pods installed using kubespray." diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/files/oci-rbac.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/files/oci-rbac.yml new file mode 100644 index 0000000..5e3b82b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/files/oci-rbac.yml @@ -0,0 +1,124 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloud-controller-manager +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + +- apiGroups: + - "" + resources: + - services + verbs: + - list + - watch + - patch + +- apiGroups: + - "" + resources: + - services/status + verbs: + - update + +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +# For leader election +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + +- apiGroups: + - "" + resources: + - endpoints + resourceNames: + - "cloud-controller-manager" + verbs: + - get + - list + - watch + - update + +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - "cloud-controller-manager" + verbs: + - get + - update + +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + +# For the PVL +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - list + - watch + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: oci-cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager +subjects: +- kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system diff --git 
a/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/tasks/main.yml new file mode 100644 index 0000000..ddbddba --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -0,0 +1,109 @@ +--- +- name: Kubernetes Apps | Wait for kube-apiserver + uri: + url: "{{ kube_apiserver_endpoint }}/healthz" + validate_certs: no + client_cert: "{{ kube_apiserver_client_cert }}" + client_key: "{{ kube_apiserver_client_key }}" + register: result + until: result.status == 200 + retries: 10 + delay: 6 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes + template: + src: "node-crb.yml.j2" + dest: "{{ kube_config_dir }}/node-crb.yml" + mode: 0640 + register: node_crb_manifest + when: + - rbac_enabled + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Apply workaround to allow all nodes with cert O=system:nodes to register + kube: + name: "kubespray:system:node" + kubectl: "{{ bin_dir }}/kubectl" + resource: "clusterrolebinding" + filename: "{{ kube_config_dir }}/node-crb.yml" + state: latest + register: result + until: result is succeeded + retries: 10 + delay: 6 + when: + - rbac_enabled + - node_crb_manifest.changed + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet + template: + src: "node-webhook-cr.yml.j2" + dest: "{{ kube_config_dir }}/node-webhook-cr.yml" + mode: 0640 + register: node_webhook_cr_manifest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + - inventory_hostname == groups['kube_control_plane'][0] + tags: node-webhook + +- name: Apply webhook ClusterRole + kube: + name: "system:node-webhook" + kubectl: "{{ bin_dir }}/kubectl" + resource: "clusterrole" + filename: "{{ kube_config_dir }}/node-webhook-cr.yml" + state: latest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + - node_webhook_cr_manifest.changed + - inventory_hostname == groups['kube_control_plane'][0] + tags: node-webhook + +- name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole + template: + src: "node-webhook-crb.yml.j2" + dest: "{{ kube_config_dir }}/node-webhook-crb.yml" + mode: 0640 + register: node_webhook_crb_manifest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + - inventory_hostname == groups['kube_control_plane'][0] + tags: node-webhook + +- name: Grant system:nodes the webhook ClusterRole + kube: + name: "system:node-webhook" + kubectl: "{{ bin_dir }}/kubectl" + resource: "clusterrolebinding" + filename: "{{ kube_config_dir }}/node-webhook-crb.yml" + state: latest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + - node_webhook_crb_manifest.changed + - inventory_hostname == groups['kube_control_plane'][0] + tags: node-webhook + +- include_tasks: oci.yml + tags: oci + when: + - cloud_provider is defined + - cloud_provider == 'oci' + +- name: PriorityClass | Copy k8s-cluster-critical-pc.yml file + copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml mode=0640 + when: inventory_hostname == groups['kube_control_plane']|last + +- name: PriorityClass | Create k8s-cluster-critical + kube: + name: k8s-cluster-critical + kubectl: "{{ bin_dir }}/kubectl" + resource: "PriorityClass" + filename: "{{ kube_config_dir 
}}/k8s-cluster-critical-pc.yml" + state: latest + when: inventory_hostname == groups['kube_control_plane']|last diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/tasks/oci.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/tasks/oci.yml new file mode 100644 index 0000000..eb07463 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/tasks/oci.yml @@ -0,0 +1,19 @@ +--- +- name: Copy OCI RBAC Manifest + copy: + src: "oci-rbac.yml" + dest: "{{ kube_config_dir }}/oci-rbac.yml" + mode: 0640 + when: + - cloud_provider is defined + - cloud_provider == 'oci' + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Apply OCI RBAC + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/oci-rbac.yml" + when: + - cloud_provider is defined + - cloud_provider == 'oci' + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 new file mode 100644 index 0000000..f2e115a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: "kube-system" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 new file mode 100644 index 0000000..9a4a3c4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: kubespray:system:node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:nodes diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 new file mode 100644 index 0000000..bf9aaf7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:node-webhook +rules: + - apiGroups: + - "" + resources: + - nodes/proxy + - nodes/stats + - nodes/log + - nodes/spec + - nodes/metrics + verbs: + - "*" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 new file mode 100644 index 0000000..68aed5c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:node-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + 
name: system:node-webhook +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:nodes diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 new file mode 100644 index 0000000..99da046 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:vsphere-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:vsphere-cloud-provider +roleRef: + kind: ClusterRole + name: system:vsphere-cloud-provider + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: vsphere-cloud-provider + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/meta/main.yml new file mode 100644 index 0000000..c82c5d8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/meta/main.yml @@ -0,0 +1,8 @@ +--- +dependencies: + - role: kubernetes-apps/container_engine_accelerator/nvidia_gpu + when: nvidia_accelerator_enabled + tags: + - apps + - nvidia_gpu + - container_engine_accelerator diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/defaults/main.yml new file mode 100644 index 0000000..6e870e4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/defaults/main.yml @@ -0,0 +1,14 @@ +--- +nvidia_accelerator_enabled: false +nvidia_driver_version: "390.87" +nvidia_gpu_tesla_base_url: https://us.download.nvidia.com/tesla/ +nvidia_gpu_gtx_base_url: http://us.download.nvidia.com/XFree86/Linux-x86_64/ +nvidia_gpu_flavor: tesla +nvidia_url_end: "{{ nvidia_driver_version }}/NVIDIA-Linux-x86_64-{{ nvidia_driver_version }}.run" +nvidia_driver_install_container: false +nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +nvidia_driver_install_ubuntu_container: registry.k8s.io/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +nvidia_driver_install_supported: false +nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" +nvidia_gpu_nodes: [] +nvidia_gpu_device_plugin_memory: 30Mi diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml new file mode 100644 index 0000000..62ecaf9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml @@ -0,0 +1,55 @@ +--- + +- name: Container Engine Acceleration Nvidia GPU| gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ 
ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + skip: true + +- name: Container Engine Acceleration Nvidia GPU | Set fact of download url Tesla + set_fact: + nvidia_driver_download_url_default: "{{ nvidia_gpu_tesla_base_url }}{{ nvidia_url_end }}" + when: nvidia_gpu_flavor|lower == "tesla" + +- name: Container Engine Acceleration Nvidia GPU | Set fact of download url GTX + set_fact: + nvidia_driver_download_url_default: "{{ nvidia_gpu_gtx_base_url }}{{ nvidia_url_end }}" + when: nvidia_gpu_flavor|lower == "gtx" + +- name: Container Engine Acceleration Nvidia GPU | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/container_engine_accelerator" + owner: root + group: root + mode: 0755 + recurse: true + +- name: Container Engine Acceleration Nvidia GPU | Create manifests for nvidia accelerators + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.file }}" + mode: 0644 + with_items: + - { name: nvidia-driver-install-daemonset, file: nvidia-driver-install-daemonset.yml, type: daemonset } + - { name: k8s-device-plugin-nvidia-daemonset, file: k8s-device-plugin-nvidia-daemonset.yml, type: daemonset } + register: container_engine_accelerator_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container + +- name: Container Engine Acceleration Nvidia GPU | Apply manifests for nvidia accelerators + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ container_engine_accelerator_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container and nvidia_driver_install_supported diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2 new file mode 100644 index 0000000..c5a7f51 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2 @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nvidia-gpu-device-plugin + namespace: kube-system + labels: + k8s-app: nvidia-gpu-device-plugin + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: nvidia-gpu-device-plugin + template: + metadata: + labels: + k8s-app: nvidia-gpu-device-plugin + spec: + priorityClassName: system-node-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "nvidia.com/gpu" + operator: Exists + tolerations: + - operator: "Exists" + effect: "NoExecute" + - operator: "Exists" + effect: "NoSchedule" + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + hostPID: true + volumes: + - name: device-plugin + hostPath: + path: /var/lib/kubelet/device-plugins + - name: dev + hostPath: + path: /dev + 
containers: + - image: "{{ nvidia_gpu_device_plugin_container }}" + command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"] + name: nvidia-gpu-device-plugin + resources: + requests: + cpu: 50m + memory: {{ nvidia_gpu_device_plugin_memory }} + limits: + cpu: 50m + memory: {{ nvidia_gpu_device_plugin_memory }} + securityContext: + privileged: true + volumeMounts: + - name: device-plugin + mountPath: /device-plugin + - name: dev + mountPath: /dev + updateStrategy: + type: RollingUpdate diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 new file mode 100644 index 0000000..ea097ed --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 @@ -0,0 +1,82 @@ +# Copyright 2017 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nvidia-driver-installer + namespace: kube-system +spec: + selector: + matchLabels: + name: nvidia-driver-installer + template: + metadata: + labels: + name: nvidia-driver-installer + spec: + priorityClassName: system-node-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "nvidia.com/gpu" + operator: Exists + tolerations: + - key: "nvidia.com/gpu" + effect: "NoSchedule" + operator: "Exists" + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + hostPID: true + volumes: + - name: dev + hostPath: + path: /dev + - name: nvidia-install-dir-host + hostPath: + path: /home/kubernetes/bin/nvidia + - name: root-mount + hostPath: + path: / + initContainers: + - image: "{{ nvidia_driver_install_container }}" + name: nvidia-driver-installer + resources: + requests: + cpu: 0.15 + securityContext: + privileged: true + env: + - name: NVIDIA_INSTALL_DIR_HOST + value: /home/kubernetes/bin/nvidia + - name: NVIDIA_INSTALL_DIR_CONTAINER + value: /usr/local/nvidia + - name: ROOT_MOUNT_DIR + value: /root + - name: NVIDIA_DRIVER_VERSION + value: "{{ nvidia_driver_version }}" + - name: NVIDIA_DRIVER_DOWNLOAD_URL + value: "{{ nvidia_driver_download_url_default }}" + volumeMounts: + - name: nvidia-install-dir-host + mountPath: /usr/local/nvidia + - name: dev + mountPath: /dev + - name: root-mount + mountPath: /root + containers: + - image: "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}" + name: pause diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/centos-7.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/centos-7.yml new file mode 100644 index 0000000..b1ea65b --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/centos-7.yml @@ -0,0 +1,3 @@ +--- +nvidia_driver_install_container: "{{ nvidia_driver_install_centos_container }}" +nvidia_driver_install_supported: true diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-16.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-16.yml new file mode 100644 index 0000000..f1bfdfc --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-16.yml @@ -0,0 +1,3 @@ +--- +nvidia_driver_install_container: "{{ nvidia_driver_install_ubuntu_container }}" +nvidia_driver_install_supported: true diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-18.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-18.yml new file mode 100644 index 0000000..f1bfdfc --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-18.yml @@ -0,0 +1,3 @@ +--- +nvidia_driver_install_container: "{{ nvidia_driver_install_ubuntu_container }}" +nvidia_driver_install_supported: true diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml new file mode 100644 index 0000000..46384d2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: crun | Copy runtime class manifest + template: + src: runtimeclass-crun.yml + dest: "{{ kube_config_dir }}/runtimeclass-crun.yml" + mode: "0664" + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: crun | Apply manifests + kube: + name: "runtimeclass-crun" + kubectl: "{{ bin_dir }}/kubectl" + resource: "runtimeclass" + filename: "{{ kube_config_dir }}/runtimeclass-crun.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/crun/templates/runtimeclass-crun.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/crun/templates/runtimeclass-crun.yml new file mode 100644 index 0000000..99d97e6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/crun/templates/runtimeclass-crun.yml @@ -0,0 +1,6 @@ +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: crun +handler: crun diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/gvisor/tasks/main.yaml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/gvisor/tasks/main.yaml new file mode 100644 index 0000000..b5b881e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/gvisor/tasks/main.yaml @@ -0,0 +1,34 @@ +--- +- name: gVisor | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/gvisor" + owner: root + group: root + mode: 0755 + recurse: true + +- name: gVisor | Templates List + set_fact: + gvisor_templates: + - { name: runtimeclass-gvisor, file: runtimeclass-gvisor.yml, type: runtimeclass } + +- name: gVisor | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/gvisor/{{ item.file }}" + mode: 0644 + with_items: "{{ gvisor_templates }}" + 
register: gvisor_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: gVisor | Apply manifests + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/gvisor/{{ item.item.file }}" + state: "latest" + with_items: "{{ gvisor_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/gvisor/templates/runtimeclass-gvisor.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/gvisor/templates/runtimeclass-gvisor.yml.j2 new file mode 100644 index 0000000..64465fa --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/gvisor/templates/runtimeclass-gvisor.yml.j2 @@ -0,0 +1,6 @@ +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: gvisor +handler: runsc diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml new file mode 100644 index 0000000..6eacb79 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml @@ -0,0 +1,5 @@ +--- + +kata_containers_qemu_overhead: true +kata_containers_qemu_overhead_fixed_cpu: 250m +kata_containers_qemu_overhead_fixed_memory: 160Mi diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml new file mode 100644 index 0000000..a07c7c2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml @@ -0,0 +1,35 @@ +--- + +- name: Kata Containers | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/kata_containers" + owner: root + group: root + mode: 0755 + recurse: true + +- name: Kata Containers | Templates list + set_fact: + kata_containers_templates: + - { name: runtimeclass-kata-qemu, file: runtimeclass-kata-qemu.yml, type: runtimeclass } + +- name: Kata Containers | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/kata_containers/{{ item.file }}" + mode: 0644 + with_items: "{{ kata_containers_templates }}" + register: kata_containers_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kata Containers | Apply manifests + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/kata_containers/{{ item.item.file }}" + state: "latest" + with_items: "{{ kata_containers_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/kata_containers/templates/runtimeclass-kata-qemu.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/kata_containers/templates/runtimeclass-kata-qemu.yml.j2 new file mode 100644 index 0000000..2240cdb --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/kata_containers/templates/runtimeclass-kata-qemu.yml.j2 @@ -0,0 +1,12 @@ +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: kata-qemu +handler: kata-qemu +{% if kata_containers_qemu_overhead %} 
+overhead: + podFixed: + cpu: {{ kata_containers_qemu_overhead_fixed_cpu }} + memory: {{ kata_containers_qemu_overhead_fixed_memory }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/meta/main.yml new file mode 100644 index 0000000..8584117 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/meta/main.yml @@ -0,0 +1,31 @@ +--- +dependencies: + - role: kubernetes-apps/container_runtimes/kata_containers + when: kata_containers_enabled + tags: + - apps + - kata-containers + - container-runtimes + + - role: kubernetes-apps/container_runtimes/gvisor + when: gvisor_enabled + tags: + - apps + - gvisor + - container-runtimes + + - role: kubernetes-apps/container_runtimes/crun + when: crun_enabled + tags: + - apps + - crun + - container-runtimes + + - role: kubernetes-apps/container_runtimes/youki + when: + - youki_enabled + - container_manager == 'crio' + tags: + - apps + - youki + - container-runtimes diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/youki/tasks/main.yaml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/youki/tasks/main.yaml new file mode 100644 index 0000000..6da025f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/youki/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: youki | Copy runtime class manifest + template: + src: runtimeclass-youki.yml + dest: "{{ kube_config_dir }}/runtimeclass-youki.yml" + mode: "0664" + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: youki | Apply manifests + kube: + name: "runtimeclass-youki" + kubectl: "{{ bin_dir }}/kubectl" + resource: "runtimeclass" + filename: "{{ kube_config_dir }}/runtimeclass-youki.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/youki/templates/runtimeclass-youki.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/youki/templates/runtimeclass-youki.yml new file mode 100644 index 0000000..b68bd06 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/container_runtimes/youki/templates/runtimeclass-youki.yml @@ -0,0 +1,6 @@ +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: youki +handler: youki diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/OWNERS b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/OWNERS new file mode 100644 index 0000000..6cfbaa8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +reviewers: + - alijahnas + - luckySB diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/defaults/main.yml new file mode 100644 index 0000000..33df37c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/defaults/main.yml @@ -0,0 +1,11 @@ +--- +aws_ebs_csi_enable_volume_scheduling: true +aws_ebs_csi_enable_volume_snapshot: false +aws_ebs_csi_enable_volume_resizing: false +aws_ebs_csi_controller_replicas: 1 +aws_ebs_csi_plugin_image_tag: latest + +# Add annotations to ebs_csi_controller. 
Useful if using kube2iam for role assumption +# aws_ebs_csi_annotations: +# - key: iam.amazonaws.com/role +# value: your-ebs-role-arn diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml new file mode 100644 index 0000000..5570dcc --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- name: AWS CSI Driver | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: aws-ebs-csi-driver, file: aws-ebs-csi-driver.yml} + - {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice-rbac.yml} + - {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice.yml} + - {name: aws-ebs-csi-nodeservice, file: aws-ebs-csi-nodeservice.yml} + register: aws_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: AWS CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ aws_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice-rbac.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice-rbac.yml.j2 new file mode 100644 index 0000000..87bfa31 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice-rbac.yml.j2 @@ -0,0 +1,180 @@ +# Controller Service +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ebs-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +# The permissions in this ClusterRole are tightly coupled with the version of csi-attacher used. More information about this can be found in kubernetes-csi/external-attacher. 
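As a usage note on the aws_ebs defaults above: the commented-out aws_ebs_csi_annotations list can be overridden in inventory group_vars to annotate the EBS CSI controller pods (for example for kube2iam role assumption), alongside the other feature toggles. A minimal, illustrative sketch only; the enable flag and the role ARN below are assumptions and are not part of this diff:

# group_vars/k8s_cluster/addons.yml (illustrative values only)
aws_ebs_csi_enabled: true                     # assumed Kubespray enable flag, not shown in this diff
aws_ebs_csi_enable_volume_snapshot: true      # enables the csi-snapshotter sidecar and its RBAC above
aws_ebs_csi_enable_volume_resizing: true      # enables the csi-resizer sidecar and its RBAC above
aws_ebs_csi_controller_replicas: 2
aws_ebs_csi_annotations:
  - key: iam.amazonaws.com/role
    value: arn:aws:iam::111111111111:role/ebs-csi-controller   # hypothetical role ARN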
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ebs-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +{% if aws_ebs_csi_enable_volume_snapshot %} +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ebs-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +{% endif %} + +{% if aws_ebs_csi_enable_volume_resizing %} +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-csi-resizer-binding +subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ebs-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice.yml.j2 new file mode 
100644 index 0000000..ffce40b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice.yml.j2 @@ -0,0 +1,132 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: ebs-csi-controller + namespace: kube-system +spec: + replicas: {{ aws_ebs_csi_controller_replicas }} + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + labels: + app: ebs-csi-controller + app.kubernetes.io/name: aws-ebs-csi-driver +{% if aws_ebs_csi_annotations is defined %} + annotations: +{% for annotation in aws_ebs_csi_annotations %} + {{ annotation.key }}: {{ annotation.value }} +{% endfor %} +{% endif %} + spec: + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: ebs-csi-controller-sa + priorityClassName: system-cluster-critical + containers: + - name: ebs-plugin + image: {{ aws_ebs_csi_plugin_image_repo }}:{{ aws_ebs_csi_plugin_image_tag }} + args: + - --endpoint=$(CSI_ENDPOINT) +{% if aws_ebs_csi_extra_volume_tags is defined %} + - --extra-volume-tags={{ aws_ebs_csi_extra_volume_tags }} +{% endif %} + - --logtostderr + - --v=5 + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: aws-secret + key: key_id + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws-secret + key: access_key + optional: true + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 5 + - name: csi-provisioner + image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }} + args: + - --csi-address=$(ADDRESS) + - --v=5 +{% if aws_ebs_csi_enable_volume_scheduling %} + - --feature-gates=Topology=true +{% endif %} + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-attacher + image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }} + args: + - --csi-address=$(ADDRESS) + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ +{% if aws_ebs_csi_enable_volume_snapshot %} + - name: csi-snapshotter + image: {{ csi_snapshotter_image_repo }}:{{ csi_snapshotter_image_tag }} + args: + - --csi-address=$(ADDRESS) + - --timeout=15s + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ +{% endif %} +{% if aws_ebs_csi_enable_volume_resizing %} + - name: csi-resizer + image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --csi-address=$(ADDRESS) + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ +{% endif %} + - name: liveness-probe + image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }} + args: + - --csi-address=/csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + volumes: + - name: socket-dir + emptyDir: {} + diff --git 
a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-driver.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-driver.yml.j2 new file mode 100644 index 0000000..99c6c5b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-driver.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: ebs.csi.aws.com +spec: + attachRequired: true + podInfoOnMount: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-nodeservice.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-nodeservice.yml.j2 new file mode 100644 index 0000000..1dc1925 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-nodeservice.yml.j2 @@ -0,0 +1,101 @@ +--- +# Node Service +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ebs-csi-node + namespace: kube-system +spec: + selector: + matchLabels: + app: ebs-csi-node + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + labels: + app: ebs-csi-node + app.kubernetes.io/name: aws-ebs-csi-driver + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + priorityClassName: system-node-critical + containers: + - name: ebs-plugin + securityContext: + privileged: true + image: {{ aws_ebs_csi_plugin_image_repo }}:{{ aws_ebs_csi_plugin_image_tag }} + args: + - --endpoint=$(CSI_ENDPOINT) +{% if aws_ebs_csi_extra_volume_tags is defined %} + - --extra-volume-tags={{ aws_ebs_csi_extra_volume_tags }} +{% endif %} + - --logtostderr + - --v=5 + env: + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + volumeMounts: + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + - name: plugin-dir + mountPath: /csi + - name: device-dir + mountPath: /dev + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 5 + - name: node-driver-registrar + image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=5 + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock"] + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: liveness-probe + image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }} + args: + - --csi-address=/csi/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: /csi + volumes: + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/ebs.csi.aws.com/ + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: device-dir + hostPath: + path: /dev + type: Directory diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/defaults/main.yml new file mode 100644 index 
0000000..341cc97 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/defaults/main.yml @@ -0,0 +1,6 @@ +--- +azure_csi_use_instance_metadata: true +azure_csi_controller_replicas: 2 +azure_csi_plugin_image_tag: latest +azure_csi_controller_affinity: {} +azure_csi_node_affinity: {} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/tasks/azure-credential-check.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/tasks/azure-credential-check.yml new file mode 100644 index 0000000..0a858ee --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/tasks/azure-credential-check.yml @@ -0,0 +1,54 @@ +--- +- name: Azure CSI Driver | check azure_csi_tenant_id value + fail: + msg: "azure_csi_tenant_id is missing" + when: azure_csi_tenant_id is not defined or not azure_csi_tenant_id + +- name: Azure CSI Driver | check azure_csi_subscription_id value + fail: + msg: "azure_csi_subscription_id is missing" + when: azure_csi_subscription_id is not defined or not azure_csi_subscription_id + +- name: Azure CSI Driver | check azure_csi_aad_client_id value + fail: + msg: "azure_csi_aad_client_id is missing" + when: azure_csi_aad_client_id is not defined or not azure_csi_aad_client_id + +- name: Azure CSI Driver | check azure_csi_aad_client_secret value + fail: + msg: "azure_csi_aad_client_secret is missing" + when: azure_csi_aad_client_secret is not defined or not azure_csi_aad_client_secret + +- name: Azure CSI Driver | check azure_csi_resource_group value + fail: + msg: "azure_csi_resource_group is missing" + when: azure_csi_resource_group is not defined or not azure_csi_resource_group + +- name: Azure CSI Driver | check azure_csi_location value + fail: + msg: "azure_csi_location is missing" + when: azure_csi_location is not defined or not azure_csi_location + +- name: Azure CSI Driver | check azure_csi_subnet_name value + fail: + msg: "azure_csi_subnet_name is missing" + when: azure_csi_subnet_name is not defined or not azure_csi_subnet_name + +- name: Azure CSI Driver | check azure_csi_security_group_name value + fail: + msg: "azure_csi_security_group_name is missing" + when: azure_csi_security_group_name is not defined or not azure_csi_security_group_name + +- name: Azure CSI Driver | check azure_csi_vnet_name value + fail: + msg: "azure_csi_vnet_name is missing" + when: azure_csi_vnet_name is not defined or not azure_csi_vnet_name + +- name: Azure CSI Driver | check azure_csi_vnet_resource_group value + fail: + msg: "azure_csi_vnet_resource_group is missing" + when: azure_csi_vnet_resource_group is not defined or not azure_csi_vnet_resource_group + +- name: "Azure CSI Driver | check azure_csi_use_instance_metadata is a bool" + assert: + that: azure_csi_use_instance_metadata | type_debug == 'bool' diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml new file mode 100644 index 0000000..67ce865 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml @@ -0,0 +1,44 @@ +--- +- include_tasks: azure-credential-check.yml + +- name: Azure CSI Driver | Write Azure CSI cloud-config + template: + src: "azure-csi-cloud-config.j2" + dest: "{{ kube_config_dir }}/azure_csi_cloud_config" + group: "{{ kube_cert_group }}" + mode: 0640 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Azure CSI Driver | Get 
base64 cloud-config + slurp: + src: "{{ kube_config_dir }}/azure_csi_cloud_config" + register: cloud_config_secret + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Azure CSI Driver | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: azure-csi-azuredisk-driver, file: azure-csi-azuredisk-driver.yml} + - {name: azure-csi-cloud-config-secret, file: azure-csi-cloud-config-secret.yml} + - {name: azure-csi-azuredisk-controller, file: azure-csi-azuredisk-controller-rbac.yml} + - {name: azure-csi-azuredisk-controller, file: azure-csi-azuredisk-controller.yml} + - {name: azure-csi-azuredisk-node-rbac, file: azure-csi-azuredisk-node-rbac.yml} + - {name: azure-csi-azuredisk-node, file: azure-csi-azuredisk-node.yml} + register: azure_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Azure CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ azure_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller-rbac.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller-rbac.yml.j2 new file mode 100644 index 0000000..16f4c98 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller-rbac.yml.j2 @@ -0,0 +1,230 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", 
"watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-cluster-driver-registrar-role +rules: + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csidrivers"] + verbs: ["create", "delete"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-driver-registrar-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-cluster-driver-registrar-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", 
"create", "update", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller.yml.j2 new file mode 100644 index 0000000..36d38ac --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller.yml.j2 @@ -0,0 +1,179 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-controller + namespace: kube-system +spec: + replicas: {{ azure_csi_controller_replicas }} + selector: + matchLabels: + app: csi-azuredisk-controller + template: + metadata: + labels: + app: csi-azuredisk-controller + spec: + hostNetwork: true + serviceAccountName: csi-azuredisk-controller-sa + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + tolerations: + - key: "node-role.kubernetes.io/master" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + effect: "NoSchedule" +{% if azure_csi_controller_affinity %} + affinity: + {{ azure_csi_controller_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} + containers: + - name: csi-provisioner + image: {{ azure_csi_image_repo }}/csi-provisioner:{{ azure_csi_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--feature-gates=Topology=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=15s" + - "--leader-election" + - "--worker-threads=40" + - "--extra-create-metadata=true" + - "--strict-topology=true" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-attacher + image: {{ azure_csi_image_repo }}/csi-attacher:{{ azure_csi_attacher_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=600s" + - "-leader-election" + - "-worker-threads=500" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-snapshotter + image: {{ azure_csi_image_repo }}/csi-snapshotter:{{ azure_csi_snapshotter_image_tag }} + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "-v=2" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + 
memory: 20Mi + - name: csi-resizer + image: {{ azure_csi_image_repo }}/csi-resizer:{{ azure_csi_resizer_image_tag }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - '-handle-volume-inuse-error=false' + - "-timeout=60s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: {{ azure_csi_image_repo }}/livenessprobe:{{ azure_csi_livenessprobe_image_tag }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29602 + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: {{ azure_csi_plugin_image_repo }}/azuredisk-csi:{{ azure_csi_plugin_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29604" + - "--disable-avset-nodes=true" + - "--drivername=disk.csi.azure.com" + - "--cloud-config-secret-name=cloud-config" + - "--cloud-config-secret-namespace=kube-system" + ports: + - containerPort: 29602 + name: healthz + protocol: TCP + - containerPort: 29604 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + value: "/etc/kubernetes/azure.json" + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + readOnly: true + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + secret: + secretName: cloud-config diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-driver.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-driver.yml.j2 new file mode 100644 index 0000000..c7cba34 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-driver.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: disk.csi.azure.com +spec: + attachRequired: true + podInfoOnMount: true + volumeLifecycleModes: # added in Kubernetes 1.16 + - Persistent diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node-rbac.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node-rbac.yml.j2 new file mode 100644 index 0000000..d55ea0d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node-rbac.yml.j2 @@ -0,0 +1,30 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: 
csi-azuredisk-node-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node.yml.j2 new file mode 100644 index 0000000..4d80319 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node.yml.j2 @@ -0,0 +1,168 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node + template: + metadata: + labels: + app: csi-azuredisk-node + spec: + hostNetwork: true + dnsPolicy: Default + serviceAccountName: csi-azuredisk-node-sa + nodeSelector: + kubernetes.io/os: linux +{% if azure_csi_node_affinity %} + affinity: + {{ azure_csi_node_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} + priorityClassName: system-node-critical + tolerations: + - operator: Exists + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: {{ azure_csi_image_repo }}/livenessprobe:{{ azure_csi_livenessprobe_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29603 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: {{ azure_csi_image_repo }}/csi-node-driver-registrar:{{ azure_csi_node_registrar_image_tag }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: {{ azure_csi_plugin_image_repo }}/azuredisk-csi:{{ azure_csi_plugin_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:29605" + - "--enable-perf-optimization=true" + - "--drivername=disk.csi.azure.com" + - "--volume-attach-limit=-1" + - "--cloud-config-secret-name=cloud-config" + - "--cloud-config-secret-namespace=kube-system" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + - containerPort: 29605 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + value: "/etc/kubernetes/azure.json" + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + 
name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/scsi_host/ + name: scsi-host-dir + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/disk.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - secret: + defaultMode: 0644 + secretName: cloud-config + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/scsi_host/ + type: Directory + name: scsi-host-dir diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config-secret.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config-secret.yml.j2 new file mode 100644 index 0000000..f259cec --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config-secret.yml.j2 @@ -0,0 +1,7 @@ +kind: Secret +apiVersion: v1 +metadata: + name: cloud-config + namespace: kube-system +data: + azure.json: {{ cloud_config_secret.content }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config.j2 new file mode 100644 index 0000000..d3932f5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config.j2 @@ -0,0 +1,14 @@ +{ + "cloud":"AzurePublicCloud", + "tenantId": "{{ azure_csi_tenant_id }}", + "subscriptionId": "{{ azure_csi_subscription_id }}", + "aadClientId": "{{ azure_csi_aad_client_id }}", + "aadClientSecret": "{{ azure_csi_aad_client_secret }}", + "location": "{{ azure_csi_location }}", + "resourceGroup": "{{ azure_csi_resource_group }}", + "vnetName": "{{ azure_csi_vnet_name }}", + "vnetResourceGroup": "{{ azure_csi_vnet_resource_group }}", + "subnetName": "{{ azure_csi_subnet_name }}", + "securityGroupName": "{{ azure_csi_security_group_name }}", + "useInstanceMetadata": {{ azure_csi_use_instance_metadata }}, +} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml new file mode 100644 index 0000000..6a13e86 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml @@ -0,0 +1,30 @@ +--- +# To access Cinder, the CSI controller will need credentials to access +# OpenStack APIs. By default these values will be +# read from the environment.
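For reference, the same values can also be set explicitly in inventory group_vars instead of being picked up from the OS_* environment variables. The snippet below is a minimal sketch with hypothetical endpoint and credential values; username/password and application credentials are alternatives, as enforced by cinder-credential-check.yml further down:

# group_vars/all/openstack.yml (illustrative values only)
cinder_auth_url: "https://keystone.example.com:5000/v3"   # hypothetical Keystone endpoint
cinder_username: "kubespray"
cinder_password: "ChangeMe123"
cinder_region: "RegionOne"
cinder_tenant_name: "kubernetes"
cinder_domain_name: "Default"
# cinder_cacert: "/etc/ssl/certs/openstack-ca.pem"        # optional CA bundle; the role copies it to cinder-cacert.pem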
+cinder_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +cinder_username: "{{ lookup('env','OS_USERNAME') }}" +cinder_password: "{{ lookup('env','OS_PASSWORD') }}" +cinder_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID') }}" +cinder_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME') }}" +cinder_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET') }}" +cinder_region: "{{ lookup('env','OS_REGION_NAME') }}" +cinder_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}" +cinder_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}" +cinder_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" +cinder_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" +cinder_cacert: "{{ lookup('env','OS_CACERT') }}" + +# For now, only Cinder v3 is supported in the Cinder CSI driver +cinder_blockstorage_version: "v3" +cinder_csi_controller_replicas: 1 + +# Optional. Set to true to rescan the block device and verify its size before expanding +# the filesystem. +# Not all hypervisors have a /sys/class/block/XXX/device/rescan location, therefore if +# you enable this option and your hypervisor doesn't support this, you'll get a warning +# log on resize events. It is recommended to disable this option in this case. +# Defaults to false +# cinder_csi_rescan_on_resize: true + +cinder_tolerations: [] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml new file mode 100644 index 0000000..cb65f42 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml @@ -0,0 +1,59 @@ +--- +- name: Cinder CSI Driver | check cinder_auth_url value + fail: + msg: "cinder_auth_url is missing" + when: cinder_auth_url is not defined or not cinder_auth_url + +- name: Cinder CSI Driver | check cinder_username value cinder_application_credential_name value + fail: + msg: "you must either set cinder_username or cinder_application_credential_name" + when: + - cinder_username is not defined or not cinder_username + - cinder_application_credential_name is not defined or not cinder_application_credential_name + +- name: Cinder CSI Driver | check cinder_application_credential_id value + fail: + msg: "cinder_application_credential_id is missing" + when: + - cinder_application_credential_name is defined + - cinder_application_credential_name|length > 0 + - cinder_application_credential_id is not defined or not cinder_application_credential_id + +- name: Cinder CSI Driver | check cinder_application_credential_secret value + fail: + msg: "cinder_application_credential_secret is missing" + when: + - cinder_application_credential_name is defined + - cinder_application_credential_name|length > 0 + - cinder_application_credential_secret is not defined or not cinder_application_credential_secret + +- name: Cinder CSI Driver | check cinder_password value + fail: + msg: "cinder_password is missing" + when: + - cinder_username is defined + - cinder_username|length > 0 + - cinder_application_credential_name is not defined or not cinder_application_credential_name + - cinder_application_credential_secret is not defined or not cinder_application_credential_secret + - cinder_password is not defined or not cinder_password + +- name: Cinder CSI Driver | check cinder_region value +
fail: + msg: "cinder_region is missing" + when: cinder_region is not defined or not cinder_region + +- name: Cinder CSI Driver | check cinder_tenant_id value + fail: + msg: "one of cinder_tenant_id or cinder_tenant_name must be specified" + when: + - cinder_tenant_id is not defined or not cinder_tenant_id + - cinder_tenant_name is not defined or not cinder_tenant_name + - cinder_application_credential_name is not defined or not cinder_application_credential_name + +- name: Cinder CSI Driver | check cinder_domain_id value + fail: + msg: "one of cinder_domain_id or cinder_domain_name must be specified" + when: + - cinder_domain_id is not defined or not cinder_domain_id + - cinder_domain_name is not defined or not cinder_domain_name + - cinder_application_credential_name is not defined or not cinder_application_credential_name diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-write-cacert.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-write-cacert.yml new file mode 100644 index 0000000..c6d14a2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-write-cacert.yml @@ -0,0 +1,11 @@ +--- +# include to workaround mitogen issue +# https://github.com/dw/mitogen/issues/663 + +- name: Cinder CSI Driver | Write cacert file + copy: + src: "{{ cinder_cacert }}" + dest: "{{ kube_config_dir }}/cinder-cacert.pem" + group: "{{ kube_cert_group }}" + mode: 0640 + delegate_to: "{{ delegate_host_to_write_cacert }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml new file mode 100644 index 0000000..7d5affe --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml @@ -0,0 +1,56 @@ +--- +- include_tasks: cinder-credential-check.yml + +- name: Cinder CSI Driver | Write cacert file + include_tasks: cinder-write-cacert.yml + run_once: true + loop: "{{ groups['k8s_cluster'] }}" + loop_control: + loop_var: delegate_host_to_write_cacert + when: + - inventory_hostname in groups['k8s_cluster'] + - cinder_cacert is defined + - cinder_cacert | length > 0 + +- name: Cinder CSI Driver | Write Cinder cloud-config + template: + src: "cinder-csi-cloud-config.j2" + dest: "{{ kube_config_dir }}/cinder_cloud_config" + group: "{{ kube_cert_group }}" + mode: 0640 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Cinder CSI Driver | Get base64 cloud-config + slurp: + src: "{{ kube_config_dir }}/cinder_cloud_config" + register: cloud_config_secret + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Cinder CSI Driver | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: cinder-csi-driver, file: cinder-csi-driver.yml} + - {name: cinder-csi-cloud-config-secret, file: cinder-csi-cloud-config-secret.yml} + - {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin-rbac.yml} + - {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin.yml} + - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin-rbac.yml} + - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin.yml} + - {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml} + register: cinder_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Cinder CSI Driver | Apply 
Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ cinder_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config-secret.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config-secret.yml.j2 new file mode 100644 index 0000000..cb3cba6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config-secret.yml.j2 @@ -0,0 +1,10 @@ +# This YAML file contains secret objects, +# which are necessary to run csi cinder plugin. + +kind: Secret +apiVersion: v1 +metadata: + name: cloud-config + namespace: kube-system +data: + cloud.conf: {{ cloud_config_secret.content }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config.j2 new file mode 100644 index 0000000..1a83f7d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config.j2 @@ -0,0 +1,44 @@ +[Global] +auth-url="{{ cinder_auth_url }}" +{% if cinder_application_credential_id|length == 0 and cinder_application_credential_name|length == 0 %} +username="{{ cinder_username }}" +password="{{ cinder_password }}" +{% endif %} +{% if cinder_application_credential_id|length > 0 %} +application-credential-id={{ cinder_application_credential_id }} +{% endif %} +{% if cinder_application_credential_name|length > 0 %} +application-credential-name={{ cinder_application_credential_name }} +{% endif %} +{% if cinder_application_credential_secret|length > 0 %} +application-credential-secret={{ cinder_application_credential_secret }} +{% endif %} +region="{{ cinder_region }}" +{% if cinder_tenant_id|length > 0 %} +tenant-id="{{ cinder_tenant_id }}" +{% endif %} +{% if cinder_tenant_name|length > 0 %} +tenant-name="{{ cinder_tenant_name }}" +{% endif %} +{% if cinder_domain_name|length > 0 %} +domain-name="{{ cinder_domain_name }}" +{% elif cinder_domain_id|length > 0 %} +domain-id ="{{ cinder_domain_id }}" +{% endif %} +{% if cinder_cacert|length > 0 %} +ca-file="{{ kube_config_dir }}/cinder-cacert.pem" +{% endif %} + +[BlockStorage] +{% if cinder_blockstorage_version is defined %} +bs-version={{ cinder_blockstorage_version }} +{% endif %} +{% if cinder_csi_ignore_volume_az is defined %} +ignore-volume-az={{ cinder_csi_ignore_volume_az | bool }} +{% endif %} +{% if node_volume_attach_limit is defined and node_volume_attach_limit != "" %} +node-volume-attach-limit="{{ node_volume_attach_limit }}" +{% endif %} +{% if cinder_csi_rescan_on_resize is defined %} +rescan-on-resize={{ cinder_csi_rescan_on_resize | bool }} +{% endif %} \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 new file mode 100644 index 0000000..d40053a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 @@ -0,0 +1,179 @@ +# This YAML file contains RBAC 
API objects, +# which are necessary to run csi controller plugin + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-cinder-controller-sa + namespace: kube-system + +--- +# external attacher +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-cinder-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- +# external Provisioner +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-cinder-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- +# external snapshotter +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-binding +subjects: + - kind: 
ServiceAccount + name: csi-cinder-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +# External Resizer +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-role +rules: + # The following rule should be uncommented for plugins that require secrets + # for provisioning. + # - apiGroups: [""] + # resources: ["secrets"] + # verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-binding +subjects: + - kind: ServiceAccount + name: csi-cinder-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-resizer-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 new file mode 100644 index 0000000..6bd671a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 @@ -0,0 +1,156 @@ +# This YAML file contains CSI Controller Plugin Sidecars +# external-attacher, external-provisioner, external-snapshotter + +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-cinder-controllerplugin + namespace: kube-system +spec: + replicas: {{ cinder_csi_controller_replicas }} + selector: + matchLabels: + app: csi-cinder-controllerplugin + template: + metadata: + labels: + app: csi-cinder-controllerplugin + spec: + serviceAccountName: csi-cinder-controller-sa + containers: + - name: csi-attacher + image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + - "--timeout=3m" +{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} + - --leader-election=true +{% endif %} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-provisioner + image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + - "--timeout=3m" + - "--default-fstype=ext4" + - "--extra-create-metadata" +{% if cinder_topology is defined and cinder_topology %} + - --feature-gates=Topology=true +{% endif %} +{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} + - "--leader-election=true" +{% endif %} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-snapshotter + image: {{ csi_snapshotter_image_repo }}:{{ csi_snapshotter_image_tag }} + 
imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + - "--timeout=3m" + - "--extra-create-metadata" +{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} + - --leader-election=true +{% endif %} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - name: csi-resizer + image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + - "--timeout=3m" + - "--handle-volume-inuse-error=false" +{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} + - --leader-election=true +{% endif %} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: liveness-probe + image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - name: cinder-csi-plugin + image: {{ cinder_csi_plugin_image_repo }}:{{ cinder_csi_plugin_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - /bin/cinder-csi-plugin + - "--endpoint=$(CSI_ENDPOINT)" + - "--cloud-config=$(CLOUD_CONFIG)" + - "--cluster=$(CLUSTER_NAME)" + env: + - name: CSI_ENDPOINT + value: unix://csi/csi.sock + - name: CLOUD_CONFIG + value: /etc/config/cloud.conf + - name: CLUSTER_NAME + value: kubernetes + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 10 + periodSeconds: 60 + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: secret-cinderplugin + mountPath: /etc/config + readOnly: true + - name: ca-certs + mountPath: /etc/ssl/certs + readOnly: true +{% if cinder_cacert is defined and cinder_cacert != "" %} + - name: cinder-cacert + mountPath: {{ kube_config_dir }}/cinder-cacert.pem + readOnly: true +{% endif %} + volumes: + - name: socket-dir + emptyDir: + - name: secret-cinderplugin + secret: + secretName: cloud-config + - name: ca-certs + hostPath: + path: /etc/ssl/certs + type: DirectoryOrCreate +{% if cinder_cacert is defined and cinder_cacert != "" %} + - name: cinder-cacert + hostPath: + path: {{ kube_config_dir }}/cinder-cacert.pem + type: FileOrCreate +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2 new file mode 100644 index 0000000..5b681e4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2 @@ -0,0 +1,10 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: cinder.csi.openstack.org +spec: + attachRequired: true + podInfoOnMount: true + volumeLifecycleModes: + - Persistent + - Ephemeral diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin-rbac.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin-rbac.yml.j2 new file mode 100644 index 
0000000..db58963 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin-rbac.yml.j2 @@ -0,0 +1,38 @@ +# This YAML defines all API objects to create RBAC roles for csi node plugin. + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-cinder-node-sa + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-nodeplugin-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-nodeplugin-binding +subjects: + - kind: ServiceAccount + name: csi-cinder-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-nodeplugin-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 new file mode 100644 index 0000000..3cdf9bb --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 @@ -0,0 +1,130 @@ +# This YAML file contains driver-registrar & csi driver nodeplugin API objects, +# which are necessary to run csi nodeplugin for cinder. + +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-cinder-nodeplugin + namespace: kube-system +spec: + selector: + matchLabels: + app: csi-cinder-nodeplugin + template: + metadata: + labels: + app: csi-cinder-nodeplugin + spec: + serviceAccountName: csi-cinder-node-sa + hostNetwork: true + containers: + - name: node-driver-registrar + image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/cinder.csi.openstack.org/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: liveness-probe + image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }} + args: + - "--csi-address=/csi/csi.sock" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: cinder-csi-plugin + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + image: {{ cinder_csi_plugin_image_repo }}:{{ cinder_csi_plugin_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - /bin/cinder-csi-plugin + - "--endpoint=$(CSI_ENDPOINT)" + - "--cloud-config=$(CLOUD_CONFIG)" + env: + - name: CSI_ENDPOINT + value: unix://csi/csi.sock + - name: CLOUD_CONFIG + value: /etc/config/cloud.conf + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + 
periodSeconds: 10 + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + - name: pods-probe-dir + mountPath: /dev + mountPropagation: "HostToContainer" + - name: secret-cinderplugin + mountPath: /etc/config + readOnly: true + - name: ca-certs + mountPath: /etc/ssl/certs + readOnly: true +{% if cinder_cacert is defined and cinder_cacert != "" %} + - name: cinder-cacert + mountPath: {{ kube_config_dir }}/cinder-cacert.pem + readOnly: true +{% endif %} + volumes: + - name: socket-dir + hostPath: + path: /var/lib/kubelet/plugins/cinder.csi.openstack.org + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: pods-probe-dir + hostPath: + path: /dev + type: Directory + - name: secret-cinderplugin + secret: + secretName: cloud-config + - name: ca-certs + hostPath: + path: /etc/ssl/certs + type: DirectoryOrCreate +{% if cinder_cacert is defined and cinder_cacert != "" %} + - name: cinder-cacert + hostPath: + path: {{ kube_config_dir }}/cinder-cacert.pem + type: FileOrCreate +{% endif %} +{% if cinder_tolerations %} + tolerations: + {{ cinder_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2 new file mode 100644 index 0000000..391d3b3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2 @@ -0,0 +1,14 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cinder-csi-pdb + namespace: kube-system +spec: +{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} + minAvailable: 1 +{% else %} + minAvailable: 0 +{% endif %} + selector: + matchLabels: + app: csi-cinder-controllerplugin diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml new file mode 100644 index 0000000..4790931 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- name: CSI CRD | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: volumesnapshotclasses, file: volumesnapshotclasses.yml} + - {name: volumesnapshotcontents, file: volumesnapshotcontents.yml} + - {name: volumesnapshots, file: volumesnapshots.yml} + register: csi_crd_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: CSI CRD | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + wait: true + with_items: + - "{{ csi_crd_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotclasses.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotclasses.yml.j2 new file mode 100644 index 0000000..47e5fd3 --- 
/dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotclasses.yml.j2 @@ -0,0 +1,116 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. 
+ deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotcontents.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotcontents.yml.j2 new file mode 100644 index 0000000..c611221 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotcontents.yml.j2 @@ -0,0 +1,305 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. 
+ jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. 
+ properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. 
+ jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. 
+ type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. 
Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshots.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshots.yml.j2 new file mode 100644 index 0000000..1b41ff8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshots.yml.j2 @@ -0,0 +1,231 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. 
+ jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. 
VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. 
+ type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml new file mode 100644 index 0000000..1ee662e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml @@ -0,0 +1,2 @@ +--- +gcp_pd_csi_controller_replicas: 1 diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml new file mode 100644 index 0000000..59a99f7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: GCP PD CSI Driver | Check if cloud-sa.json exists + fail: + msg: "Credentials file cloud-sa.json is mandatory" + when: gcp_pd_csi_sa_cred_file is not defined or not gcp_pd_csi_sa_cred_file + +- name: GCP PD CSI Driver | Copy GCP credentials file + copy: + src: "{{ gcp_pd_csi_sa_cred_file }}" + dest: "{{ kube_config_dir }}/cloud-sa.json" + group: "{{ kube_cert_group }}" + mode: 0640 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: GCP PD CSI Driver | Get base64 cloud-sa.json + slurp: + src: "{{ kube_config_dir }}/cloud-sa.json" + register: gcp_cred_secret + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: GCP PD CSI Driver | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: gcp-pd-csi-cred-secret, file: gcp-pd-csi-cred-secret.yml} + - {name: gcp-pd-csi-setup, file: gcp-pd-csi-setup.yml} + - {name: gcp-pd-csi-controller, file: gcp-pd-csi-controller.yml} + - {name: gcp-pd-csi-node, file: gcp-pd-csi-node.yml} + register: gcp_pd_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: GCP PD CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ gcp_pd_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 new file mode 100644 index 0000000..4762093 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 @@ -0,0 +1,75 @@ +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-gce-pd-controller + namespace: kube-system +spec: + serviceName: "csi-gce-pd" + replicas: {{ gcp_pd_csi_controller_replicas }} + selector: + matchLabels: + app: gcp-compute-persistent-disk-csi-driver + template: + metadata: + labels: + app: gcp-compute-persistent-disk-csi-driver + spec: + # Host network must be used for interaction with Workload Identity in GKE + # since it replaces GCE Metadata Server with GKE Metadata Server. 
Remove + # this requirement when issue is resolved and before any exposure of + # metrics ports + hostNetwork: true + serviceAccountName: csi-gce-pd-controller-sa + priorityClassName: csi-gce-pd-controller + containers: + - name: csi-provisioner + image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }} + args: + - "--v=5" + - "--csi-address=/csi/csi.sock" + - "--feature-gates=Topology=true" + - "--default-fstype=ext4" + # - "--run-controller-service=false" # disable the controller service of the CSI driver + # - "--run-node-service=false" # disable the node service of the CSI driver + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-attacher + image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }} + args: + - "--v=5" + - "--csi-address=/csi/csi.sock" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-resizer + image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }} + args: + - "--v=5" + - "--csi-address=/csi/csi.sock" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: gce-pd-driver + # Don't change base image without changing pdImagePlaceholder in + # test/k8s-integration/main.go + image: {{ gcp_pd_csi_plugin_image_repo }}:{{ gcp_pd_csi_plugin_image_tag }} + args: + - "--v=5" + - "--endpoint=unix:/csi/csi.sock" + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/etc/cloud-sa/cloud-sa.json" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: cloud-sa-volume + readOnly: true + mountPath: "/etc/cloud-sa" + volumes: + - name: socket-dir + emptyDir: {} + - name: cloud-sa-volume + secret: + secretName: cloud-sa + volumeClaimTemplates: [] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-cred-secret.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-cred-secret.yml.j2 new file mode 100644 index 0000000..f8291a4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-cred-secret.yml.j2 @@ -0,0 +1,8 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: cloud-sa + namespace: kube-system +data: + cloud-sa.json: {{ gcp_cred_secret.content }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 new file mode 100644 index 0000000..204ff97 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 @@ -0,0 +1,111 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-gce-pd-node + namespace: kube-system +spec: + selector: + matchLabels: + app: gcp-compute-persistent-disk-csi-driver + template: + metadata: + labels: + app: gcp-compute-persistent-disk-csi-driver + spec: + # Host network must be used for interaction with Workload Identity in GKE + # since it replaces GCE Metadata Server with GKE Metadata Server. Remove + # this requirement when issue is resolved and before any exposure of + # metrics ports. 
+ hostNetwork: true + priorityClassName: csi-gce-pd-node + serviceAccountName: csi-gce-pd-node-sa + containers: + - name: csi-driver-registrar + image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }} + args: + - "--v=5" + - "--csi-address=/csi/csi.sock" + - "--kubelet-registration-path=/var/lib/kubelet/plugins/pd.csi.storage.gke.io/csi.sock" + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/pd.csi.storage.gke.io /registration/pd.csi.storage.gke.io-reg.sock"] + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: gce-pd-driver + securityContext: + privileged: true + # Don't change base image without changing pdImagePlaceholder in + # test/k8s-integration/main.go + image: {{ gcp_pd_csi_plugin_image_repo }}:{{ gcp_pd_csi_plugin_image_tag }} + args: + - "--v=5" + - "--endpoint=unix:/csi/csi.sock" + volumeMounts: + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + - name: plugin-dir + mountPath: /csi + - name: device-dir + mountPath: /dev + # The following mounts are required to trigger host udevadm from + # container + - name: udev-rules-etc + mountPath: /etc/udev + - name: udev-rules-lib + mountPath: /lib/udev + - name: udev-socket + mountPath: /run/udev + - name: sys + mountPath: /sys + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/pd.csi.storage.gke.io/ + type: DirectoryOrCreate + - name: device-dir + hostPath: + path: /dev + type: Directory + # The following mounts are required to trigger host udevadm from + # container + - name: udev-rules-etc + hostPath: + path: /etc/udev + type: Directory + - name: udev-rules-lib + hostPath: + path: /lib/udev + type: Directory + - name: udev-socket + hostPath: + path: /run/udev + type: Directory + - name: sys + hostPath: + path: /sys + type: Directory + # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + # See "special case". This will tolerate everything. Node component should + # be scheduled on all nodes. 
+ tolerations: + - operator: Exists diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-setup.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-setup.yml.j2 new file mode 100644 index 0000000..4c693b3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-setup.yml.j2 @@ -0,0 +1,200 @@ +##### Node Service Account, Roles, RoleBindings +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-gce-pd-node-sa + namespace: kube-system + +--- +##### Controller Service Account, Roles, Rolebindings +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-gce-pd-controller-sa + namespace: kube-system + +--- +# xref: https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-controller-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-gce-pd-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- +# xref: https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-controller-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-gce-pd-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: csi-gce-pd-controller +value: 900000000 +globalDefault: false +description: "This priority class should be used for the GCE PD CSI driver controller deployment only." + +--- + +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: csi-gce-pd-node +value: 900001000 +globalDefault: false +description: "This priority class should be used for the GCE PD CSI driver node deployment only." + +--- + +# Resizer must be able to work with PVCs, PVs, SCs. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-resizer-binding +subjects: + - kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-gce-pd-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: csi-gce-pd-node-psp +spec: + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + privileged: true + volumes: + - '*' + hostNetwork: true + allowedHostPaths: + - pathPrefix: "/var/lib/kubelet/plugins_registry/" + - pathPrefix: "/var/lib/kubelet" + - pathPrefix: "/var/lib/kubelet/plugins/pd.csi.storage.gke.io/" + - pathPrefix: "/dev" + - pathPrefix: "/etc/udev" + - pathPrefix: "/lib/udev" + - pathPrefix: "/run/udev" + - pathPrefix: "/sys" +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-node-deploy +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - csi-gce-pd-node-psp +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csi-gce-pd-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: csi-gce-pd-node-deploy +subjects: +- kind: ServiceAccount + name: csi-gce-pd-node-sa + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml new file mode 100644 index 0000000..657b300 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml @@ -0,0 +1,16 @@ +--- +upcloud_csi_controller_replicas: 1 +upcloud_csi_provisioner_image_tag: "v3.1.0" +upcloud_csi_attacher_image_tag: "v3.4.0" +upcloud_csi_resizer_image_tag: "v1.4.0" +upcloud_csi_plugin_image_tag: "v0.3.3" +upcloud_csi_node_image_tag: "v2.5.0" +upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME') }}" +upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD') }}" +upcloud_tolerations: [] +upcloud_csi_enable_volume_snapshot: false +upcloud_csi_snapshot_controller_replicas: 2 +upcloud_csi_snapshotter_image_tag: "v4.2.1" +upcloud_csi_snapshot_controller_image_tag: "v4.2.1" +upcloud_csi_snapshot_validation_webhook_image_tag: "v4.2.1" +upcloud_cacert: "{{ lookup('env','OS_CACERT') }}" \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml new file mode 100644 index 0000000..f37daba --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml @@ -0,0 +1,40 @@ +--- +- name: UpCloud CSI Driver | Check if UPCLOUD_USERNAME exists + fail: + msg: "UpCloud username is missing. 
Env UPCLOUD_USERNAME is mandatory" + when: upcloud_username is not defined or not upcloud_username + +- name: UpCloud CSI Driver | Check if UPCLOUD_PASSWORD exists + fail: + msg: "UpCloud password is missing. Env UPCLOUD_PASSWORD is mandatory" + when: + - upcloud_username is defined + - upcloud_username|length > 0 + - upcloud_password is not defined or not upcloud_password + +- name: UpCloud CSI Driver | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: upcloud-csi-cred-secret, file: upcloud-csi-cred-secret.yml} + - {name: upcloud-csi-setup, file: upcloud-csi-setup.yml} + - {name: upcloud-csi-controller, file: upcloud-csi-controller.yml} + - {name: upcloud-csi-node, file: upcloud-csi-node.yml} + - {name: upcloud-csi-driver, file: upcloud-csi-driver.yml} + register: upcloud_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: UpCloud CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ upcloud_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-controller.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-controller.yml.j2 new file mode 100644 index 0000000..0d52837 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-controller.yml.j2 @@ -0,0 +1,93 @@ +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-upcloud-controller + namespace: kube-system +spec: + serviceName: "csi-upcloud" + replicas: {{ upcloud_csi_controller_replicas }} + selector: + matchLabels: + app: csi-upcloud-controller + template: + metadata: + labels: + app: csi-upcloud-controller + role: csi-upcloud + spec: + priorityClassName: system-cluster-critical + serviceAccount: csi-upcloud-controller-sa + containers: + - name: csi-provisioner + image: registry.k8s.io/sig-storage/csi-provisioner:{{ upcloud_csi_provisioner_image_tag }} + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + - "--timeout=600s" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "Always" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-attacher + image: registry.k8s.io/sig-storage/csi-attacher:{{ upcloud_csi_attacher_image_tag }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--timeout=120s" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "Always" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-resizer + image: registry.k8s.io/sig-storage/csi-resizer:{{ upcloud_csi_resizer_image_tag }} + args: + - "--v=5" + - "--timeout=120s" + - "--csi-address=$(ADDRESS)" + - "--handle-volume-inuse-error=true" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "Always" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-upcloud-plugin + image: ghcr.io/upcloudltd/upcloud-csi:{{ upcloud_csi_plugin_image_tag }} + args: + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodehost=$(NODE_ID)" + - 
"--username=$(UPCLOUD_USERNAME)" + - "--password=$(UPCLOUD_PASSWORD)" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: UPCLOUD_USERNAME + valueFrom: + secretKeyRef: + name: upcloud + key: username + - name: UPCLOUD_PASSWORD + valueFrom: + secretKeyRef: + name: upcloud + key: password + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: "Always" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + imagePullSecrets: + - name: regcred + volumes: + - name: socket-dir + emptyDir: {} \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-cred-secret.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-cred-secret.yml.j2 new file mode 100644 index 0000000..5e91d88 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-cred-secret.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: upcloud + namespace: kube-system +stringData: + username: {{ upcloud_username }} + password: {{ upcloud_password }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-driver.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-driver.yml.j2 new file mode 100644 index 0000000..363394a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-driver.yml.j2 @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: storage.csi.upcloud.com +spec: + attachRequired: true + podInfoOnMount: true + fsGroupPolicy: File \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-node.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-node.yml.j2 new file mode 100644 index 0000000..7173c6b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-node.yml.j2 @@ -0,0 +1,101 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-upcloud-node + namespace: kube-system +spec: + selector: + matchLabels: + app: csi-upcloud-node + template: + metadata: + labels: + app: csi-upcloud-node + role: csi-upcloud + spec: + priorityClassName: system-node-critical + serviceAccount: csi-upcloud-node-sa + hostNetwork: true + containers: + - name: csi-node-driver-registrar + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:{{ upcloud_csi_node_image_tag }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/storage.csi.upcloud.com/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /csi/ + - name: registration-dir + mountPath: /registration/ + - name: csi-upcloud-plugin + image: ghcr.io/upcloudltd/upcloud-csi:{{ upcloud_csi_plugin_image_tag }} + args: + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodehost=$(NODE_ID)" + - "--username=$(UPCLOUD_USERNAME)" + - "--password=$(UPCLOUD_PASSWORD)" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: UPCLOUD_USERNAME + valueFrom: + secretKeyRef: + name: upcloud + key: username + - name: 
UPCLOUD_PASSWORD + valueFrom: + secretKeyRef: + name: upcloud + key: password + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: "Always" + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet + # needed so that any mounts setup inside this container are + # propagated back to the host machine. + mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + imagePullSecrets: + - name: regcred + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/storage.csi.upcloud.com + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: device-dir + hostPath: + path: /dev +{% if upcloud_tolerations %} + tolerations: + {{ upcloud_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-setup.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-setup.yml.j2 new file mode 100644 index 0000000..3bc0bd5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-setup.yml.j2 @@ -0,0 +1,185 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: csi-upcloud-controller-sa + namespace: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-upcloud-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-node-driver-registrar-role + namespace: kube-system +rules: + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "get", "list", "watch", "create", "update", "patch" ] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-node-driver-registrar-binding +subjects: + - kind: ServiceAccount + name: csi-upcloud-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-upcloud-node-driver-registrar-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-provisioner-role +rules: + - apiGroups: [ "" ] + resources: [ "secrets" ] + verbs: [ "get", "list" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "create", "delete" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims" ] + verbs: [ "get", "list", "watch", "update" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "csinodes" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "list", "watch", "create", "update", "patch" ] + - apiGroups: [ "" ] + resources: [ "nodes" ] + verbs: [ "get", "list", "watch" ] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-upcloud-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-upcloud-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- +# Attacher must be able to work with 
PVs, nodes and VolumeAttachments +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-attacher-role +rules: + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "update", "patch" ] + - apiGroups: [ "" ] + resources: [ "nodes" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "csinodes" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "volumeattachments" ] + verbs: [ "get", "list", "watch", "update", "patch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "volumeattachments/status" ] + verbs: [ "get", "list", "watch", "update", "patch" ] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-upcloud-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-upcloud-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- +# Provisioner must be able to work with endpoints and leases in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kube-system + name: csi-upcloud-provisioner-cfg-role +rules: +- apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-role-cfg-binding + namespace: kube-system +subjects: + - kind: ServiceAccount + name: csi-upcloud-controller-sa + namespace: kube-system +roleRef: + kind: Role + name: csi-upcloud-provisioner-cfg-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-resizer-role +rules: + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "update", "patch" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims/status" ] + verbs: [ "update", "patch" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "list", "watch", "create", "update", "patch" ] + - apiGroups: [ "" ] + resources: [ "pods" ] + verbs: [ "watch", "list" ] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-resizer-binding +subjects: + - kind: ServiceAccount + name: csi-upcloud-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-upcloud-resizer-role + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml new file mode 100644 index 0000000..0a4d02d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml @@ -0,0 +1,37 @@ +--- +external_vsphere_vcenter_port: "443" +external_vsphere_insecure: "true" +external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" +external_vsphere_version: "7.0u1" + +vsphere_syncer_image_tag: "v2.5.1" +vsphere_csi_attacher_image_tag: "v3.4.0" +vsphere_csi_controller: "v2.5.1" +vsphere_csi_liveness_probe_image_tag: "v2.6.0" 
+vsphere_csi_provisioner_image_tag: "v3.1.0" +vsphere_csi_snapshotter_image_tag: "v5.0.1" +vsphere_csi_node_driver_registrar_image_tag: "v2.5.0" +vsphere_csi_driver_image_tag: "v2.5.1" +vsphere_csi_resizer_tag: "v1.4.0" + +# Set to kube-system for backward compatibility, should be changed to vmware-system-csi in the long run +vsphere_csi_namespace: "kube-system" + +vsphere_csi_controller_replicas: 1 + +csi_endpoint: '{% if external_vsphere_version >= "7.0u1" %}/csi{% else %}/var/lib/csi/sockets/pluginproxy{% endif %}' + +vsphere_csi_aggressive_node_drain: False +vsphere_csi_aggressive_node_unreachable_timeout: 300 +vsphere_csi_aggressive_node_not_ready_timeout: 300 + +vsphere_csi_node_affinity: {} + +# If this is true, debug information will be displayed but +# may contain some private data, so it is recommended to set it to false +# in the production environment. +unsafe_show_logs: false + +# https://github.com/kubernetes-sigs/vsphere-csi-driver/blob/master/docs/book/features/volume_snapshot.md#how-to-enable-volume-snapshot--restore-feature-in-vsphere-csi- +# according to the above link, we can control the block-volume-snapshot parameter +vsphere_csi_block_volume_snapshot: false \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml new file mode 100644 index 0000000..5983fa0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml @@ -0,0 +1,54 @@ +--- +- include_tasks: vsphere-credentials-check.yml + +- name: vSphere CSI Driver | Generate CSI cloud-config + template: + src: "{{ item }}.j2" + dest: "{{ kube_config_dir }}/{{ item }}" + mode: 0640 + with_items: + - vsphere-csi-cloud-config + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: vSphere CSI Driver | Generate Manifests + template: + src: "{{ item }}.j2" + dest: "{{ kube_config_dir }}/{{ item }}" + mode: 0644 + with_items: + - vsphere-csi-namespace.yml + - vsphere-csi-driver.yml + - vsphere-csi-controller-rbac.yml + - vsphere-csi-node-rbac.yml + - vsphere-csi-controller-config.yml + - vsphere-csi-controller-deployment.yml + - vsphere-csi-controller-service.yml + - vsphere-csi-node.yml + register: vsphere_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: vSphere CSI Driver | Generate a CSI secret manifest + command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml" + register: vsphere_csi_secret_manifest + when: inventory_hostname == groups['kube_control_plane'][0] + no_log: "{{ not (unsafe_show_logs|bool) }}" + +- name: vSphere CSI Driver | Apply a CSI secret manifest + command: + cmd: "{{ kubectl }} apply -f -" + stdin: "{{ vsphere_csi_secret_manifest.stdout }}" + when: inventory_hostname == groups['kube_control_plane'][0] + no_log: "{{ not (unsafe_show_logs|bool) }}" + +- name: vSphere CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item }}" + state: "latest" + with_items: + - "{{ vsphere_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/tasks/vsphere-credentials-check.yml
b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/tasks/vsphere-credentials-check.yml new file mode 100644 index 0000000..3504f60 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/tasks/vsphere-credentials-check.yml @@ -0,0 +1,38 @@ +--- +- name: External vSphere Cloud Provider | check external_vsphere_vcenter_ip value + fail: + msg: "external_vsphere_vcenter_ip is missing" + when: external_vsphere_vcenter_ip is not defined or not external_vsphere_vcenter_ip + +- name: External vSphere Cloud Provider | check external_vsphere_vcenter_port value + fail: + msg: "external_vsphere_vcenter_port is missing" + when: external_vsphere_vcenter_port is not defined or not external_vsphere_vcenter_port + +- name: External vSphere Cloud Provider | check external_vsphere_insecure value + fail: + msg: "external_vsphere_insecure is missing" + when: external_vsphere_insecure is not defined or not external_vsphere_insecure + +- name: External vSphere Cloud Provider | check external_vsphere_user value + fail: + msg: "external_vsphere_user is missing" + when: external_vsphere_user is not defined or not external_vsphere_user + +- name: External vSphere Cloud Provider | check external_vsphere_password value + fail: + msg: "external_vsphere_password is missing" + when: + - external_vsphere_password is not defined or not external_vsphere_password + +- name: External vSphere Cloud Provider | check external_vsphere_datacenter value + fail: + msg: "external_vsphere_datacenter is missing" + when: + - external_vsphere_datacenter is not defined or not external_vsphere_datacenter + +- name: External vSphere Cloud Provider | check external_vsphere_kubernetes_cluster_id value + fail: + msg: "external_vsphere_kubernetes_cluster_id is missing" + when: + - external_vsphere_kubernetes_cluster_id is not defined or not external_vsphere_kubernetes_cluster_id diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-cloud-config.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-cloud-config.j2 new file mode 100644 index 0000000..ee5033a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-cloud-config.j2 @@ -0,0 +1,9 @@ +[Global] +cluster-id = "{{ external_vsphere_kubernetes_cluster_id }}" + +[VirtualCenter "{{ external_vsphere_vcenter_ip }}"] +insecure-flag = "{{ external_vsphere_insecure }}" +user = "{{ external_vsphere_user }}" +password = "{{ external_vsphere_password }}" +port = "{{ external_vsphere_vcenter_port }}" +datacenters = "{{ external_vsphere_datacenter }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 new file mode 100644 index 0000000..d7ee521 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 @@ -0,0 +1,24 @@ +apiVersion: v1 +data: + "csi-migration": "false" +{% if external_vsphere_version >= "7.0" %} + "csi-auth-check": "true" +{% else %} + "csi-auth-check": "false" +{% endif %} + "online-volume-extend": "true" + "trigger-csi-fullsync": "false" + "async-query-volume": "true" + "improved-csi-idempotency": "true" + "improved-volume-topology": "true" + "block-volume-snapshot": "{{ vsphere_csi_block_volume_snapshot }}" + "csi-windows-support": 
"false" +{% if vsphere_csi_controller is version('v2.5.0', '>=') %} + "use-csinode-id": "true" + "pv-to-backingdiskobjectid-mapping": "false" + "cnsmgr-suspend-create-volume": "false" +{% endif %} +kind: ConfigMap +metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: "{{ vsphere_csi_namespace }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-deployment.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-deployment.yml.j2 new file mode 100644 index 0000000..1c1de2f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-deployment.yml.j2 @@ -0,0 +1,220 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-controller + namespace: "{{ vsphere_csi_namespace }}" +spec: + replicas: {{ vsphere_csi_controller_replicas }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 0 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - vsphere-csi-controller + topologyKey: "kubernetes.io/hostname" + serviceAccountName: vsphere-csi-controller + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - operator: "Exists" + key: node-role.kubernetes.io/master + effect: NoSchedule + - operator: "Exists" + key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% if vsphere_csi_aggressive_node_drain %} + # set below toleration if you need an aggressive pod eviction in case when + # node becomes not-ready or unreachable. Default is 300 seconds if not specified. 
+ - key: node.kubernetes.io/not-ready + operator: Exists + effect: NoExecute + tolerationSeconds: {{ vsphere_csi_aggressive_node_not_ready_timeout }} + - key: node.kubernetes.io/unreachable + operator: Exists + effect: NoExecute + tolerationSeconds: {{ vsphere_csi_aggressive_node_unreachable_timeout }} +{% endif %} + dnsPolicy: "Default" + containers: + - name: csi-attacher + image: {{ kube_image_repo }}/sig-storage/csi-attacher:{{ vsphere_csi_attacher_image_tag }} + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir +{% if external_vsphere_version >= "7.0" %} + - name: csi-resizer + image: {{ kube_image_repo }}/sig-storage/csi-resizer:{{ vsphere_csi_resizer_tag }} + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--handle-volume-inuse-error=false" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--leader-election" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir +{% endif %} + - name: vsphere-csi-controller + image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/driver:{{ vsphere_csi_controller }} + args: + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace={{ vsphere_csi_namespace }}" + - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}" + - "--use-gocsi=false" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: CSI_ENDPOINT + value: unix://{{ csi_endpoint }}/csi.sock + - name: X_CSI_MODE + value: "controller" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + - name: VSPHERE_CSI_CONFIG + value: "/etc/cloud/csi-vsphere.conf" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION +{% if external_vsphere_version >= "7.0u1" %} + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" +{% endif %} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: {{ csi_endpoint }} + name: socket-dir + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + - name: prometheus + containerPort: 2112 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 5 + failureThreshold: 3 + - name: liveness-probe + image: {{ kube_image_repo }}/sig-storage/livenessprobe:{{ vsphere_csi_liveness_probe_image_tag }} + args: + - "--v=4" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: {{ csi_endpoint }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: {{ csi_endpoint }} + - name: vsphere-syncer + image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/syncer:{{ vsphere_syncer_image_tag }} + args: + - "--leader-election" + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace={{ vsphere_csi_namespace }}" + - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP + env: + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VSPHERE_CSI_CONFIG + value: "/etc/cloud/csi-vsphere.conf" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION +{% if external_vsphere_version >= "7.0u1" %} + - name: INCLUSTER_CLIENT_QPS + value: 
"100" + - name: INCLUSTER_CLIENT_BURST + value: "100" +{% endif %} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - name: csi-provisioner + image: {{ kube_image_repo }}/sig-storage/csi-provisioner:{{ vsphere_csi_provisioner_image_tag }} + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" +{% if vsphere_csi_controller is version('v2.2.0', '>=') %} + - "--kube-api-qps=100" + - "--kube-api-burst=100" +{% endif %} + - "--leader-election" + - "--default-fstype=ext4" + # needed only for topology aware setup + #- "--feature-gates=Topology=true" + #- "--strict-topology" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir +{% if vsphere_csi_controller is version('v2.5.0', '>=') %} + - name: csi-snapshotter + image: {{ kube_image_repo }}/sig-storage/csi-snapshotter:{{ vsphere_csi_snapshotter_image_tag }} + args: + - "--v=4" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir +{% endif %} + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - name: socket-dir + emptyDir: {} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 new file mode 100644 index 0000000..fd614f9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 @@ -0,0 +1,86 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-controller + namespace: "{{ vsphere_csi_namespace }}" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-role +rules: + - apiGroups: [""] + resources: ["nodes", "pods", "configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] +{% if external_vsphere_version >= "7.0" %} + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] +{% if external_vsphere_version >= "7.0u1" %} + verbs: ["patch"] +{% else %} + verbs: ["update", "patch"] +{% endif %} +{% endif %} + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +{% if vsphere_csi_controller is version('v2.0.0', '>=') %} + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +{% endif %} + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses","csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "patch", "update"] + - apiGroups: ["cns.vmware.com"] + resources: ["triggercsifullsyncs"] + verbs: ["create", "get", "update", "watch", "list"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsvspherevolumemigrations"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "create", "update"] + - apiGroups: ["cns.vmware.com"] + resources: 
["cnsvolumeoperationrequests"] + verbs: ["create", "get", "list", "update", "delete"] + - apiGroups: [ "cns.vmware.com" ] + resources: [ "csinodetopologies" ] + verbs: ["get", "update", "watch", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshots" ] + verbs: [ "get", "list" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotclasses" ] + verbs: [ "watch", "get", "list" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotcontents" ] + verbs: [ "create", "get", "list", "watch", "update", "delete", "patch" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotcontents/status" ] + verbs: [ "update", "patch" ] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-binding +subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: "{{ vsphere_csi_namespace }}" +roleRef: + kind: ClusterRole + name: vsphere-csi-controller-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2 new file mode 100644 index 0000000..75967ba --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2 @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: "{{ vsphere_csi_namespace }}" + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-driver.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-driver.yml.j2 new file mode 100644 index 0000000..ad3260e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-driver.yml.j2 @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: csi.vsphere.vmware.com +spec: + attachRequired: true + podInfoOnMount: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-namespace.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-namespace.yml.j2 new file mode 100644 index 0000000..0a28bda --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-namespace.yml.j2 @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: "{{ vsphere_csi_namespace }}" \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node-rbac.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node-rbac.yml.j2 new file mode 100644 index 0000000..42896e1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node-rbac.yml.j2 @@ -0,0 +1,55 @@ +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-node + namespace: "{{ vsphere_csi_namespace }}" +--- +kind: 
ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-node-cluster-role +rules: + - apiGroups: ["cns.vmware.com"] + resources: ["csinodetopologies"] + verbs: ["create", "watch", "get", "patch" ] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-node-cluster-role-binding +subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: "{{ vsphere_csi_namespace }}" +roleRef: + kind: ClusterRole + name: vsphere-csi-node-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-node-role + namespace: "{{ vsphere_csi_namespace }}" +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-node-binding + namespace: "{{ vsphere_csi_namespace }}" +subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: "{{ vsphere_csi_namespace }}" +roleRef: + kind: Role + name: vsphere-csi-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2 new file mode 100644 index 0000000..1a8370d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2 @@ -0,0 +1,157 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: vsphere-csi-node + namespace: "{{ vsphere_csi_namespace }}" +spec: + selector: + matchLabels: + app: vsphere-csi-node + updateStrategy: + type: "RollingUpdate" + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + nodeSelector: + kubernetes.io/os: linux +{% if vsphere_csi_node_affinity %} + affinity: + {{ vsphere_csi_node_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} + serviceAccountName: vsphere-csi-node + hostNetwork: true + dnsPolicy: "ClusterFirstWithHostNet" + containers: + - name: node-driver-registrar + image: {{ kube_image_repo }}/sig-storage/csi-node-driver-registrar:{{ vsphere_csi_node_driver_registrar_image_tag }} +{% if external_vsphere_version < "7.0u1" %} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] +{% endif %} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + - name: vsphere-csi-node + image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/driver:{{ vsphere_csi_driver_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace={{ vsphere_csi_namespace }}" + - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}" + - "--use-gocsi=false" + 
imagePullPolicy: "Always" + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" # Maximum number of volumes that controller can publish to the node. If value is not set or zero Kubernetes decide how many volumes can be published by the controller to the node. + - name: X_CSI_MODE + value: "node" + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_DEBUG + value: "true" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet + # needed so that any mounts setup inside this container are + # propagated back to the host machine. + mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + - name: blocks-dir + mountPath: /sys/block + - name: sys-devices-dir + mountPath: /sys/devices + ports: + - containerPort: 9808 + name: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 5 + failureThreshold: 3 + - name: liveness-probe + image: {{ kube_image_repo }}/sig-storage/livenessprobe:{{ vsphere_csi_liveness_probe_image_tag }} + args: +{% if external_vsphere_version >= "7.0u1" %} + - "--v=4" +{% endif %} + - "--csi-address=/csi/csi.sock" + volumeMounts: + - name: plugin-dir + mountPath: /csi + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: device-dir + hostPath: + path: /dev + - name: blocks-dir + hostPath: + path: /sys/block + type: Directory + - name: sys-devices-dir + hostPath: + path: /sys/devices + type: Directory + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/defaults/main.yml new file mode 100644 index 0000000..5d9ba29 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/defaults/main.yml @@ -0,0 +1,14 @@ +--- +external_hcloud_cloud: + hcloud_api_token: "" + token_secret_name: hcloud + + service_account_name: cloud-controller-manager + + controller_image_tag: "latest" + ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset + ## Format: + ## external_hcloud_cloud.controller_extra_args: + ## arg1: "value1" + ## arg2: "value2" + controller_extra_args: {} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml new file mode 100644 index 0000000..e09f99d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: External Hcloud Cloud Controller | Generate Manifests + template: + src: "{{ item.file 
}}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + group: "{{ kube_cert_group }}" + mode: 0640 + with_items: + - {name: external-hcloud-cloud-secret, file: external-hcloud-cloud-secret.yml} + - {name: external-hcloud-cloud-service-account, file: external-hcloud-cloud-service-account.yml} + - {name: external-hcloud-cloud-role-bindings, file: external-hcloud-cloud-role-bindings.yml} + - {name: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds' }}", file: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks.yml' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds.yml' }}"} + + register: external_hcloud_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + tags: external-hcloud + +- name: External Hcloud Cloud Controller | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ external_hcloud_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + tags: external-hcloud diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds-with-networks.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds-with-networks.yml.j2 new file mode 100644 index 0000000..c64a566 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds-with-networks.yml.j2 @@ -0,0 +1,72 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: hcloud-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: hcloud-cloud-controller-manger +spec: + selector: + matchLabels: + app: hcloud-cloud-controller-manager + template: + metadata: + labels: + app: hcloud-cloud-controller-manager + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + serviceAccountName: {{ external_hcloud_cloud.service_account_name }} + dnsPolicy: Default + tolerations: + - key: "node.cloudprovider.kubernetes.io/uninitialized" + value: "true" + effect: "NoSchedule" + - key: "CriticalAddonsOnly" + operator: "Exists" + - key: "node-role.kubernetes.io/master" + effect: NoSchedule + operator: Exists + - key: "node-role.kubernetes.io/control-plane" + effect: NoSchedule + operator: Exists + - key: "node.kubernetes.io/not-ready" + effect: "NoSchedule" + hostNetwork: true + containers: + - image: {{ docker_image_repo }}/hetznercloud/hcloud-cloud-controller-manager:{{ external_hcloud_cloud.controller_image_tag }} + name: hcloud-cloud-controller-manager + command: + - "/bin/hcloud-cloud-controller-manager" + - "--cloud-provider=hcloud" + - "--leader-elect=false" + - "--allow-untagged-cloud" + - "--allocate-node-cidrs=true" + - "--cluster-cidr={{ kube_pods_subnet }}" +{% if external_hcloud_cloud.controller_extra_args is defined %} + + args: +{% for key, value in external_hcloud_cloud.controller_extra_args.items() %} + - "{{ '--' + key + '=' + value }}" +{% endfor %} +{% endif %} + resources: + requests: + cpu: 100m + memory: 50Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: HCLOUD_TOKEN + valueFrom: + secretKeyRef: + name: {{ 
external_hcloud_cloud.token_secret_name }} + key: token + - name: HCLOUD_NETWORK + valueFrom: + secretKeyRef: + name: {{ external_hcloud_cloud.token_secret_name }} + key: network diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds.yml.j2 new file mode 100644 index 0000000..95473cd --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds.yml.j2 @@ -0,0 +1,63 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: hcloud-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: hcloud-cloud-controller-manger +spec: + selector: + matchLabels: + app: hcloud-cloud-controller-manager + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: hcloud-cloud-controller-manager + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + serviceAccountName: {{ external_hcloud_cloud.service_account_name }} + dnsPolicy: Default + tolerations: + - key: "node.cloudprovider.kubernetes.io/uninitialized" + value: "true" + effect: "NoSchedule" + - key: "CriticalAddonsOnly" + operator: "Exists" + - key: "node-role.kubernetes.io/master" + effect: NoSchedule + - key: "node-role.kubernetes.io/control-plane" + effect: NoSchedule + - key: "node.kubernetes.io/not-ready" + effect: "NoSchedule" + containers: + - image: {{ docker_image_repo }}/hetznercloud/hcloud-cloud-controller-manager:{{ external_hcloud_cloud.controller_image_tag }} + name: hcloud-cloud-controller-manager + command: + - "/bin/hcloud-cloud-controller-manager" + - "--cloud-provider=hcloud" + - "--leader-elect=false" + - "--allow-untagged-cloud" +{% if external_hcloud_cloud.controller_extra_args is defined %} + args: +{% for key, value in external_hcloud_cloud.controller_extra_args.items() %} + - "{{ '--' + key + '=' + value }}" +{% endfor %} +{% endif %} + resources: + requests: + cpu: 100m + memory: 50Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: HCLOUD_TOKEN + valueFrom: + secretKeyRef: + name: {{ external_hcloud_cloud.token_secret_name }} + key: token diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-role-bindings.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-role-bindings.yml.j2 new file mode 100644 index 0000000..270c947 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-role-bindings.yml.j2 @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: {{ external_hcloud_cloud.service_account_name }} + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 new file mode 100644 index 0000000..c2ea894 --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: "{{ external_hcloud_cloud.token_secret_name }}" + namespace: kube-system +data: + token: "{{ external_hcloud_cloud.hcloud_api_token | b64encode }}" +{% if external_hcloud_cloud.with_networks %} + network: "{{ network_id|b64encode }}" +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-service-account.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-service-account.yml.j2 new file mode 100644 index 0000000..93277dd --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-service-account.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ external_hcloud_cloud.service_account_name }} + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/meta/main.yml new file mode 100644 index 0000000..6e8c235 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/meta/main.yml @@ -0,0 +1,32 @@ +--- +dependencies: + - role: kubernetes-apps/external_cloud_controller/openstack + when: + - cloud_provider is defined + - cloud_provider == "external" + - external_cloud_provider is defined + - external_cloud_provider == "openstack" + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - external-cloud-controller + - external-openstack + - role: kubernetes-apps/external_cloud_controller/vsphere + when: + - cloud_provider is defined + - cloud_provider == "external" + - external_cloud_provider is defined + - external_cloud_provider == "vsphere" + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - external-cloud-controller + - external-vsphere + - role: kubernetes-apps/external_cloud_controller/hcloud + when: + - cloud_provider is defined + - cloud_provider == "external" + - external_cloud_provider is defined + - external_cloud_provider == "hcloud" + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - external-cloud-controller + - external-hcloud diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/OWNERS b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/OWNERS new file mode 100644 index 0000000..6cfbaa8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +reviewers: + - alijahnas + - luckySB diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml new file mode 100644 index 0000000..71af4b4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml @@ -0,0 +1,24 @@ +--- +# The external cloud controller will need credentials to access +# openstack apis. Per default these values will be +# read from the environment. 
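+# Illustrative sketch only (hypothetical values): the OS_* lookups below read the
+# standard OpenStack client environment variables, normally exported on the
+# Ansible control host (for example by sourcing an openrc file) before the
+# playbook runs, e.g.:
+#   export OS_AUTH_URL=https://keystone.example.com:5000/v3
+#   export OS_USERNAME=kubespray
+#   export OS_PASSWORD=changeme
+#   export OS_REGION_NAME=RegionOne
+#   export OS_PROJECT_NAME=k8s-project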
+external_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +external_openstack_username: "{{ lookup('env','OS_USERNAME') }}" +external_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" +external_openstack_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID') }}" +external_openstack_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME') }}" +external_openstack_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET') }}" +external_openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" +external_openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}" +external_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}" +external_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" +external_openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" +external_openstack_cacert: "{{ lookup('env','OS_CACERT') }}" + +## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +## Format: +## external_openstack_cloud_controller_extra_args: +## arg1: "value1" +## arg2: "value2" +external_openstack_cloud_controller_extra_args: {} +external_openstack_cloud_controller_image_tag: "v1.25.3" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml new file mode 100644 index 0000000..ac3810c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml @@ -0,0 +1,48 @@ +--- +- include_tasks: openstack-credential-check.yml + tags: external-openstack + +- name: External OpenStack Cloud Controller | Get base64 cacert + slurp: + src: "{{ external_openstack_cacert }}" + register: external_openstack_cacert_b64 + when: + - inventory_hostname == groups['kube_control_plane'][0] + - external_openstack_cacert is defined + - external_openstack_cacert | length > 0 + tags: external-openstack + +- name: External OpenStack Cloud Controller | Get base64 cloud-config + set_fact: + external_openstack_cloud_config_secret: "{{ lookup('template', 'external-openstack-cloud-config.j2') | b64encode }}" + when: inventory_hostname == groups['kube_control_plane'][0] + tags: external-openstack + +- name: External OpenStack Cloud Controller | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + group: "{{ kube_cert_group }}" + mode: 0640 + with_items: + - {name: external-openstack-cloud-config-secret, file: external-openstack-cloud-config-secret.yml} + - {name: external-openstack-cloud-controller-manager-roles, file: external-openstack-cloud-controller-manager-roles.yml} + - {name: external-openstack-cloud-controller-manager-role-bindings, file: external-openstack-cloud-controller-manager-role-bindings.yml} + - {name: external-openstack-cloud-controller-manager-ds, file: external-openstack-cloud-controller-manager-ds.yml} + register: external_openstack_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + tags: external-openstack + +- name: External OpenStack Cloud Controller | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ external_openstack_manifests.results }}" + when: + - inventory_hostname == 
groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + tags: external-openstack diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml new file mode 100644 index 0000000..9abc927 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml @@ -0,0 +1,66 @@ +--- +- name: External OpenStack Cloud Controller | check external_openstack_auth_url value + fail: + msg: "external_openstack_auth_url is missing" + when: external_openstack_auth_url is not defined or not external_openstack_auth_url + + +- name: External OpenStack Cloud Controller | check external_openstack_username or external_openstack_application_credential_name value + fail: + msg: "you must either set external_openstack_username or external_openstack_application_credential_name" + when: + - external_openstack_username is not defined or not external_openstack_username + - external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name + + +- name: External OpenStack Cloud Controller | check external_openstack_application_credential_id value + fail: + msg: "external_openstack_application_credential_id is missing" + when: + - external_openstack_application_credential_name is defined + - external_openstack_application_credential_name|length > 0 + - external_openstack_application_credential_id is not defined or not external_openstack_application_credential_id + + +- name: External OpenStack Cloud Controller | check external_openstack_application_credential_secret value + fail: + msg: "external_openstack_application_credential_secret is missing" + when: + - external_openstack_application_credential_name is defined + - external_openstack_application_credential_name|length > 0 + - external_openstack_application_credential_secret is not defined or not external_openstack_application_credential_secret + + +- name: External OpenStack Cloud Controller | check external_openstack_password value + fail: + msg: "external_openstack_password is missing" + when: + - external_openstack_username is defined + - external_openstack_username|length > 0 + - external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name + - external_openstack_application_credential_secret is not defined or not external_openstack_application_credential_secret + - external_openstack_password is not defined or not external_openstack_password + + +- name: External OpenStack Cloud Controller | check external_openstack_region value + fail: + msg: "external_openstack_region is missing" + when: external_openstack_region is not defined or not external_openstack_region + + +- name: External OpenStack Cloud Controller | check external_openstack_tenant_id value + fail: + msg: "one of external_openstack_tenant_id or external_openstack_tenant_name must be specified" + when: + - external_openstack_tenant_id is not defined or not external_openstack_tenant_id + - external_openstack_tenant_name is not defined or not external_openstack_tenant_name + - external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name + + +- name: External OpenStack Cloud Controller | check external_openstack_domain_id value + fail: + 
msg: "one of external_openstack_domain_id or external_openstack_domain_name must be specified" + when: + - external_openstack_domain_id is not defined or not external_openstack_domain_id + - external_openstack_domain_name is not defined or not external_openstack_domain_name + - external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config-secret.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config-secret.yml.j2 new file mode 100644 index 0000000..2a6f6a8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config-secret.yml.j2 @@ -0,0 +1,13 @@ +# This YAML file contains secret objects, +# which are necessary to run external openstack cloud controller. + +kind: Secret +apiVersion: v1 +metadata: + name: external-openstack-cloud-config + namespace: kube-system +data: + cloud.conf: {{ external_openstack_cloud_config_secret }} +{% if external_openstack_cacert_b64.content is defined %} + ca.cert: {{ external_openstack_cacert_b64.content }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config.j2 new file mode 100644 index 0000000..adb08ae --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config.j2 @@ -0,0 +1,87 @@ +[Global] +auth-url="{{ external_openstack_auth_url }}" +{% if external_openstack_application_credential_id == "" and external_openstack_application_credential_name == "" %} +username="{{ external_openstack_username }}" +password="{{ external_openstack_password }}" +{% endif %} +{% if external_openstack_application_credential_id is defined and external_openstack_application_credential_id != "" %} +application-credential-id={{ external_openstack_application_credential_id }} +{% endif %} +{% if external_openstack_application_credential_name is defined and external_openstack_application_credential_name != "" %} +application-credential-name={{ external_openstack_application_credential_name }} +{% endif %} +{% if external_openstack_application_credential_secret is defined and external_openstack_application_credential_secret != "" %} +application-credential-secret={{ external_openstack_application_credential_secret }} +{% endif %} +region="{{ external_openstack_region }}" +{% if external_openstack_tenant_id is defined and external_openstack_tenant_id != "" %} +tenant-id="{{ external_openstack_tenant_id }}" +{% endif %} +{% if external_openstack_tenant_name is defined and external_openstack_tenant_name != "" %} +tenant-name="{{ external_openstack_tenant_name }}" +{% endif %} +{% if external_openstack_domain_name is defined and external_openstack_domain_name != "" %} +domain-name="{{ external_openstack_domain_name }}" +{% elif external_openstack_domain_id is defined and external_openstack_domain_id != "" %} +domain-id ="{{ external_openstack_domain_id }}" +{% endif %} +{% if external_openstack_cacert is defined and external_openstack_cacert != "" %} +ca-file="{{ kube_config_dir }}/external-openstack-cacert.pem" +{% endif %} + 
+[LoadBalancer] +create-monitor={{ external_openstack_lbaas_create_monitor }} +monitor-delay={{ external_openstack_lbaas_monitor_delay }} +monitor-timeout={{ external_openstack_lbaas_monitor_timeout }} +monitor-max-retries={{ external_openstack_lbaas_monitor_max_retries }} +{% if external_openstack_lbaas_method is defined %} +lb-method={{ external_openstack_lbaas_method }} +{% endif %} +{% if external_openstack_lbaas_network_id is defined %} +network-id={{ external_openstack_lbaas_network_id }} +{% endif %} +{% if external_openstack_lbaas_subnet_id is defined %} +subnet-id={{ external_openstack_lbaas_subnet_id }} +{% endif %} +{% if external_openstack_lbaas_floating_network_id is defined %} +floating-network-id={{ external_openstack_lbaas_floating_network_id }} +{% endif %} +{% if external_openstack_lbaas_floating_subnet_id is defined %} +floating-subnet-id={{ external_openstack_lbaas_floating_subnet_id }} +{% endif %} +{% if external_openstack_lbaas_manage_security_groups is defined %} +manage-security-groups={{ external_openstack_lbaas_manage_security_groups }} +{% endif %} +{% if external_openstack_lbaas_internal_lb is defined %} +internal-lb={{ external_openstack_lbaas_internal_lb }} +{% endif %} +{% if external_openstack_lbaas_provider is defined %} +lb-provider={{ external_openstack_lbaas_provider }} +use-octavia={{ external_openstack_lbaas_use_octavia }} +{% else %} +lb-provider=octavia +use-octavia=true +{% endif %} +{% if external_openstack_enable_ingress_hostname is defined %} +enable-ingress-hostname={{ external_openstack_enable_ingress_hostname | bool }} +{% endif %} +{% if external_openstack_ingress_hostname_suffix is defined %} +ingress-hostname-suffix={{ external_openstack_ingress_hostname_suffix | string | lower }} +{% endif %} +{% if external_openstack_max_shared_lb is defined %} +max-shared-lb={{ external_openstack_max_shared_lb }} +{% endif %} + +[Networking] +ipv6-support-disabled={{ external_openstack_network_ipv6_disabled | string | lower }} +{% for network_name in external_openstack_network_internal_networks %} +internal-network-name="{{ network_name }}" +{% endfor %} +{% for network_name in external_openstack_network_public_networks %} +public-network-name="{{ network_name }}" +{% endfor %} + +[Metadata] +{% if external_openstack_metadata_search_order is defined %} +search-order="{{ external_openstack_metadata_search_order }}" +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 new file mode 100644 index 0000000..4596f92 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 @@ -0,0 +1,96 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: openstack-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: openstack-cloud-controller-manager +spec: + selector: + matchLabels: + k8s-app: openstack-cloud-controller-manager + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + k8s-app: openstack-cloud-controller-manager + spec: + nodeSelector: + node-role.kubernetes.io/control-plane: "" + securityContext: + runAsUser: 999 + tolerations: 
+ - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + serviceAccountName: cloud-controller-manager + containers: + - name: openstack-cloud-controller-manager + image: {{ docker_image_repo }}/k8scloudprovider/openstack-cloud-controller-manager:{{ external_openstack_cloud_controller_image_tag }} + args: + - /bin/openstack-cloud-controller-manager + - --v=1 + - --cloud-config=$(CLOUD_CONFIG) + - --cloud-provider=openstack + - --cluster-name={{ cluster_name }} + - --use-service-account-credentials=true + - --bind-address=127.0.0.1 +{% for key, value in external_openstack_cloud_controller_extra_args.items() %} + - "{{ '--' + key + '=' + value }}" +{% endfor %} + volumeMounts: + - mountPath: /etc/kubernetes/pki + name: k8s-certs + readOnly: true + - mountPath: /etc/ssl/certs + name: ca-certs + readOnly: true + - mountPath: /etc/config/cloud.conf + name: cloud-config-volume + readOnly: true + subPath: cloud.conf + - mountPath: {{ kube_config_dir }}/external-openstack-cacert.pem + name: cloud-config-volume + readOnly: true + subPath: ca.cert +{% if kubelet_flexvolumes_plugins_dir is defined %} + - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + name: flexvolume-dir +{% endif %} + resources: + requests: + cpu: 200m + env: + - name: CLOUD_CONFIG + value: /etc/config/cloud.conf + hostNetwork: true + volumes: +{% if kubelet_flexvolumes_plugins_dir is defined %} + - hostPath: + path: "{{ kubelet_flexvolumes_plugins_dir }}" + type: DirectoryOrCreate + name: flexvolume-dir +{% endif %} + - hostPath: + path: /etc/kubernetes/pki + type: DirectoryOrCreate + name: k8s-certs + - hostPath: + path: /etc/ssl/certs + type: DirectoryOrCreate + name: ca-certs + - name: cloud-config-volume + secret: + secretName: external-openstack-cloud-config diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-role-bindings.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-role-bindings.yml.j2 new file mode 100644 index 0000000..bbdf336 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-role-bindings.yml.j2 @@ -0,0 +1,16 @@ +apiVersion: v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system +kind: List +metadata: {} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-roles.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-roles.yml.j2 new file mode 100644 index 0000000..2ab3a5b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-roles.yml.j2 @@ -0,0 +1,109 @@ +apiVersion: v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: 
system:cloud-controller-manager + rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - '*' + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - list + - get + - watch + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +kind: List +metadata: {} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml new file mode 100644 index 0000000..91b126e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml @@ -0,0 +1,14 @@ +--- +external_vsphere_vcenter_port: "443" +external_vsphere_insecure: "true" + +## A dictionary of extra arguments to add to the vsphere cloud controller manager daemonset +## Format: +## external_vsphere_cloud_controller_extra_args: +## arg1: "value1" +## arg2: "value2" +external_vsphere_cloud_controller_extra_args: {} +external_vsphere_cloud_controller_image_tag: "latest" + +external_vsphere_user: "{{ lookup('env','VSPHERE_USER') }}" +external_vsphere_password: "{{ lookup('env','VSPHERE_PASSWORD') }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml new file mode 100644 index 0000000..9c25c72 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml @@ -0,0 +1,48 @@ +--- +- include_tasks: vsphere-credentials-check.yml + +- name: External vSphere Cloud Controller | Generate CPI cloud-config + template: + src: "{{ item }}.j2" + dest: "{{ kube_config_dir }}/{{ item }}" + mode: 0640 + with_items: + - external-vsphere-cpi-cloud-config + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: External vSphere Cloud Controller | Generate Manifests + template: + src: "{{ item }}.j2" + dest: "{{ kube_config_dir }}/{{ item }}" + mode: 0644 + with_items: + - external-vsphere-cpi-cloud-config-secret.yml + - external-vsphere-cloud-controller-manager-roles.yml + - external-vsphere-cloud-controller-manager-role-bindings.yml + - external-vsphere-cloud-controller-manager-ds.yml + register: external_vsphere_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: External vSphere Cloud Provider Interface | Create a CPI configMap manifest + command: "{{ bin_dir }}/kubectl create configmap cloud-config 
--from-file=vsphere.conf={{ kube_config_dir }}/external-vsphere-cpi-cloud-config -n kube-system --dry-run --save-config -o yaml" + register: external_vsphere_configmap_manifest + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: External vSphere Cloud Provider Interface | Apply a CPI configMap manifest + command: + cmd: "{{ bin_dir }}/kubectl apply -f -" + stdin: "{{ external_vsphere_configmap_manifest.stdout }}" + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: External vSphere Cloud Controller | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item }}" + state: "latest" + with_items: + - "{{ external_vsphere_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/vsphere-credentials-check.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/vsphere-credentials-check.yml new file mode 100644 index 0000000..b6c12b8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/vsphere-credentials-check.yml @@ -0,0 +1,32 @@ +--- +- name: External vSphere Cloud Provider | check external_vsphere_vcenter_ip value + fail: + msg: "external_vsphere_vcenter_ip is missing" + when: external_vsphere_vcenter_ip is not defined or not external_vsphere_vcenter_ip + +- name: External vSphere Cloud Provider | check external_vsphere_vcenter_port value + fail: + msg: "external_vsphere_vcenter_port is missing" + when: external_vsphere_vcenter_port is not defined or not external_vsphere_vcenter_port + +- name: External vSphere Cloud Provider | check external_vsphere_insecure value + fail: + msg: "external_vsphere_insecure is missing" + when: external_vsphere_insecure is not defined or not external_vsphere_insecure + +- name: External vSphere Cloud Provider | check external_vsphere_user value + fail: + msg: "external_vsphere_user is missing" + when: external_vsphere_user is not defined or not external_vsphere_user + +- name: External vSphere Cloud Provider | check external_vsphere_password value + fail: + msg: "external_vsphere_password is missing" + when: + - external_vsphere_password is not defined or not external_vsphere_password + +- name: External vSphere Cloud Provider | check external_vsphere_datacenter value + fail: + msg: "external_vsphere_datacenter is missing" + when: + - external_vsphere_datacenter is not defined or not external_vsphere_datacenter diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-ds.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-ds.yml.j2 new file mode 100644 index 0000000..5f1068d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-ds.yml.j2 @@ -0,0 +1,76 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: vsphere-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: vsphere-cloud-controller-manager +spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + 
updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + nodeSelector: + node-role.kubernetes.io/control-plane: "" + securityContext: + runAsUser: 0 + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + serviceAccountName: cloud-controller-manager + containers: + - name: vsphere-cloud-controller-manager + image: {{ gcr_image_repo }}/cloud-provider-vsphere/cpi/release/manager:{{ external_vsphere_cloud_controller_image_tag }} + args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf +{% for key, value in external_vsphere_cloud_controller_extra_args.items() %} + - "{{ '--' + key + '=' + value }}" +{% endfor %} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + resources: + requests: + cpu: 200m + hostNetwork: true + volumes: + - name: vsphere-config-volume + configMap: + name: cloud-config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + component: cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system +spec: + type: NodePort + ports: + - port: 43001 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-role-bindings.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-role-bindings.yml.j2 new file mode 100644 index 0000000..9f6107d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-role-bindings.yml.j2 @@ -0,0 +1,35 @@ +apiVersion: v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - apiGroup: "" + kind: User + name: cloud-controller-manager +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager +kind: List +metadata: {} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-roles.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-roles.yml.j2 new file mode 100644 index 0000000..2cd7ad0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-roles.yml.j2 @@ -0,0 +1,91 @@ +apiVersion: v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: 
+ - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - get + - list + - watch + - create + - update +kind: List +metadata: {} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config-secret.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config-secret.yml.j2 new file mode 100644 index 0000000..5364f42 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config-secret.yml.j2 @@ -0,0 +1,11 @@ +# This YAML file contains secret objects, +# which are necessary to run external vsphere cloud controller. + +apiVersion: v1 +kind: Secret +metadata: + name: cpi-global-secret + namespace: kube-system +stringData: + {{ external_vsphere_vcenter_ip }}.username: "{{ external_vsphere_user }}" + {{ external_vsphere_vcenter_ip }}.password: "{{ external_vsphere_password }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config.j2 new file mode 100644 index 0000000..a32d876 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config.j2 @@ -0,0 +1,8 @@ +[Global] +port = "{{ external_vsphere_vcenter_port }}" +insecure-flag = "{{ external_vsphere_insecure }}" +secret-name = "cpi-global-secret" +secret-namespace = "kube-system" + +[VirtualCenter "{{ external_vsphere_vcenter_ip }}"] +datacenters = "{{ external_vsphere_datacenter }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml new file mode 100644 index 0000000..577fbff --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml @@ -0,0 +1,10 @@ +--- +cephfs_provisioner_namespace: "cephfs-provisioner" +cephfs_provisioner_cluster: ceph +cephfs_provisioner_monitors: ~ +cephfs_provisioner_admin_id: admin +cephfs_provisioner_secret: secret +cephfs_provisioner_storage_class: cephfs +cephfs_provisioner_reclaim_policy: Delete +cephfs_provisioner_claim_root: /volumes +cephfs_provisioner_deterministic_names: true diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml 
b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml new file mode 100644 index 0000000..95a2f75 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml @@ -0,0 +1,80 @@ +--- + +- name: CephFS Provisioner | Remove legacy addon dir and manifests + file: + path: "{{ kube_config_dir }}/addons/cephfs_provisioner" + state: absent + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: CephFS Provisioner | Remove legacy namespace + command: > + {{ kubectl }} delete namespace {{ cephfs_provisioner_namespace }} + ignore_errors: true # noqa ignore-errors + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: CephFS Provisioner | Remove legacy storageclass + command: > + {{ kubectl }} delete storageclass {{ cephfs_provisioner_storage_class }} + ignore_errors: true # noqa ignore-errors + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: CephFS Provisioner | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/cephfs_provisioner" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: CephFS Provisioner | Templates list + set_fact: + cephfs_provisioner_templates: + - { name: 00-namespace, file: 00-namespace.yml, type: ns } + - { name: secret-cephfs-provisioner, file: secret-cephfs-provisioner.yml, type: secret } + - { name: sa-cephfs-provisioner, file: sa-cephfs-provisioner.yml, type: sa } + - { name: clusterrole-cephfs-provisioner, file: clusterrole-cephfs-provisioner.yml, type: clusterrole } + - { name: clusterrolebinding-cephfs-provisioner, file: clusterrolebinding-cephfs-provisioner.yml, type: clusterrolebinding } + - { name: role-cephfs-provisioner, file: role-cephfs-provisioner.yml, type: role } + - { name: rolebinding-cephfs-provisioner, file: rolebinding-cephfs-provisioner.yml, type: rolebinding } + - { name: deploy-cephfs-provisioner, file: deploy-cephfs-provisioner.yml, type: deploy } + - { name: sc-cephfs-provisioner, file: sc-cephfs-provisioner.yml, type: sc } + cephfs_provisioner_templates_for_psp: + - { name: psp-cephfs-provisioner, file: psp-cephfs-provisioner.yml, type: psp } + +- name: CephFS Provisioner | Append extra templates to CephFS Provisioner Templates list for PodSecurityPolicy + set_fact: + cephfs_provisioner_templates: "{{ cephfs_provisioner_templates_for_psp + cephfs_provisioner_templates }}" + when: + - podsecuritypolicy_enabled + - cephfs_provisioner_namespace != "kube-system" + +- name: CephFS Provisioner | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}" + mode: 0644 + with_items: "{{ cephfs_provisioner_templates }}" + register: cephfs_provisioner_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: CephFS Provisioner | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ cephfs_provisioner_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}" + state: "latest" + with_items: "{{ cephfs_provisioner_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git 
a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2 new file mode 100644 index 0000000..2a2a67c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ cephfs_provisioner_namespace }} + labels: + name: {{ cephfs_provisioner_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..4c92ea6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2 @@ -0,0 +1,26 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create", "delete"] + - apiGroups: ["policy"] + resourceNames: ["cephfs-provisioner"] + resources: ["podsecuritypolicies"] + verbs: ["use"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..cc5d5ff --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cephfs-provisioner +subjects: + - kind: ServiceAccount + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +roleRef: + kind: ClusterRole + name: cephfs-provisioner + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..ac3bb33 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 @@ -0,0 +1,34 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} + labels: + app: cephfs-provisioner + version: {{ cephfs_provisioner_image_tag }} +spec: + replicas: 1 + selector: + matchLabels: + app: cephfs-provisioner + version: {{ cephfs_provisioner_image_tag }} + template: + metadata: + labels: + app: 
cephfs-provisioner + version: {{ cephfs_provisioner_image_tag }} + spec: + priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + serviceAccount: cephfs-provisioner + containers: + - name: cephfs-provisioner + image: {{ cephfs_provisioner_image_repo }}:{{ cephfs_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: PROVISIONER_NAME + value: ceph.com/cephfs + command: + - "/usr/local/bin/cephfs-provisioner" + args: + - "-id=cephfs-provisioner-1" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/psp-cephfs-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/psp-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..76d146c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/psp-cephfs-provisioner.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cephfs-provisioner + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..1fb80a1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "delete"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..01ab87b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cephfs-provisioner + namespace: {{ 
cephfs_provisioner_namespace }} +subjects: + - kind: ServiceAccount + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cephfs-provisioner diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..31f87bd --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..dd0e37e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ cephfs_provisioner_storage_class }} +provisioner: ceph.com/cephfs +reclaimPolicy: {{ cephfs_provisioner_reclaim_policy }} +parameters: + cluster: {{ cephfs_provisioner_cluster }} + monitors: {{ cephfs_provisioner_monitors }} + adminId: {{ cephfs_provisioner_admin_id }} + adminSecretName: cephfs-provisioner + adminSecretNamespace: {{ cephfs_provisioner_namespace }} + claimRoot: {{ cephfs_provisioner_claim_root }} + deterministicNames: "{{ cephfs_provisioner_deterministic_names | bool | lower }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..6d73c0c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2 @@ -0,0 +1,9 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +type: Opaque +data: + secret: {{ cephfs_provisioner_secret | b64encode }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/defaults/main.yml new file mode 100644 index 0000000..278518b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/defaults/main.yml @@ -0,0 +1,9 @@ +--- +local_path_provisioner_enabled: false +local_path_provisioner_namespace: "local-path-storage" +local_path_provisioner_storage_class: "local-path" +local_path_provisioner_reclaim_policy: Delete +local_path_provisioner_claim_root: /opt/local-path-provisioner/ +local_path_provisioner_is_default_storageclass: "true" +local_path_provisioner_debug: false +local_path_provisioner_helper_image_tag: "latest" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml 
b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml new file mode 100644 index 0000000..4cf26d8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: Local Path Provisioner | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/local_path_provisioner" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Local Path Provisioner | Create claim root dir + file: + path: "{{ local_path_provisioner_claim_root }}" + state: directory + mode: 0755 + +- name: Local Path Provisioner | Render Template + set_fact: + local_path_provisioner_templates: + - { name: local-path-storage-ns, file: local-path-storage-ns.yml, type: ns } + - { name: local-path-storage-sa, file: local-path-storage-sa.yml, type: sa } + - { name: local-path-storage-cr, file: local-path-storage-cr.yml, type: cr } + - { name: local-path-storage-clusterrolebinding, file: local-path-storage-clusterrolebinding.yml, type: clusterrolebinding } + - { name: local-path-storage-cm, file: local-path-storage-cm.yml, type: cm } + - { name: local-path-storage-deployment, file: local-path-storage-deployment.yml, type: deployment } + - { name: local-path-storage-sc, file: local-path-storage-sc.yml, type: sc } + local_path_provisioner_templates_for_psp_not_system_ns: + - { name: local-path-storage-psp, file: local-path-storage-psp.yml, type: psp } + - { name: local-path-storage-psp-role, file: local-path-storage-psp-cr.yml, type: clusterrole } + - { name: local-path-storage-psp-rb, file: local-path-storage-psp-rb.yml, type: rolebinding } + +- name: Local Path Provisioner | Insert extra templates to Local Path Provisioner templates list for PodSecurityPolicy + set_fact: + local_path_provisioner_templates: "{{ local_path_provisioner_templates[:3] + local_path_provisioner_templates_for_psp_not_system_ns + local_path_provisioner_templates[3:] }}" + when: + - podsecuritypolicy_enabled + - local_path_provisioner_namespace != "kube-system" + +- name: Local Path Provisioner | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.file }}" + mode: 0644 + with_items: "{{ local_path_provisioner_templates }}" + register: local_path_provisioner_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Local Path Provisioner | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ local_path_provisioner_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.item.file }}" + state: "latest" + with_items: "{{ local_path_provisioner_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-clusterrolebinding.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..317a71f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-clusterrolebinding.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRoleBinding +metadata: + name: local-path-provisioner-bind +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: local-path-provisioner-role +subjects: + - kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: {{ local_path_provisioner_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2 new file mode 100644 index 0000000..8574312 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2 @@ -0,0 +1,59 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: local-path-config + namespace: {{ local_path_provisioner_namespace }} +data: + config.json: |- + { + "nodePathMap":[ + { + "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", + "paths":["{{ local_path_provisioner_claim_root }}"] + } + ] + } + setup: |- + #!/bin/sh + while getopts "m:s:p:" opt + do + case $opt in + p) + absolutePath=$OPTARG + ;; + s) + sizeInBytes=$OPTARG + ;; + m) + volMode=$OPTARG + ;; + esac + done + mkdir -m 0777 -p ${absolutePath} + teardown: |- + #!/bin/sh + while getopts "m:s:p:" opt + do + case $opt in + p) + absolutePath=$OPTARG + ;; + s) + sizeInBytes=$OPTARG + ;; + m) + volMode=$OPTARG + ;; + esac + done + rm -rf ${absolutePath} + helperPod.yaml: |- + apiVersion: v1 + kind: Pod + metadata: + name: helper-pod + spec: + containers: + - name: helper-pod + image: {% if local_path_provisioner_helper_image_repo is defined %}{{ local_path_provisioner_helper_image_repo }}:{{ local_path_provisioner_helper_image_tag }}{% else %}busybox{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2 new file mode 100644 index 0000000..c97511a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2 @@ -0,0 +1,18 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: local-path-provisioner-role +rules: + - apiGroups: [""] + resources: ["nodes", "persistentvolumeclaims", "configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["endpoints", "persistentvolumes", "pods"] + verbs: ["*"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 new file mode 100644 index 0000000..6922691 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 @@ -0,0 +1,41 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: local-path-provisioner + namespace: {{ local_path_provisioner_namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: local-path-provisioner + template: + 
metadata: + labels: + app: local-path-provisioner + spec: + serviceAccountName: local-path-provisioner-service-account + containers: + - name: local-path-provisioner + image: {{ local_path_provisioner_image_repo }}:{{ local_path_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - local-path-provisioner + - start + - --config + - /etc/config/config.json +{% if local_path_provisioner_debug|default(false) %} + - --debug +{% endif %} + volumeMounts: + - name: config-volume + mountPath: /etc/config/ + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: config-volume + configMap: + name: local-path-config diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-ns.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-ns.yml.j2 new file mode 100644 index 0000000..1e8c6ce --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-ns.yml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ local_path_provisioner_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-cr.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-cr.yml.j2 new file mode 100644 index 0000000..65a71f5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-cr.yml.j2 @@ -0,0 +1,15 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp:local-path-provisioner + namespace: {{ local_path_provisioner_namespace }} +rules: + - apiGroups: + - policy + resourceNames: + - local-path-provisioner + resources: + - podsecuritypolicies + verbs: + - use diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-rb.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-rb.yml.j2 new file mode 100644 index 0000000..c7e6d21 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-rb.yml.j2 @@ -0,0 +1,14 @@ +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp:local-path-provisioner + namespace: {{ local_path_provisioner_namespace }} +subjects: + - kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: {{ local_path_provisioner_namespace }} +roleRef: + kind: ClusterRole + name: psp:local-path-provisioner + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp.yml.j2 new file mode 100644 index 0000000..55d5adb --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp.yml.j2 @@ -0,0 +1,43 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: 
local-path-provisioner + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + privileged: true + allowPrivilegeEscalation: true + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'secret' + - 'downwardAPI' + - 'hostPath' + allowedHostPaths: + - pathPrefix: "{{ local_path_provisioner_claim_root }}" + readOnly: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sa.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sa.yml.j2 new file mode 100644 index 0000000..128a106 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-path-provisioner-service-account + namespace: {{ local_path_provisioner_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sc.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sc.yml.j2 new file mode 100644 index 0000000..d662661 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sc.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ local_path_provisioner_storage_class }} + annotations: + storageclass.kubernetes.io/is-default-class: "{{ local_path_provisioner_is_default_storageclass }}" +provisioner: rancher.io/local-path +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: {{ local_path_provisioner_reclaim_policy }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml new file mode 100644 index 0000000..16ed6ff --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml @@ -0,0 +1,20 @@ +--- +local_volume_provisioner_namespace: "kube-system" +# List of node labels to be copied to the PVs created by the provisioner +local_volume_provisioner_nodelabels: [] +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +local_volume_provisioner_tolerations: [] +local_volume_provisioner_use_node_name_only: false +# Leverages Ansible's string to Python datatype casting. Otherwise the dict_key isn't substituted. 
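
For context, the storage class map referenced in the comment above (and spelled out immediately below it) is normally overridden from inventory group_vars rather than edited in the role defaults. A minimal, hedged sketch follows; the class name fast-disks and the directory paths are illustrative assumptions, not values from this diff:

# group_vars/k8s_cluster/addons.yml (illustrative override, not part of this change)
local_volume_provisioner_enabled: true
local_volume_provisioner_nodelabels:
  - kubernetes.io/hostname
local_volume_provisioner_storage_classes:
  fast-disks:
    host_dir: /mnt/fast-disks    # directory on each node that holds the pre-created mounts
    mount_dir: /mnt/fast-disks   # path at which the provisioner container sees host_dir
    volume_mode: Filesystem
    fs_type: ext4

The snake_case keys shown here match the role defaults; the convert_keys macro in the ConfigMap template further down turns them into the camelCase form the provisioner expects.
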
+# see https://github.com/ansible/ansible/issues/17324 +local_volume_provisioner_storage_classes: | + { + "{{ local_volume_provisioner_storage_class | default('local-storage') }}": { + "host_dir": "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}", + "mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}", + "volume_mode": "Filesystem", + "fs_type": "ext4" + } + } diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/basedirs.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/basedirs.yml new file mode 100644 index 0000000..7add2da --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/basedirs.yml @@ -0,0 +1,12 @@ +--- +# include to workaround mitogen issue +# https://github.com/dw/mitogen/issues/663 + +- name: "Local Volume Provisioner | Ensure base dir {{ delegate_host_base_dir.1 }} is created on {{ delegate_host_base_dir.0 }}" + file: + path: "{{ local_volume_provisioner_storage_classes[delegate_host_base_dir.1].host_dir }}" + state: directory + owner: root + group: root + mode: "{{ local_volume_provisioner_directory_mode }}" + delegate_to: "{{ delegate_host_base_dir.0 }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml new file mode 100644 index 0000000..2308b5c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml @@ -0,0 +1,48 @@ +--- + +- name: Local Volume Provisioner | Ensure base dir is created on all hosts + include_tasks: basedirs.yml + loop_control: + loop_var: delegate_host_base_dir + loop: "{{ groups['k8s_cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list }}" + +- name: Local Volume Provisioner | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/local_volume_provisioner" + state: directory + owner: root + group: root + mode: 0755 + +- name: Local Volume Provisioner | Templates list + set_fact: + local_volume_provisioner_templates: + - { name: local-volume-provisioner-ns, file: local-volume-provisioner-ns.yml, type: ns } + - { name: local-volume-provisioner-sa, file: local-volume-provisioner-sa.yml, type: sa } + - { name: local-volume-provisioner-clusterrole, file: local-volume-provisioner-clusterrole.yml, type: clusterrole } + - { name: local-volume-provisioner-clusterrolebinding, file: local-volume-provisioner-clusterrolebinding.yml, type: clusterrolebinding } + - { name: local-volume-provisioner-cm, file: local-volume-provisioner-cm.yml, type: cm } + - { name: local-volume-provisioner-ds, file: local-volume-provisioner-ds.yml, type: ds } + - { name: local-volume-provisioner-sc, file: local-volume-provisioner-sc.yml, type: sc } + +- name: Local Volume Provisioner | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}" + mode: 0644 + with_items: "{{ local_volume_provisioner_templates }}" + register: local_volume_provisioner_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Local Volume Provisioner | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ local_volume_provisioner_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + 
resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}" + state: "latest" + with_items: "{{ local_volume_provisioner_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2 new file mode 100644 index 0000000..ada55dd --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2 @@ -0,0 +1,22 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: local-volume-provisioner-node-clusterrole + namespace: {{ local_volume_provisioner_namespace }} +rules: +- apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["watch"] +- apiGroups: ["", "events.k8s.io"] + resources: ["events"] + verbs: ["create", "update", "patch"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..bc286b2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-volume-provisioner-system-node + namespace: {{ local_volume_provisioner_namespace }} +subjects: +- kind: ServiceAccount + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} +roleRef: + kind: ClusterRole + name: local-volume-provisioner-node-clusterrole + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 new file mode 100644 index 0000000..76625b6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 @@ -0,0 +1,33 @@ +# Macro to convert camelCase dictionary keys to snake_case keys +{% macro convert_keys(mydict) -%} + {% for key in mydict.keys()|list -%} + {% set key_split = key.split('_') -%} + {% set new_key = key_split[0] + key_split[1:]|map('capitalize')|join -%} + {% set value = mydict.pop(key) -%} + {{ mydict.__setitem__(new_key, value) -}} + {{ convert_keys(value) if value is mapping else None -}} + {% endfor -%} +{% endmacro -%} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} +data: +{% if 
local_volume_provisioner_nodelabels | length > 0 %} + nodeLabelsForPV: | +{% for nodelabel in local_volume_provisioner_nodelabels %} + - {{ nodelabel }} +{% endfor %} +{% endif %} +{% if local_volume_provisioner_use_node_name_only %} + useNodeNameOnly: "true" +{% endif %} + storageClassMap: | +{% for class_name, storage_class in local_volume_provisioner_storage_classes.items() %} + {{ class_name }}: + {{- convert_keys(storage_class) }} + {{ storage_class | to_nice_yaml(indent=2) | indent(6) }} +{%- endfor %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 new file mode 100644 index 0000000..a8747a2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 @@ -0,0 +1,66 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} + labels: + k8s-app: local-volume-provisioner + version: {{ local_volume_provisioner_image_tag }} +spec: + selector: + matchLabels: + k8s-app: local-volume-provisioner + version: {{ local_volume_provisioner_image_tag }} + template: + metadata: + labels: + k8s-app: local-volume-provisioner + version: {{ local_volume_provisioner_image_tag }} + spec: + priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + serviceAccountName: local-volume-provisioner + nodeSelector: + kubernetes.io/os: linux +{% if local_volume_provisioner_tolerations %} + tolerations: + {{ local_volume_provisioner_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} + containers: + - name: provisioner + image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + securityContext: + privileged: true + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MY_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: local-volume-provisioner + mountPath: /etc/provisioner/config + readOnly: true + - mountPath: /dev + name: provisioner-dev +{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %} + - name: local-volume-provisioner-hostpath-{{ class_name }} + mountPath: {{ class_config.mount_dir }} + mountPropagation: "HostToContainer" +{% endfor %} + volumes: + - name: local-volume-provisioner + configMap: + name: local-volume-provisioner + - name: provisioner-dev + hostPath: + path: /dev +{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %} + - name: local-volume-provisioner-hostpath-{{ class_name }} + hostPath: + path: {{ class_config.host_dir }} +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 new file mode 100644 index 0000000..04a7910 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 @@ -0,0 +1,7 @@ 
+--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ local_volume_provisioner_namespace }} + labels: + name: {{ local_volume_provisioner_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 new file mode 100644 index 0000000..c78a16b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 new file mode 100644 index 0000000..81e0260 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 @@ -0,0 +1,12 @@ +{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %} +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ class_name }} +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer +{% if class_config.reclaim_policy is defined %} +reclaimPolicy: {{ class_config.reclaim_policy }} +{% endif %} +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/meta/main.yml new file mode 100644 index 0000000..13bc8b6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/meta/main.yml @@ -0,0 +1,30 @@ +--- +dependencies: + - role: kubernetes-apps/external_provisioner/local_volume_provisioner + when: + - local_volume_provisioner_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - apps + - local-volume-provisioner + - external-provisioner + + - role: kubernetes-apps/external_provisioner/cephfs_provisioner + when: cephfs_provisioner_enabled + tags: + - apps + - cephfs-provisioner + - external-provisioner + + - role: kubernetes-apps/external_provisioner/rbd_provisioner + when: rbd_provisioner_enabled + tags: + - apps + - rbd-provisioner + - external-provisioner + - role: kubernetes-apps/external_provisioner/local_path_provisioner + when: local_path_provisioner_enabled + tags: + - apps + - local-path-provisioner + - external-provisioner diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/defaults/main.yml new file mode 100644 index 0000000..f09e25a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/defaults/main.yml @@ -0,0 +1,17 @@ +--- +rbd_provisioner_namespace: "rbd-provisioner" +rbd_provisioner_replicas: 2 +rbd_provisioner_monitors: ~ +rbd_provisioner_pool: kube +rbd_provisioner_admin_id: admin +rbd_provisioner_secret_name: ceph-secret-admin +rbd_provisioner_secret: ceph-key-admin +rbd_provisioner_user_id: kube 
+rbd_provisioner_user_secret_name: ceph-secret-user +rbd_provisioner_user_secret: ceph-key-user +rbd_provisioner_user_secret_namespace: rbd-provisioner +rbd_provisioner_fs_type: ext4 +rbd_provisioner_image_format: "2" +rbd_provisioner_image_features: layering +rbd_provisioner_storage_class: rbd +rbd_provisioner_reclaim_policy: Delete diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml new file mode 100644 index 0000000..1d08376 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml @@ -0,0 +1,80 @@ +--- + +- name: RBD Provisioner | Remove legacy addon dir and manifests + file: + path: "{{ kube_config_dir }}/addons/rbd_provisioner" + state: absent + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: RBD Provisioner | Remove legacy namespace + command: > + {{ kubectl }} delete namespace {{ rbd_provisioner_namespace }} + ignore_errors: true # noqa ignore-errors + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: RBD Provisioner | Remove legacy storageclass + command: > + {{ kubectl }} delete storageclass {{ rbd_provisioner_storage_class }} + ignore_errors: true # noqa ignore-errors + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: RBD Provisioner | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/rbd_provisioner" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: RBD Provisioner | Templates list + set_fact: + rbd_provisioner_templates: + - { name: 00-namespace, file: 00-namespace.yml, type: ns } + - { name: secret-rbd-provisioner, file: secret-rbd-provisioner.yml, type: secret } + - { name: sa-rbd-provisioner, file: sa-rbd-provisioner.yml, type: sa } + - { name: clusterrole-rbd-provisioner, file: clusterrole-rbd-provisioner.yml, type: clusterrole } + - { name: clusterrolebinding-rbd-provisioner, file: clusterrolebinding-rbd-provisioner.yml, type: clusterrolebinding } + - { name: role-rbd-provisioner, file: role-rbd-provisioner.yml, type: role } + - { name: rolebinding-rbd-provisioner, file: rolebinding-rbd-provisioner.yml, type: rolebinding } + - { name: deploy-rbd-provisioner, file: deploy-rbd-provisioner.yml, type: deploy } + - { name: sc-rbd-provisioner, file: sc-rbd-provisioner.yml, type: sc } + rbd_provisioner_templates_for_psp: + - { name: psp-rbd-provisioner, file: psp-rbd-provisioner.yml, type: psp } + +- name: RBD Provisioner | Append extra templates to RBD Provisioner Templates list for PodSecurityPolicy + set_fact: + rbd_provisioner_templates: "{{ rbd_provisioner_templates_for_psp + rbd_provisioner_templates }}" + when: + - podsecuritypolicy_enabled + - rbd_provisioner_namespace != "kube-system" + +- name: RBD Provisioner | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}" + mode: 0644 + with_items: "{{ rbd_provisioner_templates }}" + register: rbd_provisioner_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: RBD Provisioner | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ rbd_provisioner_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ 
kube_config_dir }}/addons/rbd_provisioner/{{ item.item.file }}" + state: "latest" + with_items: "{{ rbd_provisioner_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/00-namespace.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/00-namespace.yml.j2 new file mode 100644 index 0000000..8bec2b5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/00-namespace.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ rbd_provisioner_namespace }} + labels: + name: {{ rbd_provisioner_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrole-rbd-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrole-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..8fc7e4b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrole-rbd-provisioner.yml.j2 @@ -0,0 +1,30 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["services"] + resourceNames: ["kube-dns","coredns"] + verbs: ["list", "get"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create", "delete"] + - apiGroups: ["policy"] + resourceNames: ["rbd-provisioner"] + resources: ["podsecuritypolicies"] + verbs: ["use"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrolebinding-rbd-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrolebinding-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..ae9e6c5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrolebinding-rbd-provisioner.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rbd-provisioner +subjects: + - kind: ServiceAccount + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} +roleRef: + kind: ClusterRole + name: rbd-provisioner + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..dccc165 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 @@ -0,0 +1,40 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} + labels: + app: 
rbd-provisioner + version: {{ rbd_provisioner_image_tag }} +spec: + replicas: {{ rbd_provisioner_replicas }} + strategy: + type: Recreate + selector: + matchLabels: + app: rbd-provisioner + version: {{ rbd_provisioner_image_tag }} + template: + metadata: + labels: + app: rbd-provisioner + version: {{ rbd_provisioner_image_tag }} + spec: + priorityClassName: {% if rbd_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + serviceAccount: rbd-provisioner + containers: + - name: rbd-provisioner + image: {{ rbd_provisioner_image_repo }}:{{ rbd_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: PROVISIONER_NAME + value: ceph.com/rbd + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + command: + - "/usr/local/bin/rbd-provisioner" + args: + - "-id=${POD_NAME}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/psp-rbd-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/psp-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..c59effd --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/psp-rbd-provisioner.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: rbd-provisioner + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/role-rbd-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/role-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..d8dbbf9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/role-rbd-provisioner.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/rolebinding-rbd-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/rolebinding-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..fcae1cc --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/rolebinding-rbd-provisioner.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} +subjects: + - kind: ServiceAccount + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rbd-provisioner diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sa-rbd-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sa-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..c4dce64 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sa-rbd-provisioner.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sc-rbd-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sc-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..9fea17a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sc-rbd-provisioner.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ rbd_provisioner_storage_class }} +provisioner: ceph.com/rbd +reclaimPolicy: {{ rbd_provisioner_reclaim_policy }} +parameters: + monitors: {{ rbd_provisioner_monitors }} + adminId: {{ rbd_provisioner_admin_id }} + adminSecretNamespace: {{ rbd_provisioner_namespace }} + adminSecretName: {{ rbd_provisioner_secret_name }} + pool: {{ rbd_provisioner_pool }} + userId: {{ rbd_provisioner_user_id }} + userSecretNamespace: {{ rbd_provisioner_user_secret_namespace }} + userSecretName: {{ rbd_provisioner_user_secret_name }} + fsType: "{{ rbd_provisioner_fs_type }}" + imageFormat: "{{ rbd_provisioner_image_format }}" + imageFeatures: {{ rbd_provisioner_image_features }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/secret-rbd-provisioner.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/secret-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..a3b66d6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/secret-rbd-provisioner.yml.j2 @@ -0,0 +1,18 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: {{ rbd_provisioner_secret_name }} + namespace: {{ rbd_provisioner_namespace }} +type: Opaque +data: + secret: {{ rbd_provisioner_secret | b64encode }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: {{ rbd_provisioner_user_secret_name }} + namespace: {{ rbd_provisioner_user_secret_namespace }} +type: Opaque +data: + key: {{ rbd_provisioner_user_secret | b64encode }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/helm/.gitkeep b/kubespray/extra_playbooks/roles/kubernetes-apps/helm/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/helm/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/helm/defaults/main.yml new file mode 100644 index 0000000..4dc1cca 
--- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/helm/defaults/main.yml @@ -0,0 +1,2 @@ +--- +helm_enabled: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/helm/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/helm/tasks/main.yml new file mode 100644 index 0000000..fee247b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/helm/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Helm | Download helm + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.helm) }}" + +- name: Helm | Copy helm binary from download dir + copy: + src: "{{ local_release_dir }}/helm-{{ helm_version }}/linux-{{ image_arch }}/helm" + dest: "{{ bin_dir }}/helm" + mode: 0755 + remote_src: true + +- name: Helm | Get helm completion + command: "{{ bin_dir }}/helm completion bash" + changed_when: False + register: helm_completion + check_mode: False + +- name: Helm | Install helm completion + copy: + dest: /etc/bash_completion.d/helm.sh + content: "{{ helm_completion.stdout }}" + mode: 0755 + become: True diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/OWNERS b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/OWNERS new file mode 100644 index 0000000..e8c0fcc --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - kubespray-approvers +reviewers: + - kubespray-reviewers \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/defaults/main.yml new file mode 100644 index 0000000..4c8d97e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/defaults/main.yml @@ -0,0 +1,7 @@ +--- +alb_ingress_controller_namespace: kube-system +alb_ingress_aws_region: "us-east-1" + +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. 
+alb_ingress_aws_debug: "false" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml new file mode 100644 index 0000000..8a188a4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml @@ -0,0 +1,36 @@ +--- + +- name: ALB Ingress Controller | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/alb_ingress" + state: directory + owner: root + group: root + mode: 0755 + +- name: ALB Ingress Controller | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/alb_ingress/{{ item.file }}" + mode: 0644 + with_items: + - { name: alb-ingress-clusterrole, file: alb-ingress-clusterrole.yml, type: clusterrole } + - { name: alb-ingress-clusterrolebinding, file: alb-ingress-clusterrolebinding.yml, type: clusterrolebinding } + - { name: alb-ingress-ns, file: alb-ingress-ns.yml, type: ns } + - { name: alb-ingress-sa, file: alb-ingress-sa.yml, type: sa } + - { name: alb-ingress-deploy, file: alb-ingress-deploy.yml, type: deploy } + register: alb_ingress_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: ALB Ingress Controller | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ alb_ingress_controller_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/alb_ingress/{{ item.item.file }}" + state: "latest" + with_items: "{{ alb_ingress_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrole.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrole.yml.j2 new file mode 100644 index 0000000..bc03095 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrole.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alb-ingress + namespace: {{ alb_ingress_controller_namespace }} +rules: + - apiGroups: ["", "extensions"] + resources: ["configmaps", "endpoints", "nodes", "pods", "secrets", "events", "ingresses", "ingresses/status", "services"] + verbs: ["list", "create", "get", "update", "watch", "patch"] + - apiGroups: ["", "extensions"] + resources: ["nodes", "pods", "secrets", "services", "namespaces"] + verbs: ["get", "list", "watch"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrolebinding.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..71068f4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrolebinding.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alb-ingress + namespace: {{ alb_ingress_controller_namespace }} +subjects: + - kind: ServiceAccount + name: alb-ingress + namespace: {{ alb_ingress_controller_namespace }} +roleRef: + kind: ClusterRole + 
name: alb-ingress + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-deploy.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-deploy.yml.j2 new file mode 100644 index 0000000..a3d2834 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-deploy.yml.j2 @@ -0,0 +1,74 @@ +# Application Load Balancer (ALB) Ingress Controller Deployment Manifest. +# This manifest details sensible defaults for deploying an ALB Ingress Controller. +# GitHub: https://github.com/coreos/alb-ingress-controller +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alb-ingress-controller + labels: + k8s-app: alb-ingress-controller + # Namespace the ALB Ingress Controller should run in. Does not impact which + # namespaces it's able to resolve ingress resource for. For limiting ingress + # namespace scope, see --watch-namespace. + namespace: {{ alb_ingress_controller_namespace }} +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: alb-ingress-controller + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: alb-ingress-controller + spec: + containers: + - args: + # Limit the namespace where this ALB Ingress Controller deployment will + # resolve ingress resources. If left commented, all namespaces are used. + #- --watch-namespace=your-k8s-namespace + + # Setting the ingress-class flag below will ensure that only ingress resources with the + # annotation kubernetes.io/ingress.class: "alb" are respected by the controller. You may + # choose any class you'd like for this controller to respect. + - --ingress-class=alb + # Name of your cluster. Used when naming resources created + # by the ALB Ingress Controller, providing distinction between + # clusters. + - --cluster-name={{ cluster_name }} + + # Enables logging on all outbound requests sent to the AWS API. + # If logging is desired, set to true. + # - ---aws-api-debug +{% if alb_ingress_aws_debug %} + - --aws-api-debug +{% endif %} + # Maximum number of times to retry the aws calls. + # defaults to 10. + # - --aws-max-retries=10 + + # AWS region this ingress controller will operate in. + # If unspecified, it will be discovered from ec2metadata. 
+ # List of regions: http://docs.aws.amazon.com/general/latest/gr/rande.html#vpc_region +{% if alb_ingress_aws_region is defined %} + - --aws-region={{ alb_ingress_aws_region }} +{% endif %} + + image: "{{ alb_ingress_image_repo }}:{{ alb_ingress_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + name: server + resources: {} + terminationMessagePath: /dev/termination-log + dnsPolicy: ClusterFirst + restartPolicy: Always + securityContext: {} + terminationGracePeriodSeconds: 30 +{% if rbac_enabled %} + serviceAccountName: alb-ingress +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-ns.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-ns.yml.j2 new file mode 100644 index 0000000..9f57537 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ alb_ingress_controller_namespace }} + labels: + name: {{ alb_ingress_controller_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-sa.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-sa.yml.j2 new file mode 100644 index 0000000..692e3e3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alb-ingress + namespace: {{ alb_ingress_controller_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml new file mode 100644 index 0000000..b12a1a9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml @@ -0,0 +1,10 @@ +--- +cert_manager_namespace: "cert-manager" +cert_manager_user: 1001 +cert_manager_tolerations: [] +cert_manager_affinity: {} +cert_manager_nodeselector: {} + +## Change leader election namespace when deploying on GKE Autopilot that forbid the changes on kube-system namespace. 
+## See https://github.com/jetstack/cert-manager/issues/3717 +cert_manager_leader_election_namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml new file mode 100644 index 0000000..4af64ad --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml @@ -0,0 +1,56 @@ +--- + +- name: Cert Manager | Remove legacy addon dir and manifests + file: + path: "{{ kube_config_dir }}/addons/cert_manager" + state: absent + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: Cert Manager | Remove legacy namespace + command: > + {{ kubectl }} delete namespace {{ cert_manager_namespace }} + ignore_errors: true # noqa ignore-errors + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: Cert Manager | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/cert_manager" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Cert Manager | Templates list + set_fact: + cert_manager_templates: + - { name: cert-manager, file: cert-manager.yml, type: all } + - { name: cert-manager.crds, file: cert-manager.crds.yml, type: crd } + +- name: Cert Manager | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}" + mode: 0644 + with_items: "{{ cert_manager_templates }}" + register: cert_manager_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Cert Manager | Apply manifests + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}" + state: "latest" + with_items: "{{ cert_manager_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.crds.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.crds.yml.j2 new file mode 100644 index 0000000..854cc43 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.crds.yml.j2 @@ -0,0 +1,4414 @@ +# Copyright 2022 The cert-manager Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterissuers.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: cert-manager.io + names: + kind: ClusterIssuer + listKind: ClusterIssuerList + plural: clusterissuers + singular: clusterissuer + categories: + - cert-manager + scope: Cluster + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the ClusterIssuer resource. + type: object + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + type: object + required: + - privateKeySecretRef + - server + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. 
If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + type: object + required: + - keyID + - keySecretRef + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' + type: string + enum: + - HS256 + - HS384 + - HS512 + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + type: string + maxLength: 64 + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. 
The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + type: array + items: + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. 
If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. 
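+                        # --- Illustrative note, not part of the upstream CRD ---
+                        # A rough sketch of how the route53 solver described above is
+                        # typically referenced from an issuer spec; every name and value
+                        # below is a placeholder:
+                        #
+                        #   solvers:
+                        #   - dns01:
+                        #       route53:
+                        #         region: eu-west-1
+                        #         accessKeyID: <access-key-id>          # or accessKeyIDSecretRef
+                        #         secretAccessKeySecretRef:
+                        #           name: route53-credentials
+                        #           key: secret-access-key
+                        #         hostedZoneID: <zone-id>               # optional; skips ListHostedZonesByName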
+ type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. 
For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. 
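+                        # --- Illustrative note, not part of the upstream CRD ---
+                        # An HTTP01 solver usually points at either an ingress class or a
+                        # named Ingress (only one of 'class' or 'name' may be set), e.g.:
+                        #
+                        #   solvers:
+                        #   - http01:
+                        #       ingress:
+                        #         class: nginx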
+ type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
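+                        # --- Illustrative note, not part of the upstream CRD ---
+                        # podTemplate only honours metadata labels/annotations plus the pod
+                        # spec fields listed above (priorityClassName, nodeSelector, affinity,
+                        # serviceAccountName, tolerations); a minimal override sketch with
+                        # placeholder values:
+                        #
+                        #   podTemplate:
+                        #     metadata:
+                        #       labels:
+                        #         team: platform
+                        #     spec:
+                        #       nodeSelector:
+                        #         kubernetes.io/os: linux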
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
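+                        # --- Illustrative note, not part of the upstream CRD ---
+                        # The affinity block above mirrors the standard Kubernetes PodSpec
+                        # schema; a small nodeAffinity sketch for the solver pod (placeholder
+                        # key and values):
+                        #
+                        #   affinity:
+                        #     nodeAffinity:
+                        #       requiredDuringSchedulingIgnoredDuringExecution:
+                        #         nodeSelectorTerms:
+                        #         - matchExpressions:
+                        #           - key: kubernetes.io/arch
+                        #             operator: In
+                        #             values: ["amd64"]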
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + nodeSelector: + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + type: object + required: + - secretName + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + type: array + items: + type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. 
For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + type: object + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + type: array + items: + type: string + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + type: object + required: + - auth + - path + - server + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + type: object + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + caBundle: + description: PEM-encoded CA bundle (base64-encoded) used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the cert-manager controller system root certificates are used to validate the TLS connection. + type: string + format: byte + caBundleSecretRef: + description: CABundleSecretRef is a reference to a Secret which contains the CABundle which will be used when connecting to Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundleSecretRef nor CABundle are defined, the cert-manager controller system root certificates are used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". 
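+                        # --- Illustrative note, not part of the upstream CRD ---
+                        # A Vault-backed issuer needs 'server', 'path' and one auth method
+                        # (appRole, kubernetes or tokenSecretRef); a Kubernetes-auth sketch,
+                        # all values placeholders:
+                        #
+                        #   vault:
+                        #     server: https://vault.example.com:8200
+                        #     path: pki_int/sign/example-dot-com
+                        #     auth:
+                        #       kubernetes:
+                        #         role: cert-manager
+                        #         secretRef:
+                        #           name: cert-manager-vault-sa-token
+                        #           key: token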
+ type: string + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + status: + description: Status of the ClusterIssuer. This is set and managed automatically. + type: object + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + type: array + items: + description: IssuerCondition contains condition information for an Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`). 
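+                        # --- Illustrative note, not part of the upstream CRD ---
+                        # A Venafi TPP issuer needs 'zone' plus 'tpp.url' and a credentials
+                        # Secret holding 'username' and 'password' keys, roughly:
+                        #
+                        #   venafi:
+                        #     zone: devops\cert-manager
+                        #     tpp:
+                        #       url: https://tpp.example.com/vedsdk
+                        #       credentialsRef:
+                        #         name: tpp-credentials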
+ type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: challenges.acme.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: acme.cert-manager.io + names: + kind: Challenge + listKind: ChallengeList + plural: challenges + singular: challenge + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.dnsName + name: Domain + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - authorizationURL + - dnsName + - issuerRef + - key + - solver + - token + - type + - url + properties: + authorizationURL: + description: The URL to the ACME Authorization resource that this challenge is a part of. + type: string + dnsName: + description: dnsName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. + type: string + issuerRef: + description: References a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + key: + description: 'The ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `.` text that must be set as the TXT record content.' 
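+                        # --- Illustrative note, not part of the upstream CRD ---
+                        # Challenge resources are normally created, updated and cleaned up by
+                        # cert-manager itself while it processes an ACME Order; they are rarely
+                        # authored by hand. The required fields above (authorizationURL, dnsName,
+                        # issuerRef, key, solver, token, type, url) are populated by the
+                        # controller from the ACME authorization data.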
+ type: string + solver: + description: Contains the domain solving configuration that should be used to solve this challenge resource. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + email: + description: Email of the account, only required when using API key based authentication. 
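+  # Illustrative only: the Cloudflare fields above are usually configured on an
+  # Issuer/ClusterIssuer dns01 solver rather than written directly into a Challenge.
+  # A minimal sketch, assuming a Secret "cloudflare-api-token" with a key "api-token"
+  # exists in the same namespace (both names are hypothetical):
+  #
+  #   solvers:
+  #     - dns01:
+  #         cloudflare:
+  #           apiTokenSecretRef:
+  #             name: cloudflare-api-token
+  #             key: api-token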
+ type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
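+  # Illustrative only: a Route53 dns01 solver stanza as it would appear on an Issuer.
+  # If no access keys are set, credentials are resolved from the environment (env vars,
+  # shared credentials file or instance metadata); the zone ID and role ARN below are
+  # hypothetical placeholders:
+  #
+  #   solvers:
+  #     - dns01:
+  #         route53:
+  #           region: eu-west-1
+  #           hostedZoneID: <zone-id>          # optional, skips the ListHostedZonesByName lookup
+  #           role: arn:aws:iam::<account-id>:role/cert-manager-dns01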
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. 
cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. 
For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. 
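+  # Illustrative only: an http01 ingress solver as it would appear on an Issuer, using the
+  # ingressTemplate/podTemplate metadata overrides described above (the ingress class and
+  # label values are placeholders):
+  #
+  #   solvers:
+  #     - http01:
+  #         ingress:
+  #           class: nginx
+  #           ingressTemplate:
+  #             metadata:
+  #               labels:
+  #                 acme-solver: "true"
+  #           podTemplate:
+  #             metadata:
+  #               labels:
+  #                 acme-solver: "true"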
+ type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
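+  # Illustrative only: the affinity schema here mirrors the standard Kubernetes PodSpec
+  # affinity API. A sketch of a preferred nodeAffinity override for the solver pod (the
+  # label key is hypothetical):
+  #
+  #   podTemplate:
+  #     spec:
+  #       affinity:
+  #         nodeAffinity:
+  #           preferredDuringSchedulingIgnoredDuringExecution:
+  #             - weight: 50
+  #               preference:
+  #                 matchExpressions:
+  #                   - key: node-role.kubernetes.io/worker
+  #                     operator: Exists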
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + nodeSelector: + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. 
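+  # Illustrative only: a sketch of the remaining podTemplate.spec overrides documented
+  # above (nodeSelector and tolerations); the label and taint keys are hypothetical:
+  #
+  #   podTemplate:
+  #     spec:
+  #       nodeSelector:
+  #         kubernetes.io/os: linux
+  #       tolerations:
+  #         - key: dedicated
+  #           operator: Equal
+  #           value: acme-solver
+  #           effect: NoSchedule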
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + token: + description: The ACME challenge token for this challenge. This is the raw value returned from the ACME server. + type: string + type: + description: The type of ACME challenge this resource represents. One of "HTTP-01" or "DNS-01". + type: string + enum: + - HTTP-01 + - DNS-01 + url: + description: The URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge. + type: string + wildcard: + description: wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'. + type: boolean + status: + type: object + properties: + presented: + description: presented will be set to true if the challenge values for this challenge are currently 'presented'. This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured). + type: boolean + processing: + description: Used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. 
It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action. + type: boolean + reason: + description: Contains human readable information on why the Challenge is in the current state. + type: string + state: + description: Contains the current 'state' of the challenge. If not set, the state of the challenge is unknown. + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + served: true + storage: true + subresources: + status: {} +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: certificaterequests.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: cert-manager.io + names: + kind: CertificateRequest + listKind: CertificateRequestList + plural: certificaterequests + shortNames: + - cr + - crs + singular: certificaterequest + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .status.conditions[?(@.type=="Denied")].status + name: Denied + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + type: string + - jsonPath: .spec.username + name: Requestor + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the CertificateRequest resource. 
+ type: object + required: + - issuerRef + - request + properties: + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. + type: string + extra: + description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: object + additionalProperties: + type: array + items: + type: string + groups: + description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: array + items: + type: string + x-kubernetes-list-type: atomic + isCA: + description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + request: + description: The PEM-encoded x509 certificate signing request to be submitted to the CA for signing. + type: string + format: byte + uid: + description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. If usages are set they SHOULD be encoded inside the CSR spec Defaults to `digital signature` and `key encipherment` if not specified. + type: array + items: + description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + username: + description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. 
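+  # Illustrative only: CertificateRequests are usually created by the Certificate
+  # controller, but one can be submitted directly. A minimal sketch (the request value
+  # stands in for a base64-encoded PEM CSR and the issuer name is hypothetical):
+  #
+  #   apiVersion: cert-manager.io/v1
+  #   kind: CertificateRequest
+  #   metadata:
+  #     name: example-csr
+  #   spec:
+  #     issuerRef:
+  #       name: internal-ca
+  #       kind: Issuer
+  #     duration: 2160h          # 90 days; some issuers may ignore this
+  #     usages:
+  #       - digital signature
+  #       - key encipherment
+  #     request: <base64-encoded-PEM-CSR>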
+ type: string + status: + description: Status of the CertificateRequest. This is set and managed automatically. + type: object + properties: + ca: + description: The PEM encoded x509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available. + type: string + format: byte + certificate: + description: The PEM encoded x509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field. + type: string + format: byte + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready` and `InvalidRequest`. + type: array + items: + description: CertificateRequestCondition contains condition information for a CertificateRequest. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failureTime: + description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. + type: string + format: date-time + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: issuers.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: cert-manager.io + names: + kind: Issuer + listKind: IssuerList + plural: issuers + singular: issuer + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace. 
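For reference, a minimal Issuer manifest that exercises this schema could look like the sketch below. The names are placeholders, and the resource is namespace-scoped as the description above states; a ClusterIssuer would be used for cluster-wide issuance instead.

    apiVersion: cert-manager.io/v1
    kind: Issuer
    metadata:
      name: selfsigned-issuer      # placeholder name
      namespace: sandbox           # Issuers can only be referenced from their own namespace
    spec:
      selfSigned: {}               # the 'selfSigned' issuer type defined further down in this schema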
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Issuer resource. + type: object + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + type: object + required: + - privateKeySecretRef + - server + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + type: object + required: + - keyID + - keySecretRef + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' + type: string + enum: + - HS256 + - HS384 + - HS512 + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + type: string + maxLength: 64 + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + type: array + items: + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. 
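As a sketch of the DNS01 providers described above, the fragment below shows one `spec.acme.solvers` entry using the Cloudflare API token method; the Secret name and zone are assumptions for illustration only.

    solvers:
      - dns01:
          cloudflare:
            apiTokenSecretRef:
              name: cloudflare-api-token   # assumed Secret holding the API token
              key: api-token
        selector:
          dnsZones:
            - example.com                  # placeholder zone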
+ type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. 
Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. 
For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. 
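Tying the HTTP01 ingress solver fields above together, an ACME Issuer using them might look like the following sketch; the email, Secret name, and ingress class are placeholders and assume an ingress controller is already running.

    apiVersion: cert-manager.io/v1
    kind: Issuer
    metadata:
      name: letsencrypt-staging
    spec:
      acme:
        server: https://acme-staging-v02.api.letsencrypt.org/directory
        email: ops@example.com                       # placeholder contact address
        privateKeySecretRef:
          name: letsencrypt-staging-account-key      # placeholder account-key Secret
        solvers:
          - http01:
              ingress:
                class: nginx                         # assumed ingress class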
+ type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + nodeSelector: + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. 
Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + type: object + required: + - secretName + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + type: array + items: + type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + type: object + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + type: array + items: + type: string + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + type: object + required: + - auth + - path + - server + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. 
+ type: object + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + caBundle: + description: PEM-encoded CA bundle (base64-encoded) used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the cert-manager controller system root certificates are used to validate the TLS connection. 
+ type: string + format: byte + caBundleSecretRef: + description: CABundleSecretRef is a reference to a Secret which contains the CABundle which will be used when connecting to Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundleSecretRef nor CABundle are defined, the cert-manager controller system root certificates are used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + status: + description: Status of the Issuer. This is set and managed automatically. + type: object + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + type: array + items: + description: IssuerCondition contains condition information for an Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`). 
+ type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: certificates.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: cert-manager.io + names: + kind: Certificate + listKind: CertificateList + plural: certificates + shortNames: + - cert + - certs + singular: certificate + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.secretName + name: Secret + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: "A Certificate resource should be created to ensure an up to date and signed x509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. \n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Certificate resource. + type: object + required: + - issuerRef + - secretName + properties: + additionalOutputFormats: + description: AdditionalOutputFormats defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option on both the controller and webhook components. + type: array + items: + description: CertificateAdditionalOutputFormat defines an additional output format of a Certificate resource. These contain supplementary data formats of the signed certificate chain and paired private key. + type: object + required: + - type + properties: + type: + description: Type is the name of the format type that should be written to the Certificate's target Secret. + type: string + enum: + - DER + - CombinedPEM + commonName: + description: 'CommonName is a common name to be used on the Certificate. 
The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4' + type: string + dnsNames: + description: DNSNames is a list of DNS subjectAltNames to be set on the Certificate. + type: array + items: + type: string + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + emailAddresses: + description: EmailAddresses is a list of email subjectAltNames to be set on the Certificate. + type: array + items: + type: string + encodeUsagesInRequest: + description: EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest + type: boolean + ipAddresses: + description: IPAddresses is a list of IP address subjectAltNames to be set on the Certificate. + type: array + items: + type: string + isCA: + description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + keystores: + description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. + type: object + properties: + jks: + description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. + type: object + required: + - create + - passwordSecretRef + properties: + create: + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + pkcs12: + description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. + type: object + required: + - create + - passwordSecretRef + properties: + create: + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + literalSubject: + description: LiteralSubject is an LDAP formatted string that represents the [X.509 Subject field](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6). Use this *instead* of the Subject field if you need to ensure the correct ordering of the RDN sequence, such as when issuing certs for LDAP authentication. See https://github.com/cert-manager/cert-manager/issues/3203, https://github.com/cert-manager/cert-manager/issues/4424. This field is alpha level and is only supported by cert-manager installations where LiteralCertificateSubject feature gate is enabled on both cert-manager controller and webhook. + type: string + privateKey: + description: Options to control private keys used for the Certificate. + type: object + properties: + algorithm: + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `RSA`,`Ed25519` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. key size is ignored when using the `Ed25519` key algorithm. + type: string + enum: + - RSA + - ECDSA + - Ed25519 + encoding: + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. + type: string + enum: + - PKCS1 + - PKCS8 + rotationPolicy: + description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. 
+ type: string + enum: + - Never + - Always + size: + description: Size is the key bit size of the corresponding private key for this certificate. If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If `algorithm` is set to `Ed25519`, Size is ignored. No other values are allowed. + type: integer + renewBefore: + description: How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + revisionHistoryLimit: + description: revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`. + type: integer + format: int32 + secretName: + description: SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. + type: string + secretTemplate: + description: SecretTemplate defines annotations and labels to be copied to the Certificate's Secret. Labels and annotations on the Secret will be changed as they appear on the SecretTemplate when added or removed. SecretTemplate annotations are added in conjunction with, and cannot overwrite, the base set of annotations cert-manager sets on the Certificate's Secret. + type: object + properties: + annotations: + description: Annotations is a key value map to be copied to the target Kubernetes Secret. + type: object + additionalProperties: + type: string + labels: + description: Labels is a key value map to be copied to the target Kubernetes Secret. + type: object + additionalProperties: + type: string + subject: + description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). + type: object + properties: + countries: + description: Countries to be used on the Certificate. + type: array + items: + type: string + localities: + description: Cities to be used on the Certificate. + type: array + items: + type: string + organizationalUnits: + description: Organizational Units to be used on the Certificate. + type: array + items: + type: string + organizations: + description: Organizations to be used on the Certificate. + type: array + items: + type: string + postalCodes: + description: Postal codes to be used on the Certificate. + type: array + items: + type: string + provinces: + description: State/Provinces to be used on the Certificate. + type: array + items: + type: string + serialNumber: + description: Serial number to be used on the Certificate. + type: string + streetAddresses: + description: Street addresses to be used on the Certificate. + type: array + items: + type: string + uris: + description: URIs is a list of URI subjectAltNames to be set on the Certificate. 
+ type: array + items: + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + type: array + items: + description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + status: + description: Status of the Certificate. This is set and managed automatically. + type: object + properties: + conditions: + description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`. + type: array + items: + description: CertificateCondition contains condition information for an Certificate. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`, `Issuing`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failedIssuanceAttempts: + description: The number of continuous failed issuance attempts up till now. This field gets removed (if set) on a successful issuance and gets set to 1 if unset and an issuance has failed. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). + type: integer + lastFailureTime: + description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. 
+ type: string + format: date-time + nextPrivateKeySecretName: + description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False. + type: string + notAfter: + description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`. + type: string + format: date-time + notBefore: + description: The time after which the certificate stored in the secret named by this resource in spec.secretName is valid. + type: string + format: date-time + renewalTime: + description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled. + type: string + format: date-time + revision: + description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field." + type: integer + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: orders.acme.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: acme.cert-manager.io + names: + kind: Order + listKind: OrderList + plural: orders + singular: order + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - issuerRef + - request + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR. + type: string + dnsNames: + description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + type: array + items: + type: string + duration: + description: Duration is the duration for the not after date for the requested certificate. this is set on order creation as pe the ACME spec. + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + type: array + items: + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + request: + description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order. + type: string + format: byte + status: + type: object + properties: + authorizations: + description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order. + type: array + items: + description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order validate a DNS name on an ACME Order resource. + type: object + required: + - url + properties: + challenges: + description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process. + type: array + items: + description: Challenge specifies a challenge offered by the ACME server for an Order. An appropriate Challenge resource can be created to perform the ACME challenge process. + type: object + required: + - token + - type + - url + properties: + token: + description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented. + type: string + type: + description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored. + type: string + url: + description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server. 
+ type: string + identifier: + description: Identifier is the DNS name to be validated as part of this authorization + type: string + initialState: + description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created. + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL is the URL of the Authorization that must be completed + type: string + wildcard: + description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + certificate: + description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state. + type: string + format: byte + failureTime: + description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off. + type: string + format: date-time + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final' + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL of the Order. This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set. + type: string + served: true + storage: true diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.yml.j2 new file mode 100644 index 0000000..47500e5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.yml.j2 @@ -0,0 +1,1178 @@ +# Copyright 2022 The cert-manager Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
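+# Illustrative usage note (not part of the upstream cert-manager manifest): once
+# this template is applied, the CRDs defined below allow resources such as the
+# following to be created. All names, the email address and the domain are
+# placeholder values for illustration only; nothing in this repository defines them.
+#
+#   apiVersion: cert-manager.io/v1
+#   kind: ClusterIssuer
+#   metadata:
+#     name: letsencrypt-staging
+#   spec:
+#     acme:
+#       server: https://acme-staging-v02.api.letsencrypt.org/directory
+#       email: admin@example.com
+#       privateKeySecretRef:
+#         name: letsencrypt-staging-account-key
+#       solvers:
+#         - http01:
+#             ingress:
+#               class: nginx
+#   ---
+#   apiVersion: cert-manager.io/v1
+#   kind: Certificate
+#   metadata:
+#     name: example-com
+#   spec:
+#     secretName: example-com-tls
+#     dnsNames:
+#       - example.com
+#     issuerRef:
+#       name: letsencrypt-staging
+#       kind: ClusterIssuer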
+ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ cert_manager_namespace }} +--- +# Source: cert-manager/templates/cainjector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-cainjector + namespace: {{ cert_manager_namespace }} + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +--- +# Source: cert-manager/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +--- +# Source: cert-manager/templates/webhook-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +--- +# Source: cert-manager/templates/webhook-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" +data: +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Issuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "issuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] 
+--- +# Source: cert-manager/templates/rbac.yaml +# ClusterIssuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "clusterissuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Certificates controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["cert-manager.io"] + resources: ["certificates/finalizers", "certificaterequests/finalizers"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders"] + verbs: ["create", "delete", "get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Orders controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "orders/status"] + verbs: ["update", "patch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "challenges"] + verbs: ["get", "list", "watch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["create", "delete"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + 
verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Challenges controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + # Use to update challenge resource status + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "challenges/status"] + verbs: ["update", "patch"] + # Used to watch challenge resources + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["get", "list", "watch"] + # Used to watch challenges, issuer and clusterissuer resources + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + # Need to be able to retrieve ACME account private key to complete challenges + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Used to create events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + # HTTP01 rules + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [ "gateway.networking.k8s.io" ] + resources: [ "httproutes" ] + verbs: ["get", "list", "watch", "create", "delete", "update"] + # We require the ability to specify a custom hostname when we are creating + # new ingress resources. + # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148 + - apiGroups: ["route.openshift.io"] + resources: ["routes/custom-host"] + verbs: ["create"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges/finalizers"] + verbs: ["update"] + # DNS01 rules (duplicated above) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ingress-shim controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests"] + verbs: ["create", "update", "delete"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses/finalizers"] + verbs: ["update"] + - apiGroups: 
["gateway.networking.k8s.io"] + resources: ["gateways", "httproutes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways/finalizers", "httproutes/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-view + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-edit + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates/status"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["signers"] + verbs: ["approve"] + resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to: +# - Update and sign CertificatSigningeRequests referencing cert-manager.io Issuers and ClusterIssuers +# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["certificates.k8s.io"] + resources: 
["signers"] + resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] + verbs: ["sign"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: +- apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cainjector +subjects: + - name: cert-manager-cainjector + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-issuers +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-clusterissuers +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificates +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
cert-manager-controller-orders +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-challenges +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-ingress-shim +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-approve:cert-manager-io +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificatesigningrequests +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-webhook:subjectaccessreviews +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# leader election rules +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-cainjector:leaderelection + namespace: {{ cert_manager_leader_election_namespace }} + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: 
cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + # Used for leader election by the controller + # cert-manager-cainjector-leader-election is used by the CertificateBased injector controller + # see cmd/cainjector/start.go#L113 + # cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller + # see cmd/cainjector/start.go#L137 + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager:leaderelection + namespace: {{ cert_manager_leader_election_namespace }} + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-controller"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: +- apiGroups: [""] + resources: ["secrets"] + resourceNames: + - 'cert-manager-webhook-ca' + verbs: ["get", "list", "watch", "update"] +# It's not possible to grant CREATE permission on a single resourceName. 
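+# RBAC ignores resourceNames for 'create' requests (the object name is not known at authorization time), so the rule below has to allow creating secrets in this namespace without a name restriction.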
+- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-cainjector:leaderelection + namespace: {{ cert_manager_leader_election_namespace }} + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-cainjector:leaderelection +subjects: + - kind: ServiceAccount + name: cert-manager-cainjector + namespace: {{ cert_manager_namespace }} +--- +# Source: cert-manager/templates/rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager:leaderelection + namespace: {{ cert_manager_leader_election_namespace }} + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager:leaderelection +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager + namespace: {{ cert_manager_namespace }} +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-webhook:dynamic-serving +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} +--- +# Source: cert-manager/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + type: ClusterIP + ports: + - protocol: TCP + port: 9402 + name: tcp-prometheus-servicemonitor + targetPort: 9402 + selector: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" +--- +# Source: cert-manager/templates/webhook-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + type: ClusterIP + ports: + - name: https + port: 443 + protocol: TCP + targetPort: "https" + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" +--- +# Source: cert-manager/templates/cainjector-deployment.yaml +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: cert-manager-cainjector + namespace: {{ cert_manager_namespace }} + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + template: + metadata: + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" + spec: + serviceAccountName: cert-manager-cainjector + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-cainjector + image: "{{ cert_manager_cainjector_image_repo }}:{{ cert_manager_cainjector_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --v=2 + - --leader-election-namespace={{ cert_manager_leader_election_namespace }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault +{% if cert_manager_tolerations %} + tolerations: + {{ cert_manager_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} +{% if cert_manager_nodeselector %} + nodeSelector: + {{ cert_manager_nodeselector | to_nice_yaml | indent(width=8) }} +{% endif %} +{% if cert_manager_affinity %} + affinity: + {{ cert_manager_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} +--- +{% if cert_manager_trusted_internal_ca is defined %} +apiVersion: v1 +data: + internal-ca.pem: | + {{ cert_manager_trusted_internal_ca | indent(width=4, first=False) }} +kind: ConfigMap +metadata: + name: ca-internal-truststore + namespace: {{ cert_manager_namespace }} +--- +{% endif %} +# Source: cert-manager/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + template: + metadata: + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" + annotations: + prometheus.io/path: "/metrics" + prometheus.io/scrape: 'true' + prometheus.io/port: '9402' + spec: + serviceAccountName: cert-manager + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-controller + image: "{{ cert_manager_controller_image_repo }}:{{ cert_manager_controller_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --v=2 + - --cluster-resource-namespace=$(POD_NAMESPACE) + - --leader-election-namespace={{ cert_manager_leader_election_namespace }} + ports: + - containerPort: 9402 + name: http-metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + 
drop: + - ALL + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{% if cert_manager_tolerations %} + tolerations: + {{ cert_manager_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} +{% if cert_manager_nodeselector %} + nodeSelector: + {{ cert_manager_nodeselector | to_nice_yaml | indent(width=8) }} +{% endif %} +{% if cert_manager_affinity %} + affinity: + {{ cert_manager_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} +{% if cert_manager_trusted_internal_ca is defined %} + volumeMounts: + - mountPath: /etc/ssl/certs/internal-ca.pem + name: ca-internal-truststore + subPath: internal-ca.pem + volumes: + - configMap: + defaultMode: 420 + name: ca-internal-truststore + name: ca-internal-truststore +{% endif %} +--- +# Source: cert-manager/templates/webhook-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + template: + metadata: + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" + spec: + serviceAccountName: cert-manager-webhook + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-webhook + image: "{{ cert_manager_webhook_image_repo }}:{{ cert_manager_webhook_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --v=2 + - --secure-port=10250 + - --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) + - --dynamic-serving-ca-secret-name=cert-manager-webhook-ca + - --dynamic-serving-dns-names=cert-manager-webhook + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE) + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE).svc + ports: + - name: https + protocol: TCP + containerPort: 10250 + - name: healthcheck + protocol: TCP + containerPort: 6080 + livenessProbe: + httpGet: + path: /livez + port: 6080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthz + port: 6080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{% if cert_manager_tolerations %} + tolerations: + {{ cert_manager_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} +{% if cert_manager_nodeselector %} + nodeSelector: + {{ cert_manager_nodeselector | to_nice_yaml | indent(width=8) }} +{% endif %} +{% if cert_manager_affinity %} + affinity: + {{ cert_manager_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} +--- +# Source: cert-manager/templates/webhook-mutating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: 
MutatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" + annotations: + cert-manager.io/inject-ca-from-secret: "{{ cert_manager_namespace }}/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). + matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + # Only include 'sideEffects' field in Kubernetes 1.12+ + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + path: /mutate +--- +# Source: cert-manager/templates/webhook-validating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" + annotations: + cert-manager.io/inject-ca-from-secret: "{{ cert_manager_namespace }}/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + namespaceSelector: + matchExpressions: + - key: "cert-manager.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - cert-manager + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). 
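+ # With failurePolicy Fail (set below), create/update requests for cert-manager resources are rejected whenever this webhook is unreachable.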
+ matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + path: /validate diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml new file mode 100644 index 0000000..10cf1a7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml @@ -0,0 +1,20 @@ +--- +ingress_nginx_namespace: "ingress-nginx" +ingress_nginx_host_network: false +ingress_publish_status_address: "" +ingress_nginx_nodeselector: + kubernetes.io/os: "linux" +ingress_nginx_tolerations: [] +ingress_nginx_insecure_port: 80 +ingress_nginx_secure_port: 443 +ingress_nginx_metrics_port: 10254 +ingress_nginx_configmap: {} +ingress_nginx_configmap_tcp_services: {} +ingress_nginx_configmap_udp_services: {} +ingress_nginx_extra_args: [] +ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx +ingress_nginx_webhook_enabled: false +ingress_nginx_webhook_job_ttl: 1800 + +ingress_nginx_probe_initial_delay_seconds: 10 \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml new file mode 100644 index 0000000..cc0ed71 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml @@ -0,0 +1,60 @@ +--- + +- name: NGINX Ingress Controller | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/ingress_nginx" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: NGINX Ingress Controller | Templates list + set_fact: + ingress_nginx_templates: + - { name: 00-namespace, file: 00-namespace.yml, type: ns } + - { name: cm-ingress-nginx, file: cm-ingress-nginx.yml, type: cm } + - { name: cm-tcp-services, file: cm-tcp-services.yml, type: cm } + - { name: cm-udp-services, file: cm-udp-services.yml, type: cm } + - { name: sa-ingress-nginx, file: sa-ingress-nginx.yml, type: sa } + - { name: clusterrole-ingress-nginx, file: clusterrole-ingress-nginx.yml, type: clusterrole } + - { name: clusterrolebinding-ingress-nginx, file: clusterrolebinding-ingress-nginx.yml, type: clusterrolebinding } + - { name: role-ingress-nginx, file: role-ingress-nginx.yml, type: role } + - { name: rolebinding-ingress-nginx, file: rolebinding-ingress-nginx.yml, type: rolebinding } + - { name: ds-ingress-nginx-controller, file: ds-ingress-nginx-controller.yml, type: ds } + ingress_nginx_templates_for_webhook: + - { name: admission-webhook-configuration, file: admission-webhook-configuration.yml, type: sa } + - { name: sa-admission-webhook, file: sa-admission-webhook.yml, type: sa } + - { name: clusterrole-admission-webhook, file: clusterrole-admission-webhook.yml, type: clusterrole } + - { name: clusterrolebinding-admission-webhook, file: clusterrolebinding-admission-webhook.yml, type: clusterrolebinding } + - { name: role-admission-webhook, file: role-admission-webhook.yml, type: role } + - { name: rolebinding-admission-webhook, file: rolebinding-admission-webhook.yml, type: rolebinding } + - { name: admission-webhook-job, file: admission-webhook-job.yml, type: job } + +- name: NGINX Ingress 
Controller | Append extra templates to NGINX Ingress Templates list for webhook + set_fact: + ingress_nginx_templates: "{{ ingress_nginx_templates + ingress_nginx_templates_for_webhook }}" + when: ingress_nginx_webhook_enabled + +- name: NGINX Ingress Controller | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.file }}" + mode: 0644 + with_items: "{{ ingress_nginx_templates }}" + register: ingress_nginx_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: NGINX Ingress Controller | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ ingress_nginx_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.item.file }}" + state: "latest" + with_items: "{{ ingress_nginx_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2 new file mode 100644 index 0000000..1f12366 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ ingress_nginx_namespace }} + labels: + name: {{ ingress_nginx_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-configuration.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-configuration.yml.j2 new file mode 100644 index 0000000..d6878a0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-configuration.yml.j2 @@ -0,0 +1,29 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: ingress-nginx-controller-admission + namespace: {{ ingress_nginx_namespace }} + path: /networking/v1/ingresses + failurePolicy: Fail + matchPolicy: Equivalent + name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + sideEffects: None diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-job.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-job.yml.j2 new file mode 100644 index 0000000..03a8420 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-job.yml.j2 @@ -0,0 +1,86 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission-create + namespace: {{ ingress_nginx_namespace }} +spec: + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission-create + spec: + containers: + - args: 
+ - create + - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: "{{ ingress_nginx_kube_webhook_certgen_imae_repo }}:{{ ingress_nginx_kube_webhook_certgen_imae_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + name: create + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission + ttlSecondsAfterFinished: {{ ingress_nginx_webhook_job_ttl }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission-patch + namespace: {{ ingress_nginx_namespace }} +spec: + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission-patch + spec: + containers: + - args: + - patch + - --webhook-name=ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: "{{ ingress_nginx_kube_webhook_certgen_imae_repo }}:{{ ingress_nginx_kube_webhook_certgen_imae_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + name: patch + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission + ttlSecondsAfterFinished: {{ ingress_nginx_webhook_job_ttl }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-admission-webhook.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-admission-webhook.yml.j2 new file mode 100644 index 0000000..daa4753 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-admission-webhook.yml.j2 @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission +rules: +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2 new file mode 100644 index 0000000..767502e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2 @@ -0,0 +1,36 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +rules: + - apiGroups: [""] + resources: ["configmaps", "endpoints", "nodes", "pods", "secrets"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: 
["nodes"] + verbs: ["get"] + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + - apiGroups: ["extensions","networking.k8s.io"] + resources: ["ingresses/status"] + verbs: ["update"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingressclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["list", "watch"] + - apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["get", "list", "watch"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-admission-webhook.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-admission-webhook.yml.j2 new file mode 100644 index 0000000..8791594 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-admission-webhook.yml.j2 @@ -0,0 +1,16 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: {{ ingress_nginx_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2 new file mode 100644 index 0000000..ad83dc2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2 @@ -0,0 +1,16 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2 new file mode 100644 index 0000000..9f1e3bb --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +{% if ingress_nginx_configmap %} +data: + {{ ingress_nginx_configmap | to_nice_yaml | indent(2) }} +{%- endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2 new file mode 100644 index 0000000..9752081 --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tcp-services + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +{% if ingress_nginx_configmap_tcp_services %} +data: + {{ ingress_nginx_configmap_tcp_services | to_nice_yaml | indent(2) }} +{%- endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2 new file mode 100644 index 0000000..a3f6613 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: udp-services + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +{% if ingress_nginx_configmap_udp_services %} +data: + {{ ingress_nginx_configmap_udp_services | to_nice_yaml | indent(2) }} +{%- endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 new file mode 100644 index 0000000..6ab4249 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 @@ -0,0 +1,141 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ingress-nginx-controller + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + annotations: + prometheus.io/port: "10254" + prometheus.io/scrape: "true" + spec: + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: {{ ingress_nginx_termination_grace_period_seconds }} +{% if ingress_nginx_host_network %} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet +{% endif %} +{% if ingress_nginx_nodeselector %} + nodeSelector: + {{ ingress_nginx_nodeselector | to_nice_yaml | indent(width=8) }} +{%- endif %} +{% if ingress_nginx_tolerations %} + tolerations: + {{ ingress_nginx_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} + priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + containers: + - name: ingress-nginx-controller + image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/ingress-nginx + - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services + - --udp-services-configmap=$(POD_NAMESPACE)/udp-services + - --annotations-prefix=nginx.ingress.kubernetes.io +{% if ingress_nginx_class is defined %} + - --ingress-class={{ ingress_nginx_class }} +{% else %} + - 
--watch-ingress-without-class=true +{% endif %} +{% if ingress_nginx_host_network %} + - --report-node-internal-ip-address +{% endif %} +{% if ingress_publish_status_address != "" %} + - --publish-status-address={{ ingress_publish_status_address }} +{% endif %} +{% for extra_arg in ingress_nginx_extra_args %} + - {{ extra_arg }} +{% endfor %} +{% if ingress_nginx_webhook_enabled %} + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key +{% endif %} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + # www-data -> 101 + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + ports: + - name: http + containerPort: 80 + hostPort: {{ ingress_nginx_insecure_port }} + - name: https + containerPort: 443 + hostPort: {{ ingress_nginx_secure_port }} + - name: metrics + containerPort: 10254 +{% if not ingress_nginx_host_network %} + hostPort: {{ ingress_nginx_metrics_port }} +{% endif %} +{% if ingress_nginx_webhook_enabled %} + - name: webhook + containerPort: 8443 + protocol: TCP +{% endif %} + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: {{ ingress_nginx_probe_initial_delay_seconds }} + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: {{ ingress_nginx_probe_initial_delay_seconds }} + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 +{% if ingress_nginx_webhook_enabled %} + volumeMounts: + - mountPath: /usr/local/certificates/ + name: webhook-cert + readOnly: true +{% endif %} +{% if ingress_nginx_webhook_enabled %} + volumes: + - name: webhook-cert + secret: + secretName: ingress-nginx-admission +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-admission-webhook.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-admission-webhook.yml.j2 new file mode 100644 index 0000000..5d1bb01 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-admission-webhook.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission + namespace: {{ ingress_nginx_namespace }} +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2 new file mode 100644 index 0000000..58c0488 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2 @@ -0,0 +1,68 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +rules: + - 
apiGroups: [""] + resources: ["namespaces"] + verbs: ["get"] + - apiGroups: [""] + resources: ["configmaps", "pods", "secrets", "endpoints"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions", "networking.k8s.io"] + resources: ["ingresses", "ingressclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions", "networking.k8s.io"] + resources: ["ingresses/status"] + verbs: ["update"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingressclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["configmaps"] + # Defaults to "-" + # Here: "-" + # This has to be adapted if you change either parameter + # when launching the nginx-ingress-controller. + resourceNames: [{% if ingress_class is defined %}"ingress-controller-leader-{{ ingress_nginx_class | default('nginx') }}"{% else %}"ingress-controller-leader"{% endif %}] + verbs: ["get", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + # Defaults to "-" + # Here: "-" + # This has to be adapted if you change either parameter + # when launching the nginx-ingress-controller. + resourceNames: [{% if ingress_class is defined %}"ingress-controller-leader-{{ ingress_nginx_class | default('nginx') }}"{% else %}"ingress-controller-leader"{% endif %}] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + - apiGroups: ["policy"] + resourceNames: ["ingress-nginx"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + # Defaults to "-" + # Here: "-" + # This has to be adapted if you change either parameter + # when launching the nginx-ingress-controller. 
+ resourceNames: [{% if ingress_class is defined %}"ingress-controller-leader-{{ ingress_nginx_class | default('nginx') }}"{% else %}"ingress-controller-leader"{% endif %}] + verbs: ["get", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] + - apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["get", "list", "watch"] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-admission-webhook.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-admission-webhook.yml.j2 new file mode 100644 index 0000000..671912d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-admission-webhook.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission + namespace: {{ ingress_nginx_namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-admission +subjects: +- kind: ServiceAccount + name: ingress-nginx-admission + namespace: {{ ingress_nginx_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2 new file mode 100644 index 0000000..142d400 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-admission-webhook.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-admission-webhook.yml.j2 new file mode 100644 index 0000000..488a045 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-admission-webhook.yml.j2 @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ingress-nginx-admission + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2 new file mode 100644 index 0000000..305d553 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx diff --git 
a/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/meta/main.yml new file mode 100644 index 0000000..b269607 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/ingress_controller/meta/main.yml @@ -0,0 +1,22 @@ +--- +dependencies: + - role: kubernetes-apps/ingress_controller/ingress_nginx + when: ingress_nginx_enabled + tags: + - apps + - ingress-controller + - ingress-nginx + + - role: kubernetes-apps/ingress_controller/cert_manager + when: cert_manager_enabled + tags: + - apps + - ingress-controller + - cert-manager + + - role: kubernetes-apps/ingress_controller/alb_ingress_controller + when: ingress_alb_enabled + tags: + - apps + - ingress-controller + - ingress_alb diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/krew/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/krew/defaults/main.yml new file mode 100644 index 0000000..d0de6b1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/krew/defaults/main.yml @@ -0,0 +1,4 @@ +--- +krew_enabled: false +krew_root_dir: "/usr/local/krew" +krew_default_index_uri: https://github.com/kubernetes-sigs/krew-index.git diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/krew/tasks/krew.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/krew/tasks/krew.yml new file mode 100644 index 0000000..bbc4dba --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/krew/tasks/krew.yml @@ -0,0 +1,38 @@ +--- +- name: Krew | Download krew + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.krew) }}" + +- name: Krew | krew env + template: + src: krew.j2 + dest: /etc/bash_completion.d/krew + mode: 0644 + +- name: Krew | Copy krew manifest + template: + src: krew.yml.j2 + dest: "{{ local_release_dir }}/krew.yml" + mode: 0644 + +- name: Krew | Install krew # noqa 301 305 + shell: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} install --archive={{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz --manifest={{ local_release_dir }}/krew.yml" + environment: + KREW_ROOT: "{{ krew_root_dir }}" + KREW_DEFAULT_INDEX_URI: "{{ krew_default_index_uri | default('') }}" + +- name: Krew | Get krew completion + command: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} completion bash" + changed_when: False + register: krew_completion + check_mode: False + ignore_errors: yes # noqa ignore-errors + +- name: Krew | Install krew completion + copy: + dest: /etc/bash_completion.d/krew.sh + content: "{{ krew_completion.stdout }}" + mode: 0755 + become: True + when: krew_completion.rc == 0 diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/krew/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/krew/tasks/main.yml new file mode 100644 index 0000000..40729e8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/krew/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Krew | install krew on kube_control_plane + import_tasks: krew.yml + +- name: Krew | install krew on localhost + import_tasks: krew.yml + delegate_to: localhost + connection: local + run_once: true + when: kubectl_localhost diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/krew/templates/krew.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/krew/templates/krew.j2 new file mode 100644 index 0000000..a666f6e --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/krew/templates/krew.j2 @@ -0,0 +1,6 @@ +# krew bash env(kubespray) +export KREW_ROOT="{{ krew_root_dir }}" +{% if krew_default_index_uri is defined %} +export KREW_DEFAULT_INDEX_URI='{{ krew_default_index_uri }}' +{% endif %} +export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/krew/templates/krew.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/krew/templates/krew.yml.j2 new file mode 100644 index 0000000..b0c6152 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/krew/templates/krew.yml.j2 @@ -0,0 +1,100 @@ +apiVersion: krew.googlecontainertools.github.com/v1alpha2 +kind: Plugin +metadata: + name: krew +spec: + version: "{{ krew_version }}" + homepage: https://krew.sigs.k8s.io/ + shortDescription: Package manager for kubectl plugins. + caveats: | + krew is now installed! To start using kubectl plugins, you need to add + krew's installation directory to your PATH: + + * macOS/Linux: + - Add the following to your ~/.bashrc or ~/.zshrc: + export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH" + - Restart your shell. + + * Windows: Add %USERPROFILE%\.krew\bin to your PATH environment variable + + To list krew commands and to get help, run: + $ kubectl krew + For a full list of available plugins, run: + $ kubectl krew search + + You can find documentation at + https://krew.sigs.k8s.io/docs/user-guide/quickstart/. + + platforms: + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew + files: + - from: ./krew-darwin_amd64 + to: krew + - from: ./LICENSE + to: . + selector: + matchLabels: + os: darwin + arch: amd64 + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew + files: + - from: ./krew-darwin_arm64 + to: krew + - from: ./LICENSE + to: . + selector: + matchLabels: + os: darwin + arch: arm64 + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew + files: + - from: ./krew-linux_amd64 + to: krew + - from: ./LICENSE + to: . + selector: + matchLabels: + os: linux + arch: amd64 + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew + files: + - from: ./krew-linux_arm + to: krew + - from: ./LICENSE + to: . + selector: + matchLabels: + os: linux + arch: arm + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew + files: + - from: ./krew-linux_arm64 + to: krew + - from: ./LICENSE + to: . + selector: + matchLabels: + os: linux + arch: arm64 + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew.exe + files: + - from: ./krew-windows_amd64.exe + to: krew.exe + - from: ./LICENSE + to: . 
+ selector: + matchLabels: + os: windows + arch: amd64 diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/meta/main.yml new file mode 100644 index 0000000..9c19fde --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/meta/main.yml @@ -0,0 +1,126 @@ +--- +dependencies: + - role: kubernetes-apps/ansible + when: + - inventory_hostname == groups['kube_control_plane'][0] + + - role: kubernetes-apps/helm + when: + - helm_enabled + tags: + - helm + + - role: kubernetes-apps/krew + when: + - krew_enabled + tags: + - krew + + - role: kubernetes-apps/registry + when: + - registry_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - registry + + - role: kubernetes-apps/metrics_server + when: + - metrics_server_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - metrics_server + + - role: kubernetes-apps/csi_driver/csi_crd + when: + - cinder_csi_enabled or csi_snapshot_controller_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - csi-driver + + - role: kubernetes-apps/csi_driver/cinder + when: + - cinder_csi_enabled + tags: + - cinder-csi-driver + - csi-driver + + - role: kubernetes-apps/csi_driver/aws_ebs + when: + - aws_ebs_csi_enabled + tags: + - aws-ebs-csi-driver + - csi-driver + + - role: kubernetes-apps/csi_driver/azuredisk + when: + - azure_csi_enabled + tags: + - azure-csi-driver + - csi-driver + + - role: kubernetes-apps/csi_driver/gcp_pd + when: + - gcp_pd_csi_enabled + tags: + - gcp-pd-csi-driver + - csi-driver + + - role: kubernetes-apps/csi_driver/upcloud + when: + - upcloud_csi_enabled + tags: + - upcloud-csi-driver + - csi-driver + + - role: kubernetes-apps/csi_driver/vsphere + when: + - vsphere_csi_enabled + tags: + - vsphere-csi-driver + - csi-driver + + - role: kubernetes-apps/persistent_volumes + when: + - persistent_volumes_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - persistent_volumes + + - role: kubernetes-apps/snapshots + when: inventory_hostname == groups['kube_control_plane'][0] + tags: + - snapshots + - csi-driver + + - role: kubernetes-apps/container_runtimes + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - container-runtimes + + - role: kubernetes-apps/container_engine_accelerator + when: nvidia_accelerator_enabled + tags: + - container_engine_accelerator + + - role: kubernetes-apps/cloud_controller/oci + when: + - cloud_provider is defined + - cloud_provider == "oci" + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - oci + + - role: kubernetes-apps/metallb + when: + - metallb_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - metallb + + - role: kubernetes-apps/argocd + when: + - argocd_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - argocd diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/OWNERS b/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/OWNERS new file mode 100644 index 0000000..b64c7bc --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +reviewers: + - oomichi diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/defaults/main.yml new file mode 100644 index 0000000..dc96fdc --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/defaults/main.yml @@ -0,0 +1,23 @@ +--- +metallb_enabled: false +metallb_log_level: info +metallb_protocol: "layer2" +metallb_port: "7472" +metallb_memberlist_port: "7946" +metallb_peers: [] +metallb_speaker_enabled: "{{ metallb_enabled }}" +metallb_speaker_nodeselector: + kubernetes.io/os: "linux" +metallb_controller_nodeselector: + kubernetes.io/os: "linux" +metallb_speaker_tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists +metallb_controller_tolerations: [] +metallb_pool_name: "loadbalanced" +metallb_auto_assign: true +metallb_avoid_buggy_ips: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/tasks/main.yml new file mode 100644 index 0000000..e5920fc --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/tasks/main.yml @@ -0,0 +1,62 @@ +--- +- name: Kubernetes Apps | Check cluster settings for MetalLB + fail: + msg: "MetalLB requires kube_proxy_strict_arp = true, see https://github.com/danderson/metallb/issues/153#issuecomment-518651132" + when: + - "kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp" + +- name: Kubernetes Apps | Check cluster settings for MetalLB + fail: + msg: "metallb_ip_range must be specified for MetalLB" + when: + - metallb_ip_range is not defined or not metallb_ip_range + +- name: Kubernetes Apps | Check BGP peers for MetalLB + fail: + msg: "metallb_peers must be specified when metallb_protocol is 'bgp' and metallb_speaker_enabled is true" + when: + - metallb_protocol == 'bgp' and metallb_speaker_enabled + - metallb_peers is not defined or not metallb_peers + +- name: Kubernetes Apps | Check that the deprecated 'matallb_auto_assign' variable is not used anymore + fail: + msg: "'matallb_auto_assign' configuration variable is deprecated, please use 'metallb_auto_assign' instead" + when: + - matallb_auto_assign is defined + +- name: Kubernetes Apps | Check AppArmor status + command: which apparmor_parser + register: apparmor_status + when: + - podsecuritypolicy_enabled + - inventory_hostname == groups['kube_control_plane'][0] + failed_when: false + +- name: Kubernetes Apps | Set apparmor_enabled + set_fact: + apparmor_enabled: "{{ apparmor_status.rc == 0 }}" + when: + - podsecuritypolicy_enabled + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Lay Down MetalLB + become: true + template: + src: "{{ item }}.j2" + dest: "{{ kube_config_dir }}/{{ item }}" + mode: 0644 + with_items: ["metallb.yml", "metallb-config.yml"] + register: "rendering" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +- name: Kubernetes Apps | Install and configure MetalLB + kube: + name: "MetalLB" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item }}" + state: "{{ item.changed | ternary('latest','present') }}" + become: true + with_items: "{{ rendering.results }}" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/templates/metallb-config.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/templates/metallb-config.yml.j2 new file mode 100644 index 0000000..8fda506 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/templates/metallb-config.yml.j2 @@ -0,0 +1,54 @@ +--- +apiVersion: v1 
+kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | +{% if metallb_peers | length > 0 %} + peers: +{% for peer in metallb_peers %} + - peer-address: {{ peer.peer_address }} + peer-asn: {{ peer.peer_asn }} + my-asn: {{ peer.my_asn }} +{% if peer.password is defined %} + password: "{{ peer.password }}" +{% endif %} +{% if peer.source_address is defined %} + source-address: {{ peer.source_address }} +{% endif %} +{% if peer.node_selectors is defined %} + node-selectors: + {{ peer.node_selectors | to_yaml(indent=2, width=1337) | indent(8) }} +{% endif %} +{% endfor %} +{% endif %} + address-pools: + - name: {{ metallb_pool_name }} + protocol: {{ metallb_protocol }} + addresses: +{% for ip_range in metallb_ip_range %} + - {{ ip_range }} +{% endfor %} +{% if metallb_auto_assign == false %} + auto-assign: false +{% endif %} +{% if metallb_avoid_buggy_ips == true %} + avoid-buggy-ips: true +{% endif %} +{% if metallb_additional_address_pools is defined %}{% for pool in metallb_additional_address_pools %} + - name: {{ pool }} + protocol: {{ metallb_additional_address_pools[pool].protocol }} + addresses: +{% for ip_range in metallb_additional_address_pools[pool].ip_range %} + - {{ ip_range }} +{% endfor %} +{% if metallb_additional_address_pools[pool].auto_assign is defined %} + auto-assign: {{ metallb_additional_address_pools[pool].auto_assign }} +{% endif %} +{% if metallb_additional_address_pools[pool].avoid_buggy_ips is defined %} + avoid-buggy-ips: {{ metallb_additional_address_pools[pool].avoid_buggy_ips }} +{% endif %} +{% endfor %} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/templates/metallb.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/templates/metallb.yml.j2 new file mode 100644 index 0000000..fc03cd2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metallb/templates/metallb.yml.j2 @@ -0,0 +1,425 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: metallb-system + labels: + app: metallb +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +--- +{% if metallb_speaker_enabled %} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: speaker + namespace: metallb-system +{% endif %} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:controller +rules: +- apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - services/status + verbs: + - update +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - controller + resources: + - podsecuritypolicies + verbs: + - use +--- +{% if metallb_speaker_enabled %} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:speaker +rules: +- apiGroups: + - '' + resources: + - services + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - speaker + resources: + - podsecuritypolicies + verbs: + - use +{% endif %} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system 
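The metallb-config template above is driven entirely by group_vars: the checks in tasks/main.yml require metallb_ip_range, and the Jinja loops assume particular shapes for metallb_peers and metallb_additional_address_pools. A minimal sketch of those variables, assuming BGP mode; every address, ASN and pool name below is a placeholder, not a value taken from this repository:

metallb_ip_range:
  - "10.5.0.50-10.5.0.99"        # placeholder address range
metallb_protocol: "bgp"
metallb_peers:
  - peer_address: 192.0.2.1       # placeholder router address
    peer_asn: 64512               # placeholder peer ASN
    my_asn: 64513                 # placeholder local ASN
metallb_additional_address_pools:
  example_extra_pool:             # placeholder pool name
    ip_range:
      - "10.5.1.50-10.5.1.99"
    protocol: layer2
    auto_assign: false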
+rules: +- apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - pods + verbs: + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - secrets + verbs: + - create +- apiGroups: + - '' + resources: + - secrets + resourceNames: + - memberlist + verbs: + - list +- apiGroups: + - apps + resources: + - deployments + resourceNames: + - controller + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:controller +subjects: +- kind: ServiceAccount + name: controller + namespace: metallb-system +--- +{% if metallb_speaker_enabled %} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:speaker +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:speaker +subjects: +- kind: ServiceAccount + name: speaker + namespace: metallb-system +{% endif %} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: config-watcher +subjects: +- kind: ServiceAccount + name: controller +- kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-lister +subjects: +- kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: controller +subjects: +- kind: ServiceAccount + name: controller +--- +{% if metallb_speaker_enabled %} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: metallb + component: speaker + name: speaker + namespace: metallb-system +spec: + selector: + matchLabels: + app: metallb + component: speaker + template: + metadata: + annotations: + prometheus.io/port: '{{ metallb_port }}' + prometheus.io/scrape: 'true' + labels: + app: metallb + component: speaker + spec: + containers: + - args: + - --port={{ metallb_port }} + - --config=config + - --log-level={{ metallb_log_level }} + env: + - name: METALLB_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: METALLB_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: METALLB_ML_BIND_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + # needed when another software is also using memberlist / port 7946 + # when changing this default you also need to update the container ports definition + # and the PodSecurityPolicy hostPorts definition + #- name: METALLB_ML_BIND_PORT + # value: "{{ metallb_memberlist_port }}" + - name: METALLB_ML_LABELS + value: "app=metallb,component=speaker" + - name: METALLB_ML_SECRET_KEY + valueFrom: + secretKeyRef: + name: memberlist + key: secretkey + image: {{ 
metallb_speaker_image_repo }}:{{ metallb_version }} + name: speaker + ports: + - containerPort: {{ metallb_port }} + name: monitoring + - containerPort: {{ metallb_memberlist_port }} + name: memberlist-tcp + - containerPort: {{ metallb_memberlist_port }} + name: memberlist-udp + protocol: UDP + livenessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_RAW + drop: + - ALL + readOnlyRootFilesystem: true + hostNetwork: true +{% if metallb_speaker_nodeselector %} + nodeSelector: + {{ metallb_speaker_nodeselector | to_nice_yaml | indent(width=8) }} +{%- endif %} + serviceAccountName: speaker + terminationGracePeriodSeconds: 2 +{% if metallb_speaker_tolerations %} + tolerations: + {{ metallb_speaker_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} +{% endif %} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metallb + component: controller + name: controller + namespace: metallb-system +spec: + revisionHistoryLimit: 3 + selector: + matchLabels: + app: metallb + component: controller + template: + metadata: + annotations: + prometheus.io/port: '{{ metallb_port }}' + prometheus.io/scrape: 'true' + labels: + app: metallb + component: controller + spec: + priorityClassName: system-cluster-critical +{% if metallb_controller_tolerations %} + tolerations: + {{ metallb_controller_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} + containers: + - args: + - --port={{ metallb_port }} + - --config=config + - --log-level={{ metallb_log_level }} + env: + - name: METALLB_ML_SECRET_NAME + value: memberlist + - name: METALLB_DEPLOYMENT + value: controller + image: {{ metallb_controller_image_repo }}:{{ metallb_version }} + name: controller + ports: + - containerPort: {{ metallb_port }} + name: monitoring + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + readOnlyRootFilesystem: true + livenessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 +{% if metallb_controller_nodeselector %} + nodeSelector: + {{ metallb_controller_nodeselector | to_nice_yaml | indent(width=8) }} +{%- endif %} + securityContext: + runAsNonRoot: true + runAsUser: 65534 + fsGroup: 65534 + serviceAccountName: controller + terminationGracePeriodSeconds: 0 diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/defaults/main.yml new file mode 100644 index 0000000..4e247a1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/defaults/main.yml @@ -0,0 +1,11 @@ +--- +metrics_server_container_port: 4443 +metrics_server_kubelet_insecure_tls: true +metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +metrics_server_metric_resolution: 15s +metrics_server_limits_cpu: 100m +metrics_server_limits_memory: 200Mi +metrics_server_requests_cpu: 100m 
+metrics_server_requests_memory: 200Mi +metrics_server_host_network: false +metrics_server_replicas: 1 \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/tasks/main.yml new file mode 100644 index 0000000..1fe617d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/tasks/main.yml @@ -0,0 +1,57 @@ +--- +# If all masters have node role, there are no tainted master and toleration should not be specified. +- name: Check all masters are node or not + set_fact: + masters_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}" + +- name: Metrics Server | Delete addon dir + file: + path: "{{ kube_config_dir }}/addons/metrics_server" + state: absent + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: Metrics Server | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/metrics_server" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Metrics Server | Templates list + set_fact: + metrics_server_templates: + - { name: auth-delegator, file: auth-delegator.yaml, type: clusterrolebinding } + - { name: auth-reader, file: auth-reader.yaml, type: rolebinding } + - { name: metrics-server-sa, file: metrics-server-sa.yaml, type: sa } + - { name: metrics-server-deployment, file: metrics-server-deployment.yaml, type: deploy } + - { name: metrics-server-service, file: metrics-server-service.yaml, type: service } + - { name: metrics-apiservice, file: metrics-apiservice.yaml, type: service } + - { name: resource-reader-clusterrolebinding, file: resource-reader-clusterrolebinding.yaml, type: clusterrolebinding } + - { name: resource-reader, file: resource-reader.yaml, type: clusterrole } + +- name: Metrics Server | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/metrics_server/{{ item.file }}" + mode: 0644 + with_items: "{{ metrics_server_templates }}" + register: metrics_server_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Metrics Server | Apply manifests + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/metrics_server/{{ item.item.file }}" + state: "latest" + with_items: "{{ metrics_server_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/auth-delegator.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/auth-delegator.yaml.j2 new file mode 100644 index 0000000..92f8204 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/auth-delegator.yaml.j2 @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-server:system:auth-delegator + labels: + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/auth-reader.yaml.j2 
b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/auth-reader.yaml.j2 new file mode 100644 index 0000000..e02b8ea --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/auth-reader.yaml.j2 @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: metrics-server-auth-reader + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-apiservice.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-apiservice.yaml.j2 new file mode 100644 index 0000000..9341687 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-apiservice.yaml.j2 @@ -0,0 +1,15 @@ +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1beta1.metrics.k8s.io + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + service: + name: metrics-server + namespace: kube-system + group: metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: {{ metrics_server_kubelet_insecure_tls }} + groupPriorityMinimum: 100 + versionPriority: 100 diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 new file mode 100644 index 0000000..86247b9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 @@ -0,0 +1,107 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metrics-server + namespace: kube-system + labels: + app.kubernetes.io/name: metrics-server + addonmanager.kubernetes.io/mode: Reconcile + version: {{ metrics_server_version }} +spec: + replicas: {{ metrics_server_replicas }} + selector: + matchLabels: + app.kubernetes.io/name: metrics-server + version: {{ metrics_server_version }} + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + name: metrics-server + labels: + app.kubernetes.io/name: metrics-server + version: {{ metrics_server_version }} + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + hostNetwork: {{ metrics_server_host_network | default(false) }} + containers: + - name: metrics-server + image: {{ metrics_server_image_repo }}:{{ metrics_server_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --logtostderr + - --cert-dir=/tmp + - --secure-port={{ metrics_server_container_port }} +{% if metrics_server_kubelet_preferred_address_types %} + - --kubelet-preferred-address-types={{ metrics_server_kubelet_preferred_address_types }} +{% endif %} + - --kubelet-use-node-status-port +{% if metrics_server_kubelet_insecure_tls %} + - --kubelet-insecure-tls +{% endif %} + - --metric-resolution={{ metrics_server_metric_resolution }} + ports: + - containerPort: {{ metrics_server_container_port }} + name: https + protocol: TCP + volumeMounts: + - name: tmp + mountPath: /tmp + livenessProbe: + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + failureThreshold: 3 + initialDelaySeconds: 40 + 
readinessProbe: + httpGet: + path: /readyz + port: https + scheme: HTTPS + periodSeconds: 10 + failureThreshold: 3 + initialDelaySeconds: 40 + securityContext: + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + allowPrivilegeEscalation: false + resources: + limits: + cpu: {{ metrics_server_limits_cpu }} + memory: {{ metrics_server_limits_memory }} + requests: + cpu: {{ metrics_server_requests_cpu }} + memory: {{ metrics_server_requests_memory }} + volumes: + - name: tmp + emptyDir: {} +{% if not masters_are_not_tainted %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% endif %} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - metrics-server + topologyKey: kubernetes.io/hostname + namespaces: + - kube-system \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-server-sa.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-server-sa.yaml.j2 new file mode 100644 index 0000000..94444ca --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-server-sa.yaml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metrics-server + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-server-service.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-server-service.yaml.j2 new file mode 100644 index 0000000..f1c3691 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/metrics-server-service.yaml.j2 @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: metrics-server + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: "metrics-server" +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: metrics-server + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/resource-reader-clusterrolebinding.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/resource-reader-clusterrolebinding.yaml.j2 new file mode 100644 index 0000000..038cfd8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/resource-reader-clusterrolebinding.yaml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:metrics-server + labels: + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/resource-reader.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/resource-reader.yaml.j2 new file mode 100644 index 0000000..3d9ea81 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/metrics_server/templates/resource-reader.yaml.j2 @@ -0,0 +1,17 @@ +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:metrics-server + labels: + addonmanager.kubernetes.io/mode: Reconcile +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/metrics + verbs: + - get + - list + - watch diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml new file mode 100644 index 0000000..b8b4338 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml @@ -0,0 +1,2 @@ +--- +# TODO: Handle Calico etcd -> kdd migration diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml new file mode 100644 index 0000000..db7e3f2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Canal | Start Resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ canal_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml new file mode 100644 index 0000000..ff56d24 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Flannel | Start Resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ flannel_node_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped + +- name: Flannel | Wait for flannel subnet.env file presence + wait_for: + path: /run/flannel/subnet.env + delay: 5 + timeout: 600 diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml new file mode 100644 index 0000000..9f42501 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: Kube-OVN | Start Resources + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ kube_ovn_node_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/kube-router/OWNERS b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/kube-router/OWNERS new file mode 100644 index 0000000..c95aad2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/kube-router/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - bozzo +reviewers: + - bozzo \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml 
b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml new file mode 100644 index 0000000..25f9a71 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml @@ -0,0 +1,23 @@ +--- + +- name: kube-router | Start Resources + kube: + name: "kube-router" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/kube-router.yml" + resource: "ds" + namespace: "kube-system" + state: "latest" + delegate_to: "{{ groups['kube_control_plane'] | first }}" + run_once: true + +- name: kube-router | Wait for kube-router pods to be ready + command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors + register: pods_not_ready + until: pods_not_ready.stdout.find("kube-router")==-1 + retries: 30 + delay: 10 + ignore_errors: true + delegate_to: "{{ groups['kube_control_plane'] | first }}" + run_once: true + changed_when: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/meta/main.yml new file mode 100644 index 0000000..976e6ec --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/meta/main.yml @@ -0,0 +1,36 @@ +--- +dependencies: + - role: kubernetes-apps/network_plugin/calico + when: kube_network_plugin == 'calico' + tags: + - calico + + - role: kubernetes-apps/network_plugin/canal + when: kube_network_plugin == 'canal' + tags: + - canal + + - role: kubernetes-apps/network_plugin/flannel + when: kube_network_plugin == 'flannel' + tags: + - flannel + + - role: kubernetes-apps/network_plugin/kube-ovn + when: kube_network_plugin == 'kube-ovn' + tags: + - kube-ovn + + - role: kubernetes-apps/network_plugin/weave + when: kube_network_plugin == 'weave' + tags: + - weave + + - role: kubernetes-apps/network_plugin/kube-router + when: kube_network_plugin == 'kube-router' + tags: + - kube-router + + - role: kubernetes-apps/network_plugin/multus + when: kube_network_plugin_multus + tags: + - multus diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml new file mode 100644 index 0000000..232d3e4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Multus | Start resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml new file mode 100644 index 0000000..bc0f932 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml @@ -0,0 +1,21 @@ +--- + +- name: Weave | Start Resources + kube: + name: "weave-net" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/weave-net.yml" + resource: "ds" + namespace: "kube-system" + state: "latest" + when: inventory_hostname == groups['kube_control_plane'][0] 
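The canal, flannel, kube-ovn, kube-router and weave tasks above all apply pre-rendered manifests through Kubespray's kube module on the first control-plane node. A minimal sketch of the render-then-apply pattern as it appears in the metrics_server and registry roles of this patch; the example-cni role name and file are hypothetical placeholders:

- name: Example CNI | Create manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: 0644
  with_items:
    - { name: example-cni, file: example-cni.yml, type: ds }
  register: example_cni_manifests
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: Example CNI | Start Resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items: "{{ example_cni_manifests.results }}"
  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped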
+ +- name: Weave | Wait for Weave to become available + uri: + url: http://127.0.0.1:6784/status + return_content: yes + register: weave_status + retries: 180 + delay: 5 + until: "weave_status.status == 200 and 'Status: ready' in weave_status.content" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/OWNERS b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/OWNERS new file mode 100644 index 0000000..6e44ceb --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - alijahnas +reviewers: diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/defaults/main.yml new file mode 100644 index 0000000..896d2d3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# To restrict which AZ the volume should be provisioned in +# set this value to true and set the list of relevant AZs +# For it to work, the flag aws_ebs_csi_enable_volume_scheduling +# in AWS EBS Driver must be true +restrict_az_provisioning: false +aws_ebs_availability_zones: + - eu-west-3c diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml new file mode 100644 index 0000000..b49acdf --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Copy AWS EBS CSI Storage Class template + template: + src: "aws-ebs-csi-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/aws-ebs-csi-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add AWS EBS CSI Storage Class + kube: + name: aws-ebs-csi + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/aws-ebs-csi-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/templates/aws-ebs-csi-storage-class.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/templates/aws-ebs-csi-storage-class.yml.j2 new file mode 100644 index 0000000..1632646 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/templates/aws-ebs-csi-storage-class.yml.j2 @@ -0,0 +1,18 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: ebs-sc +provisioner: ebs.csi.aws.com +volumeBindingMode: WaitForFirstConsumer +parameters: + csi.storage.k8s.io/fstype: xfs + type: gp2 +{% if restrict_az_provisioning %} +allowedTopologies: +- matchLabelExpressions: + - key: topology.ebs.csi.aws.com/zone + values: +{% for value in aws_ebs_availability_zones %} + - {{ value }} +{% endfor %} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/defaults/main.yml 
b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/defaults/main.yml new file mode 100644 index 0000000..fc92e17 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/defaults/main.yml @@ -0,0 +1,3 @@ +--- +## Available values: Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS +storage_account_type: StandardSSD_LRS diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml new file mode 100644 index 0000000..9abffbe --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Copy Azure CSI Storage Class template + template: + src: "azure-csi-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/azure-csi-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add Azure CSI Storage Class + kube: + name: azure-csi + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/azure-csi-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/templates/azure-csi-storage-class.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/templates/azure-csi-storage-class.yml.j2 new file mode 100644 index 0000000..be5cb38 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/templates/azure-csi-storage-class.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: disk.csi.azure.com +provisioner: disk.csi.azure.com +parameters: + skuname: {{ storage_account_type }} +{% if azure_csi_tags is defined %} + tags: {{ azure_csi_tags }} +{% endif %} +reclaimPolicy: Delete +volumeBindingMode: Immediate +allowVolumeExpansion: true diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/cinder-csi/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/cinder-csi/defaults/main.yml new file mode 100644 index 0000000..5e35dd5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/cinder-csi/defaults/main.yml @@ -0,0 +1,7 @@ +--- +storage_classes: + - name: cinder-csi + is_default: false + parameters: + availability: nova + allowVolumeExpansion: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml new file mode 100644 index 0000000..52de1c5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Copy Cinder CSI Storage Class template + template: + src: "cinder-csi-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/cinder-csi-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add Cinder CSI Storage Class + kube: + name: cinder-csi + kubectl: "{{ bin_dir }}/kubectl" + resource: 
StorageClass + filename: "{{ kube_config_dir }}/cinder-csi-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/cinder-csi/templates/cinder-csi-storage-class.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/cinder-csi/templates/cinder-csi-storage-class.yml.j2 new file mode 100644 index 0000000..be8ba13 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/cinder-csi/templates/cinder-csi-storage-class.yml.j2 @@ -0,0 +1,25 @@ +{% for class in storage_classes %} +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: "{{ class.name }}" + annotations: + storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}" +provisioner: cinder.csi.openstack.org +volumeBindingMode: WaitForFirstConsumer +parameters: +{% for key, value in (class.parameters | default({})).items() %} + "{{ key }}": "{{ value }}" +{% endfor %} +{% if cinder_topology is defined and cinder_topology is sameas true %} +allowedTopologies: +- matchLabelExpressions: + - key: topology.cinder.csi.openstack.org/zone + values: +{% for zone in cinder_topology_zones %} + - "{{ zone }}" +{% endfor %} +{% endif %} +allowVolumeExpansion: {{ expand_persistent_volumes }} +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/defaults/main.yml new file mode 100644 index 0000000..d58706f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# Choose between pd-standard and pd-ssd +gcp_pd_csi_volume_type: pd-standard +gcp_pd_regional_replication_enabled: false +gcp_pd_restrict_zone_replication: false +gcp_pd_restricted_zones: + - europe-west1-b + - europe-west1-c diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml new file mode 100644 index 0000000..29997e7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Copy GCP PD CSI Storage Class template + template: + src: "gcp-pd-csi-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/gcp-pd-csi-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add GCP PD CSI Storage Class + kube: + name: gcp-pd-csi + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/gcp-pd-csi-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/templates/gcp-pd-csi-storage-class.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/templates/gcp-pd-csi-storage-class.yml.j2 new file mode 100644 index 0000000..475eb4f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/templates/gcp-pd-csi-storage-class.yml.j2 @@ -0,0 +1,20 @@ +apiVersion: storage.k8s.io/v1 +kind: 
StorageClass +metadata: + name: csi-gce-pd +provisioner: pd.csi.storage.gke.io +parameters: + type: {{ gcp_pd_csi_volume_type }} +{% if gcp_pd_regional_replication_enabled %} + replication-type: regional-pd +{% endif %} +volumeBindingMode: WaitForFirstConsumer +{% if gcp_pd_restrict_zone_replication %} +allowedTopologies: +- matchLabelExpressions: + - key: topology.gke.io/zone + values: +{% for value in gcp_pd_restricted_zones %} + - {{ value }} +{% endfor %} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/meta/main.yml new file mode 100644 index 0000000..fdfd807 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/meta/main.yml @@ -0,0 +1,43 @@ +--- +dependencies: + - role: kubernetes-apps/persistent_volumes/openstack + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack' ] + tags: + - persistent_volumes_openstack + + - role: kubernetes-apps/persistent_volumes/cinder-csi + when: + - cinder_csi_enabled + tags: + - persistent_volumes_cinder_csi + - cinder-csi-driver + + - role: kubernetes-apps/persistent_volumes/aws-ebs-csi + when: + - aws_ebs_csi_enabled + tags: + - persistent_volumes_aws_ebs_csi + - aws-ebs-csi-driver + + - role: kubernetes-apps/persistent_volumes/azuredisk-csi + when: + - azure_csi_enabled + tags: + - persistent_volumes_azure_csi + - azure-csi-driver + + - role: kubernetes-apps/persistent_volumes/gcp-pd-csi + when: + - gcp_pd_csi_enabled + tags: + - persistent_volumes_gcp_pd_csi + - gcp-pd-csi-driver + + - role: kubernetes-apps/persistent_volumes/upcloud-csi + when: + - upcloud_csi_enabled + tags: + - persistent_volumes_upcloud_csi + - upcloud-csi-driver \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml new file mode 100644 index 0000000..05a3d94 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml @@ -0,0 +1,7 @@ +--- +persistent_volumes_enabled: false +storage_classes: + - name: standard + is_default: true + parameters: + availability: nova diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml new file mode 100644 index 0000000..3387e7f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template + template: + src: "openstack-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/openstack-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class + kube: + name: storage-class + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/openstack-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2 
b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2 new file mode 100644 index 0000000..0551e15 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2 @@ -0,0 +1,15 @@ +{% for class in storage_classes %} +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: "{{ class.name }}" + annotations: + storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}" +provisioner: kubernetes.io/cinder +parameters: +{% for key, value in (class.parameters | default({})).items() %} + "{{ key }}": "{{ value }}" +{% endfor %} +allowVolumeExpansion: {{ expand_persistent_volumes }} +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml new file mode 100644 index 0000000..5986e8c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml @@ -0,0 +1,12 @@ +--- +storage_classes: + - name: standard + is_default: true + expand_persistent_volumes: true + parameters: + tier: maxiops + - name: hdd + is_default: false + expand_persistent_volumes: true + parameters: + tier: hdd diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/upcloud-csi/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/upcloud-csi/tasks/main.yml new file mode 100644 index 0000000..26104a0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/upcloud-csi/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Copy UpCloud CSI Storage Class template + template: + src: "upcloud-csi-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/upcloud-csi-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add UpCloud CSI Storage Class + kube: + name: upcloud-csi + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/upcloud-csi-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/upcloud-csi/templates/upcloud-csi-storage-class.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/upcloud-csi/templates/upcloud-csi-storage-class.yml.j2 new file mode 100644 index 0000000..a40df9b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/persistent_volumes/upcloud-csi/templates/upcloud-csi-storage-class.yml.j2 @@ -0,0 +1,16 @@ +{% for class in storage_classes %} +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: "{{ class.name }}" + annotations: + storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}" +provisioner: storage.csi.upcloud.com +reclaimPolicy: Delete +parameters: +{% for key, value in (class.parameters | default({})).items() %} + "{{ key }}": "{{ value }}" +{% endfor %} +allowVolumeExpansion: {{ class.expand_persistent_volumes | default(true) | ternary("true","false") }} +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml 
b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml new file mode 100644 index 0000000..33f5269 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml @@ -0,0 +1,11 @@ +--- +# Limits for calico apps +calico_policy_controller_cpu_limit: 1000m +calico_policy_controller_memory_limit: 256M +calico_policy_controller_cpu_requests: 30m +calico_policy_controller_memory_requests: 64M +calico_policy_controller_deployment_nodeselector: "kubernetes.io/os: linux" + +# SSL +calico_cert_dir: "/etc/calico/certs" +canal_cert_dir: "/etc/canal/certs" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml new file mode 100644 index 0000000..e4169b2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: Set cert dir + set_fact: + calico_cert_dir: "{{ canal_cert_dir }}" + when: + - kube_network_plugin == 'canal' + tags: + - facts + - canal + +- name: Create calico-kube-controllers manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: calico-kube-controllers, file: calico-kube-controllers.yml, type: deployment} + - {name: calico-kube-controllers, file: calico-kube-sa.yml, type: sa} + - {name: calico-kube-controllers, file: calico-kube-cr.yml, type: clusterrole} + - {name: calico-kube-controllers, file: calico-kube-crb.yml, type: clusterrolebinding} + register: calico_kube_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + - rbac_enabled or item.type not in rbac_resources + +- name: Start of Calico kube controllers + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ calico_kube_manifests.results }}" + register: calico_kube_controller_start + until: calico_kube_controller_start is succeeded + retries: 4 + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 new file mode 100644 index 0000000..f89e4d6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + k8s-app: calico-kube-controllers + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + nodeSelector: + {{ calico_policy_controller_deployment_nodeselector }} +{% if calico_datastore == "etcd" %} + hostNetwork: true +{% endif %} + serviceAccountName: calico-kube-controllers + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + 
effect: NoSchedule +{% if policy_controller_extra_tolerations is defined %} + {{ policy_controller_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }} +{% endif %} + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ calico_policy_controller_cpu_limit }} + memory: {{ calico_policy_controller_memory_limit }} + requests: + cpu: {{ calico_policy_controller_cpu_requests }} + memory: {{ calico_policy_controller_memory_requests }} + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 + env: +{% if calico_datastore == "kdd" %} + - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes +{% else %} + - name: ETCD_ENDPOINTS + value: "{{ etcd_access_addresses }}" + - name: ETCD_CA_CERT_FILE + value: "{{ calico_cert_dir }}/ca_cert.crt" + - name: ETCD_CERT_FILE + value: "{{ calico_cert_dir }}/cert.crt" + - name: ETCD_KEY_FILE + value: "{{ calico_cert_dir }}/key.pem" + volumeMounts: + - mountPath: {{ calico_cert_dir }} + name: etcd-certs + readOnly: true + volumes: + - hostPath: + path: {{ calico_cert_dir }} + name: etcd-certs +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 new file mode 100644 index 0000000..f74b291 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 @@ -0,0 +1,110 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers + namespace: kube-system +rules: +{% if calico_datastore == "etcd" %} + - apiGroups: + - "" + - extensions + resources: + - pods + - namespaces + - networkpolicies + - nodes + - serviceaccounts + verbs: + - watch + - list + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - watch + - list +{% elif calico_datastore == "kdd" %} + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are queried to check for existence. + - apiGroups: [""] + resources: + - pods + verbs: + - watch + - list + - get + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipreservations + verbs: + - list + # Pools are watched to maintain a mapping of blocks to IP pools. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - watch + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 new file mode 100644 index 0000000..8168056 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 new file mode 100644 index 0000000..269d0a1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/meta/main.yml new file mode 100644 index 0000000..3f46b8d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/policy_controller/meta/main.yml @@ -0,0 +1,8 @@ +--- +dependencies: + - role: policy_controller/calico + when: + - kube_network_plugin in ['calico', 'canal'] + - enable_network_policy + tags: + - policy-controller diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/defaults/main.yml new file mode 100644 index 0000000..6353b7c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/defaults/main.yml @@ -0,0 +1,48 @@ +--- +registry_namespace: "kube-system" +registry_storage_class: "" +registry_storage_access_mode: "ReadWriteOnce" +registry_disk_size: "10Gi" +registry_port: 5000 +registry_replica_count: 1 + +# type of service: ClusterIP, LoadBalancer or NodePort +registry_service_type: "ClusterIP" +# you can specify your cluster IP address when registry_service_type is ClusterIP +registry_service_cluster_ip: "" +# you can specify your cloud provider assigned loadBalancerIP when registry_service_type is LoadBalancer +registry_service_loadbalancer_ip: "" +# annotations for managing Cloud Load Balancers +registry_service_annotations: {} +# you can specify the node port when registry_service_type is NodePort +registry_service_nodeport: "" + +# name of kubernetes secret for registry TLS certs +registry_tls_secret: "" + +registry_htpasswd: "" + +# registry configuration +# see: https://docs.docker.com/registry/configuration/#list-of-configuration-options +registry_config: + version: 0.1 + log: + fields: + service: registry + storage: + cache: + 
blobdescriptor: inmemory + http: + addr: :{{ registry_port }} + headers: + X-Content-Type-Options: [nosniff] + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + +registry_ingress_annotations: {} +registry_ingress_host: "" +# name of kubernetes secret for registry ingress TLS certs +registry_ingress_tls_secret: "" diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/tasks/main.yml new file mode 100644 index 0000000..5090212 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/tasks/main.yml @@ -0,0 +1,109 @@ +--- +- name: Registry | check registry_service_type value + fail: + msg: "registry_service_type can only be 'ClusterIP', 'LoadBalancer' or 'NodePort'" + when: registry_service_type not in ['ClusterIP', 'LoadBalancer', 'NodePort'] + +- name: Registry | Stop if registry_service_cluster_ip is defined when registry_service_type is not 'ClusterIP' + fail: + msg: "registry_service_cluster_ip support only compatible with ClusterIP." + when: + - registry_service_cluster_ip is defined and registry_service_cluster_ip|length > 0 + - registry_service_type != "ClusterIP" + +- name: Registry | Stop if registry_service_loadbalancer_ip is defined when registry_service_type is not 'LoadBalancer' + fail: + msg: "registry_service_loadbalancer_ip support only compatible with LoadBalancer." + when: + - registry_service_loadbalancer_ip is defined and registry_service_loadbalancer_ip|length > 0 + - registry_service_type != "LoadBalancer" + +- name: Registry | Stop if registry_service_nodeport is defined when registry_service_type is not 'NodePort' + fail: + msg: "registry_service_nodeport support only compatible with NodePort." + when: + - registry_service_nodeport is defined and registry_service_nodeport|length > 0 + - registry_service_type != "NodePort" + +- name: Registry | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/registry" + state: directory + owner: root + group: root + mode: 0755 + +- name: Registry | Templates list + set_fact: + registry_templates: + - { name: registry-ns, file: registry-ns.yml, type: ns } + - { name: registry-sa, file: registry-sa.yml, type: sa } + - { name: registry-svc, file: registry-svc.yml, type: svc } + - { name: registry-secrets, file: registry-secrets.yml, type: secrets } + - { name: registry-cm, file: registry-cm.yml, type: cm } + - { name: registry-rs, file: registry-rs.yml, type: rs } + registry_templates_for_psp: + - { name: registry-psp, file: registry-psp.yml, type: psp } + - { name: registry-cr, file: registry-cr.yml, type: clusterrole } + - { name: registry-crb, file: registry-crb.yml, type: rolebinding } + +- name: Registry | Append extra templates to Registry Templates list for PodSecurityPolicy + set_fact: + registry_templates: "{{ registry_templates[:2] + registry_templates_for_psp + registry_templates[2:] }}" + when: + - podsecuritypolicy_enabled + - registry_namespace != "kube-system" + +- name: Registry | Append nginx ingress templates to Registry Templates list when ingress enabled + set_fact: + registry_templates: "{{ registry_templates + [item] }}" + with_items: + - [{ name: registry-ing, file: registry-ing.yml, type: ing }] + when: ingress_nginx_enabled or ingress_alb_enabled + +- name: Registry | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}" + mode: 0644 + with_items: "{{ registry_templates }}" + register: registry_manifests 
+ when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Registry | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ registry_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}" + state: "latest" + with_items: "{{ registry_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Registry | Create PVC manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}" + mode: 0644 + with_items: + - { name: registry-pvc, file: registry-pvc.yml, type: pvc } + register: registry_manifests + when: + - registry_storage_class != none and registry_storage_class + - registry_disk_size != none and registry_disk_size + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Registry | Apply PVC manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ registry_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}" + state: "latest" + with_items: "{{ registry_manifests.results }}" + when: + - registry_storage_class != none and registry_storage_class + - registry_disk_size != none and registry_disk_size + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-cm.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-cm.yml.j2 new file mode 100644 index 0000000..b633dfd --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-cm.yml.j2 @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: registry-config + namespace: {{ registry_namespace }} +{% if registry_config %} +data: + config.yml: |- + {{ registry_config | to_yaml(indent=2, width=1337) | indent(width=4) }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-cr.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-cr.yml.j2 new file mode 100644 index 0000000..45f3fc4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-cr.yml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: psp:registry + namespace: {{ registry_namespace }} +rules: + - apiGroups: + - policy + resourceNames: + - registry + resources: + - podsecuritypolicies + verbs: + - use diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-crb.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-crb.yml.j2 new file mode 100644 index 0000000..8589420 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-crb.yml.j2 @@ -0,0 +1,13 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp:registry + namespace: {{ registry_namespace }} +subjects: + - kind: ServiceAccount + name: registry + namespace: {{ registry_namespace }} +roleRef: + kind: ClusterRole + name: psp:registry + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-ing.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-ing.yml.j2 new file mode 100644 index 0000000..29dfbba --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-ing.yml.j2 @@ -0,0 +1,27 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: registry + namespace: {{ registry_namespace }} +{% if registry_ingress_annotations %} + annotations: + {{ registry_ingress_annotations | to_nice_yaml(indent=2, width=1337) | indent(width=4) }} +{% endif %} +spec: +{% if registry_ingress_tls_secret %} + tls: + - hosts: + - {{ registry_ingress_host }} + secretName: {{ registry_ingress_tls_secret }} +{% endif %} + rules: + - host: {{ registry_ingress_host }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: registry + port: + number: {{ registry_port }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 new file mode 100644 index 0000000..c224337 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ registry_namespace }} + labels: + name: {{ registry_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-psp.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-psp.yml.j2 new file mode 100644 index 0000000..b04d8c2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-psp.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: registry + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 new file mode 100644 index 0000000..dc3fa5a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: registry-pvc + namespace: {{ registry_namespace }} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + accessModes: + - {{ registry_storage_access_mode }} + storageClassName: {{ registry_storage_class }} + resources: + requests: + storage: {{ registry_disk_size }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 new file mode 100644 index 0000000..47519f9 --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 @@ -0,0 +1,115 @@ +--- +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: registry + namespace: {{ registry_namespace }} + labels: + k8s-app: registry + version: v{{ registry_image_tag }} + addonmanager.kubernetes.io/mode: Reconcile +spec: +{% if registry_storage_class != "" and registry_storage_access_mode == "ReadWriteMany" %} + replicas: {{ registry_replica_count }} +{% else %} + replicas: 1 +{% endif %} + selector: + matchLabels: + k8s-app: registry + version: v{{ registry_image_tag }} + template: + metadata: + labels: + k8s-app: registry + version: v{{ registry_image_tag }} + spec: + priorityClassName: {% if registry_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + serviceAccountName: registry + securityContext: + fsGroup: 1000 + runAsUser: 1000 + containers: + - name: registry + image: {{ registry_image_repo }}:{{ registry_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /bin/registry + - serve + - /etc/docker/registry/config.yml + env: + - name: REGISTRY_HTTP_ADDR + value: :{{ registry_port }} + - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY + value: /var/lib/registry +{% if registry_htpasswd != "" %} + - name: REGISTRY_AUTH + value: "htpasswd" + - name: REGISTRY_AUTH_HTPASSWD_REALM + value: "Registry Realm" + - name: REGISTRY_AUTH_HTPASSWD_PATH + value: "/auth/htpasswd" +{% endif %} +{% if registry_tls_secret != "" %} + - name: REGISTRY_HTTP_TLS_CERTIFICATE + value: /etc/ssl/docker/tls.crt + - name: REGISTRY_HTTP_TLS_KEY + value: /etc/ssl/docker/tls.key +{% endif %} + volumeMounts: + - name: registry-pvc + mountPath: /var/lib/registry + - name: registry-config + mountPath: /etc/docker/registry +{% if registry_htpasswd != "" %} + - name: auth + mountPath: /auth + readOnly: true +{% endif %} +{% if registry_tls_secret != "" %} + - name: tls-cert + mountPath: /etc/ssl/docker + readOnly: true +{% endif %} + ports: + - containerPort: {{ registry_port }} + name: registry + protocol: TCP + livenessProbe: + httpGet: +{% if registry_tls_secret != "" %} + scheme: HTTPS +{% endif %} + path: / + port: {{ registry_port }} + readinessProbe: + httpGet: +{% if registry_tls_secret != "" %} + scheme: HTTPS +{% endif %} + path: / + port: {{ registry_port }} + volumes: + - name: registry-pvc +{% if registry_storage_class != "" %} + persistentVolumeClaim: + claimName: registry-pvc +{% else %} + emptyDir: {} +{% endif %} + - name: registry-config + configMap: + name: registry-config +{% if registry_htpasswd != "" %} + - name: auth + secret: + secretName: registry-secret + items: + - key: htpasswd + path: htpasswd +{% endif %} +{% if registry_tls_secret != "" %} + - name: tls-cert + secret: + secretName: {{ registry_tls_secret }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-sa.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-sa.yml.j2 new file mode 100644 index 0000000..20f9515 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-sa.yml.j2 @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: registry + namespace: {{ registry_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-secrets.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-secrets.yml.j2 new file mode 100644 index 0000000..80727d2 --- 
/dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-secrets.yml.j2 @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: registry-secret + namespace: {{ registry_namespace }} +type: Opaque +data: +{% if registry_htpasswd != "" %} + htpasswd: {{ registry_htpasswd | b64encode }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 new file mode 100644 index 0000000..5485aa8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 @@ -0,0 +1,32 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: registry + namespace: {{ registry_namespace }} + labels: + k8s-app: registry + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeRegistry" +{% if registry_service_annotations %} + annotations: + {{ registry_service_annotations | to_nice_yaml(indent=2, width=1337) | indent(width=4) }} +{% endif %} +spec: + selector: + k8s-app: registry + type: {{ registry_service_type }} +{% if registry_service_type == "ClusterIP" and registry_service_cluster_ip != "" %} + clusterIP: {{ registry_service_cluster_ip }} +{% endif %} +{% if registry_service_type == "LoadBalancer" and registry_service_loadbalancer_ip != "" %} + loadBalancerIP: {{ registry_service_loadbalancer_ip }} +{% endif %} + ports: + - name: registry + port: {{ registry_port }} + protocol: TCP + targetPort: {{ registry_port }} +{% if registry_service_type == "NodePort" and registry_service_nodeport != "" %} + nodePort: {{ registry_service_nodeport }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/cinder-csi/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/cinder-csi/defaults/main.yml new file mode 100644 index 0000000..7b5dd73 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/cinder-csi/defaults/main.yml @@ -0,0 +1,5 @@ +--- +snapshot_classes: + - name: cinder-csi-snapshot + is_default: false + force_create: true diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml new file mode 100644 index 0000000..7e9116f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: Kubernetes Snapshots | Copy Cinder CSI Snapshot Class template + template: + src: "cinder-csi-snapshot-class.yml.j2" + dest: "{{ kube_config_dir }}/cinder-csi-snapshot-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Snapshots | Add Cinder CSI Snapshot Class + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/cinder-csi-snapshot-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/cinder-csi/templates/cinder-csi-snapshot-class.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/cinder-csi/templates/cinder-csi-snapshot-class.yml.j2 new file mode 100644 index 0000000..b7e649f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/cinder-csi/templates/cinder-csi-snapshot-class.yml.j2 @@ -0,0 +1,13 @@ +{% for class in snapshot_classes %} +--- +kind: 
VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1beta1 +metadata: + name: "{{ class.name }}" + annotations: + storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}" +driver: cinder.csi.openstack.org +deletionPolicy: Delete +parameters: + force-create: "{{ class.force_create }}" +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/meta/main.yml new file mode 100644 index 0000000..0eed56c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/meta/main.yml @@ -0,0 +1,14 @@ +--- +dependencies: + - role: kubernetes-apps/snapshots/snapshot-controller + when: + - cinder_csi_enabled or csi_snapshot_controller_enabled + tags: + - snapshot-controller + + - role: kubernetes-apps/snapshots/cinder-csi + when: + - cinder_csi_enabled + tags: + - snapshot + - cinder-csi-driver diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/defaults/main.yml new file mode 100644 index 0000000..c72dfb2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/defaults/main.yml @@ -0,0 +1,3 @@ +--- +snapshot_controller_replicas: 1 +snapshot_controller_namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml new file mode 100644 index 0000000..8663e8a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: check if snapshot namespace exists + register: snapshot_namespace_exists + kube: + kubectl: "{{ bin_dir }}/kubectl" + name: "{{ snapshot_controller_namespace }}" + resource: "namespace" + state: "exists" + when: inventory_hostname == groups['kube_control_plane'][0] + tags: snapshot-controller + +- name: Snapshot Controller | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: snapshot-ns, file: snapshot-ns.yml, apply: not snapshot_namespace_exists} + - {name: rbac-snapshot-controller, file: rbac-snapshot-controller.yml} + - {name: snapshot-controller, file: snapshot-controller.yml} + register: snapshot_controller_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + - item.apply | default(True) | bool + tags: snapshot-controller + +- name: Snapshot Controller | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ snapshot_controller_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + tags: snapshot-controller diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/templates/rbac-snapshot-controller.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/templates/rbac-snapshot-controller.yml.j2 new file mode 100644 index 0000000..9413376 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/templates/rbac-snapshot-controller.yml.j2 @@ -0,0 +1,85 @@ +# RBAC file for the 
snapshot controller. +# +# The snapshot controller implements the control loop for CSI snapshot functionality. +# It should be installed as part of the base Kubernetes distribution in an appropriate +# namespace for components implementing base system functionality. For installing with +# Vanilla Kubernetes, kube-system makes sense for the namespace. + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: snapshot-controller + namespace: {{ snapshot_controller_namespace }} + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # rename if there are conflicts + name: snapshot-controller-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-role +subjects: + - kind: ServiceAccount + name: snapshot-controller + namespace: {{ snapshot_controller_namespace }} +roleRef: + kind: ClusterRole + # change the name also here if the ClusterRole gets renamed + name: snapshot-controller-runner + apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: {{ snapshot_controller_namespace }} + name: snapshot-controller-leaderelection +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-leaderelection + namespace: {{ snapshot_controller_namespace }} +subjects: + - kind: ServiceAccount + name: snapshot-controller + namespace: {{ snapshot_controller_namespace }} +roleRef: + kind: Role + name: snapshot-controller-leaderelection + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-controller.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-controller.yml.j2 new file mode 100644 index 0000000..d17ffb3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-controller.yml.j2 @@ -0,0 +1,40 @@ +# This YAML file shows how to deploy the snapshot controller + +# The snapshot controller implements the control loop for CSI snapshot functionality. +# It should be installed as part of the base Kubernetes distribution in an appropriate +# namespace for components implementing base system functionality. For installing with +# Vanilla Kubernetes, kube-system makes sense for the namespace. 
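+# +# Illustration only (an assumed example, not part of the upstream template): once the snapshot +# controller and a VolumeSnapshotClass such as the cinder-csi-snapshot class defined earlier are +# installed, a snapshot of an existing PVC can be requested with a manifest along these lines: +# apiVersion: snapshot.storage.k8s.io/v1beta1 +# kind: VolumeSnapshot +# metadata: +#   name: example-snapshot +# spec: +#   volumeSnapshotClassName: cinder-csi-snapshot +#   source: +#     persistentVolumeClaimName: example-pvc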
+ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: snapshot-controller + namespace: {{ snapshot_controller_namespace }} +spec: + replicas: {{ snapshot_controller_replicas }} + selector: + matchLabels: + app: snapshot-controller + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: snapshot-controller + spec: + serviceAccount: snapshot-controller + containers: + - name: snapshot-controller + image: {{ snapshot_controller_image_repo }}:{{ snapshot_controller_image_tag }} + args: + - "--v=5" + - "--leader-election=false" + imagePullPolicy: {{ k8s_image_pull_policy }} diff --git a/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-ns.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-ns.yml.j2 new file mode 100644 index 0000000..bb30d60 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ snapshot_controller_namespace }} + labels: + name: {{ snapshot_controller_namespace }} diff --git a/kubespray/extra_playbooks/roles/kubernetes/client/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes/client/defaults/main.yml new file mode 100644 index 0000000..83506a4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/client/defaults/main.yml @@ -0,0 +1,8 @@ +--- +kubeconfig_localhost: false +kubeconfig_localhost_ansible_host: false +kubectl_localhost: false +artifacts_dir: "{{ inventory_dir }}/artifacts" + +kube_config_dir: "/etc/kubernetes" +kube_apiserver_port: "6443" diff --git a/kubespray/extra_playbooks/roles/kubernetes/client/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes/client/tasks/main.yml new file mode 100644 index 0000000..cb9e81e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/client/tasks/main.yml @@ -0,0 +1,112 @@ +--- +- name: Set external kube-apiserver endpoint + set_fact: + external_apiserver_address: >- + {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined -%} + {{ loadbalancer_apiserver.address }} + {%- elif kubeconfig_localhost_ansible_host is defined and kubeconfig_localhost_ansible_host -%} + {{ hostvars[groups['kube_control_plane'][0]].ansible_host }} + {%- else -%} + {{ kube_apiserver_access_address }} + {%- endif -%} + external_apiserver_port: >- + {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and loadbalancer_apiserver.port is defined -%} + {{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} + {%- else -%} + {{ kube_apiserver_port }} + {%- endif -%} + tags: + - facts + +- name: Create kube config dir for current/ansible become user + file: + path: "{{ ansible_env.HOME | default('/root') }}/.kube" + mode: "0700" + state: directory + +- name: Copy admin kubeconfig to current/ansible become user home + copy: + src: "{{ kube_config_dir }}/admin.conf" + dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config" + remote_src: yes + mode: "0600" + backup: yes + +- name: Create kube artifacts dir + file: + path: "{{ artifacts_dir }}" + mode: "0750" + state: directory + 
delegate_to: localhost + connection: local + become: no + run_once: yes + when: kubeconfig_localhost + +- name: Wait for k8s apiserver + wait_for: + host: "{{ kube_apiserver_access_address }}" + port: "{{ kube_apiserver_port }}" + timeout: 180 + +- name: Get admin kubeconfig from remote host + slurp: + src: "{{ kube_config_dir }}/admin.conf" + run_once: yes + register: raw_admin_kubeconfig + when: kubeconfig_localhost + +- name: Convert kubeconfig to YAML + set_fact: + admin_kubeconfig: "{{ raw_admin_kubeconfig.content | b64decode | from_yaml }}" + when: kubeconfig_localhost + +- name: Override username in kubeconfig + set_fact: + final_admin_kubeconfig: "{{ admin_kubeconfig | combine(override_cluster_name, recursive=true) | combine(override_context, recursive=true) | combine(override_user, recursive=true) }}" + vars: + cluster_infos: "{{ admin_kubeconfig['clusters'][0]['cluster'] }}" + user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}" + username: "kubernetes-admin-{{ cluster_name }}" + context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}" + override_cluster_name: "{{ { 'clusters': [ { 'cluster': (cluster_infos|combine({'server': 'https://'+external_apiserver_address+':'+(external_apiserver_port|string)})), 'name': cluster_name } ] } }}" + override_context: "{{ { 'contexts': [ { 'context': { 'user': username, 'cluster': cluster_name }, 'name': context } ], 'current-context': context } }}" + override_user: "{{ { 'users': [ { 'name': username, 'user': user_certs } ] } }}" + when: kubeconfig_localhost + +- name: Write admin kubeconfig on ansible host + copy: + content: "{{ final_admin_kubeconfig | to_nice_yaml(indent=2) }}" + dest: "{{ artifacts_dir }}/admin.conf" + mode: 0600 + delegate_to: localhost + connection: local + become: no + run_once: yes + when: kubeconfig_localhost + +- name: Copy kubectl binary to ansible host + fetch: + src: "{{ bin_dir }}/kubectl" + dest: "{{ artifacts_dir }}/kubectl" + flat: yes + validate_checksum: no + register: copy_binary_result + until: copy_binary_result is not failed + retries: 20 + become: no + run_once: yes + when: kubectl_localhost + +- name: create helper script kubectl.sh on ansible host + copy: + content: | + #!/bin/bash + ${BASH_SOURCE%/*}/kubectl --kubeconfig=${BASH_SOURCE%/*}/admin.conf "$@" + dest: "{{ artifacts_dir }}/kubectl.sh" + mode: 0755 + become: no + run_once: yes + delegate_to: localhost + connection: local + when: kubectl_localhost and kubeconfig_localhost diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/etcd.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/etcd.yml new file mode 100644 index 0000000..344ce9b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/etcd.yml @@ -0,0 +1,31 @@ +--- +# Set etcd user/group +etcd_owner: etcd + +# Note: This does not set up DNS entries. 
It simply adds the following DNS +# entries to the certificate +etcd_cert_alt_names: + - "etcd.kube-system.svc.{{ dns_domain }}" + - "etcd.kube-system.svc" + - "etcd.kube-system" + - "etcd" +etcd_cert_alt_ips: [] + +etcd_heartbeat_interval: "250" +etcd_election_timeout: "5000" + +# etcd_snapshot_count: "10000" + +etcd_metrics: "basic" + +## A dictionary of extra environment variables to add to etcd.env, formatted like: +## etcd_extra_vars: +## var1: "value1" +## var2: "value2" +## Note this is different from the etcd role, which uses the ETCD_ prefix, caps, and underscores +etcd_extra_vars: {} + +# etcd_quota_backend_bytes: "2147483648" +# etcd_max_request_bytes: "1572864" + +etcd_compaction_retention: "8" diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml new file mode 100644 index 0000000..52346fa --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml @@ -0,0 +1,118 @@ +--- +# bind address for kube-proxy +kube_proxy_bind_address: '0.0.0.0' + +# acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the +# default value of 'application/json'. This field will control all connections to the server used by a particular +# client. +kube_proxy_client_accept_content_types: '' + +# burst allows extra queries to accumulate when a client is exceeding its rate. +kube_proxy_client_burst: 10 + +# contentType is the content type used when sending data to the server from this client. +kube_proxy_client_content_type: application/vnd.kubernetes.protobuf + +# kubeconfig is the path to a KubeConfig file. +# Leave as empty string to generate from other fields +kube_proxy_client_kubeconfig: '' + +# qps controls the number of queries per second allowed for this connection. +kube_proxy_client_qps: 5 + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +kube_proxy_config_sync_period: 15m0s + +### Conntrack +# maxPerCore is the maximum number of NAT connections to track +# per CPU core (0 to leave the limit as-is and ignore min). +kube_proxy_conntrack_max_per_core: 32768 + +# min is the minimum value of conntrack records to allocate, +# regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is). +kube_proxy_conntrack_min: 131072 + +# tcpCloseWaitTimeout is how long an idle conntrack entry +# in CLOSE_WAIT state will remain in the conntrack +# table. (e.g. '60s'). Must be greater than 0 to set. +kube_proxy_conntrack_tcp_close_wait_timeout: 1h0m0s + +# tcpEstablishedTimeout is how long an idle TCP connection will be kept open +# (e.g. '2s'). Must be greater than 0 to set. +kube_proxy_conntrack_tcp_established_timeout: 24h0m0s + +# Enables profiling via web interface on /debug/pprof handler. +# Profiling handlers will be handled by metrics server. +kube_proxy_enable_profiling: false + +# bind address for kube-proxy health check +kube_proxy_healthz_bind_address: 0.0.0.0:10256 + +# If using the pure iptables proxy, SNAT everything. Note that it breaks any +# policy engine. +kube_proxy_masquerade_all: false + +# If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. +# Must be within the range [0, 31]. +kube_proxy_masquerade_bit: 14 + +# The minimum interval of how often the iptables or ipvs rules can be refreshed as +# endpoints and services change (e.g. '5s', '1m', '2h22m').
+kube_proxy_min_sync_period: 0s + +# The maximum interval of how often iptables or ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). +# Must be greater than 0. +kube_proxy_sync_period: 30s + +# A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. +kube_proxy_exclude_cidrs: [] + +# The ipvs scheduler type when proxy mode is ipvs +# rr: round-robin +# lc: least connection +# dh: destination hashing +# sh: source hashing +# sed: shortest expected delay +# nq: never queue +kube_proxy_scheduler: rr + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# kube_proxy_tcp_timeout is the timeout value used for idle IPVS TCP sessions. +# The default value is 0, which preserves the current timeout value on the system. +kube_proxy_tcp_timeout: 0s + +# kube_proxy_tcp_fin_timeout is the timeout value used for IPVS TCP sessions after receiving a FIN. +# The default value is 0, which preserves the current timeout value on the system. +kube_proxy_tcp_fin_timeout: 0s + +# kube_proxy_udp_timeout is the timeout value used for IPVS UDP packets. +# The default value is 0, which preserves the current timeout value on the system. +kube_proxy_udp_timeout: 0s + +# The IP address and port for the metrics server to serve on +# (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces) +kube_proxy_metrics_bind_address: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +kube_proxy_oom_score_adj: -999 + +# portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed +# in order to proxy service traffic. If unspecified, 0, or (0-0) then ports will be randomly chosen. +kube_proxy_port_range: '' + +# udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxyMode=userspace. +kube_proxy_udp_idle_timeout: 250ms diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/kube-scheduler.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/kube-scheduler.yml new file mode 100644 index 0000000..e61bcb7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/kube-scheduler.yml @@ -0,0 +1,33 @@ +--- +# Extra args passed by kubeadm +kube_kubeadm_scheduler_extra_args: {} + +# Associated interface must be reachable by the rest of the cluster, and by +# CLI/web clients. +kube_scheduler_bind_address: 0.0.0.0 + +# ClientConnection options (e.g. Burst, QPS) except from kubeconfig. +kube_scheduler_client_conn_extra_opts: {} + +# Additional KubeSchedulerConfiguration settings (e.g. metricsBindAddress). +kube_scheduler_config_extra_opts: {} + +# List of scheduler extenders (dicts), each holding the values of how to +# communicate with the extender. +kube_scheduler_extenders: [] + +# Leader Election options (e.g. 
ResourceName, RetryPeriod), except for +# LeaseDuration and RenewDeadline, which are defined in the following vars. +kube_scheduler_leader_elect_extra_opts: {} + +# Leader election lease duration +kube_scheduler_leader_elect_lease_duration: 15s + +# Leader election renew deadline +kube_scheduler_leader_elect_renew_deadline: 10s + +# List of scheduling profiles (dicts) supported by kube-scheduler +kube_scheduler_profiles: [] + +# Extra volume mounts +scheduler_extra_volumes: {} diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/main.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/main.yml new file mode 100644 index 0000000..32cabb9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/defaults/main/main.yml @@ -0,0 +1,230 @@ +--- +# disable upgrade cluster +upgrade_cluster_setup: false + +# By default the external API listens on all interfaces, this can be changed to +# listen on a specific address/interface. +# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost, +# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} too. +kube_apiserver_bind_address: 0.0.0.0 + +# A port range to reserve for services with NodePort visibility. +# Inclusive at both ends of the range. +kube_apiserver_node_port_range: "30000-32767" + +# ETCD backend for k8s data +kube_apiserver_storage_backend: etcd3 + +# CIS 1.2.26 +# Validate that the service account token +# in the request is actually present in etcd. +kube_apiserver_service_account_lookup: true + +kube_etcd_cacert_file: ca.pem +kube_etcd_cert_file: node-{{ inventory_hostname }}.pem +kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem + +# Associated interfaces must be reachable by the rest of the cluster, and by +# CLI/web clients.
+kube_controller_manager_bind_address: 0.0.0.0 + +# Leader election lease durations and timeouts for controller-manager +kube_controller_manager_leader_elect_lease_duration: 15s +kube_controller_manager_leader_elect_renew_deadline: 10s + +# discovery_timeout modifies the discovery timeout +discovery_timeout: 5m0s + +# Instruct first master to refresh kubeadm token +kubeadm_refresh_token: true + +# Scale down coredns replicas to 0 if not using coredns dns_mode +kubeadm_scale_down_coredns_enabled: true + +# audit support +kubernetes_audit: false +# path to audit log file +audit_log_path: /var/log/audit/kube-apiserver-audit.log +# num days +audit_log_maxage: 30 +# the num of audit logs to retain +audit_log_maxbackups: 1 +# the max size in MB to retain +audit_log_maxsize: 100 +# policy file +audit_policy_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml" +# custom audit policy rules (to replace the default ones) +# audit_policy_custom_rules: | +# - level: None +# users: [] +# verbs: [] +# resources: [] + +# audit log hostpath +audit_log_name: audit-logs +audit_log_hostpath: /var/log/kubernetes/audit +audit_log_mountpath: "{{ audit_log_path | dirname }}" + +# audit policy hostpath +audit_policy_name: audit-policy +audit_policy_hostpath: "{{ audit_policy_file | dirname }}" +audit_policy_mountpath: "{{ audit_policy_hostpath }}" + +# audit webhook support +kubernetes_audit_webhook: false + +# path to audit webhook config file +audit_webhook_config_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-webhook-config.yaml" +audit_webhook_server_url: "https://audit.app" +audit_webhook_server_extra_args: {} +audit_webhook_mode: batch +audit_webhook_batch_max_size: 100 +audit_webhook_batch_max_wait: 1s + +kube_controller_node_monitor_grace_period: 40s +kube_controller_node_monitor_period: 5s +kube_controller_terminated_pod_gc_threshold: 12500 +kube_apiserver_request_timeout: "1m0s" +kube_apiserver_pod_eviction_not_ready_timeout_seconds: "300" +kube_apiserver_pod_eviction_unreachable_timeout_seconds: "300" + +# 1.10+ admission plugins +kube_apiserver_enable_admission_plugins: [] + +# enable admission plugins configuration +kube_apiserver_admission_control_config_file: false + +# data structure to configure EventRateLimit admission plugin +# this should have the following structure: +# kube_apiserver_admission_event_rate_limits: +# : +# type: +# qps: +# burst: +# cache_size: +kube_apiserver_admission_event_rate_limits: {} + +kube_pod_security_use_default: false +kube_pod_security_default_enforce: baseline +kube_pod_security_default_enforce_version: latest +kube_pod_security_default_audit: restricted +kube_pod_security_default_audit_version: latest +kube_pod_security_default_warn: restricted +kube_pod_security_default_warn_version: latest +kube_pod_security_exemptions_usernames: [] +kube_pod_security_exemptions_runtime_class_names: [] +kube_pod_security_exemptions_namespaces: + - kube-system + +# 1.10+ list of disabled admission plugins +kube_apiserver_disable_admission_plugins: [] + +# extra runtime config +kube_api_runtime_config: [] + +## Enable/Disable Kube API Server Authentication Methods +kube_token_auth: false +kube_oidc_auth: false + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... 
+## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## Variables for webhook token authz https://kubernetes.io/docs/reference/access-authn-authz/webhook/ +# kube_webhook_authorization_url: https://... +kube_webhook_authorization: false +kube_webhook_authorization_url_skip_tls_verify: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' +# Copy oidc CA file to the following path if needed +# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem +# Optionally include a base64-encoded oidc CA cert +# kube_oidc_ca_cert: c3RhY2thYnVzZS5jb20... + +# List of the preferred NodeAddressTypes to use for kubelet connections. +kubelet_preferred_address_types: 'InternalDNS,InternalIP,Hostname,ExternalDNS,ExternalIP' + +## Extra args for k8s components passing by kubeadm +kube_kubeadm_apiserver_extra_args: {} +kube_kubeadm_controller_extra_args: {} + +## Extra control plane host volume mounts +## Example: +# apiserver_extra_volumes: +# - name: name +# hostPath: /host/path +# mountPath: /mount/path +# readOnly: true +apiserver_extra_volumes: {} +controller_manager_extra_volumes: {} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false +kube_encrypt_token: "{{ lookup('password', credentials_dir + '/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}" +# Must be either: aescbc, secretbox or aesgcm +kube_encryption_algorithm: "secretbox" +# Which kubernetes resources to encrypt +kube_encryption_resources: [secrets] + +# If non-empty, will use this string as identification instead of the actual hostname +kube_override_hostname: >- + {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- else -%} + {{ inventory_hostname }} + {%- endif -%} + +secrets_encryption_query: "resources[*].providers[0].{{kube_encryption_algorithm}}.keys[0].secret" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. 
(default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +auto_renew_certificates_systemd_calendar: "{{ 'Mon *-*-1,2,3,4,5,6,7 03:' ~ + groups['kube_control_plane'].index(inventory_hostname) ~ '0:00' }}" +# kubeadm renews all the certificates during control plane upgrade. +# If we have requirement like without renewing certs upgrade the cluster, +# we can opt out from the default behavior by setting kubeadm_upgrade_auto_cert_renewal to false +kubeadm_upgrade_auto_cert_renewal: true diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/handlers/main.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/handlers/main.yml new file mode 100644 index 0000000..e6bc321 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/handlers/main.yml @@ -0,0 +1,123 @@ +--- +- name: Master | restart kubelet + command: /bin/true + notify: + - Master | reload systemd + - Master | reload kubelet + - Master | wait for master static pods + +- name: Master | wait for master static pods + command: /bin/true + notify: + - Master | wait for the apiserver to be running + - Master | wait for kube-scheduler + - Master | wait for kube-controller-manager + +- name: Master | Restart apiserver + command: /bin/true + notify: + - Master | Remove apiserver container docker + - Master | Remove apiserver container containerd/crio + - Master | wait for the apiserver to be running + +- name: Master | Restart kube-scheduler + command: /bin/true + notify: + - Master | Remove scheduler container docker + - Master | Remove scheduler container containerd/crio + - Master | wait for kube-scheduler + +- name: Master | Restart kube-controller-manager + command: /bin/true + notify: + - Master | Remove controller manager container docker + - Master | Remove controller manager container containerd/crio + - Master | wait for kube-controller-manager + +- name: Master | reload systemd + systemd: + daemon_reload: true + +- name: Master | reload kubelet + service: + name: kubelet + state: restarted + +- name: Master | Remove apiserver container docker + shell: docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f + register: remove_apiserver_container + retries: 10 + until: remove_apiserver_container.rc == 0 + delay: 1 + when: container_manager == "docker" + +- name: Master | Remove apiserver container containerd/crio + shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + register: remove_apiserver_container + retries: 10 + until: remove_apiserver_container.rc == 0 + delay: 1 + when: container_manager in ['containerd', 'crio'] + +- name: Master | Remove scheduler container docker + shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + register: remove_scheduler_container + retries: 10 + until: remove_scheduler_container.rc == 0 + delay: 1 + when: container_manager == "docker" + +- name: Master | Remove scheduler container containerd/crio + shell: "{{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + register: remove_scheduler_container + retries: 10 + until: remove_scheduler_container.rc == 0 + delay: 1 + when: container_manager in ['containerd', 
'crio'] + +- name: Master | Remove controller manager container docker + shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + register: remove_cm_container + retries: 10 + until: remove_cm_container.rc == 0 + delay: 1 + when: container_manager == "docker" + +- name: Master | Remove controller manager container containerd/crio + shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + register: remove_cm_container + retries: 10 + until: remove_cm_container.rc == 0 + delay: 1 + when: container_manager in ['containerd', 'crio'] + +- name: Master | wait for kube-scheduler + vars: + endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}" + uri: + url: https://{{ endpoint }}:10259/healthz + validate_certs: no + register: scheduler_result + until: scheduler_result.status == 200 + retries: 60 + delay: 1 + +- name: Master | wait for kube-controller-manager + vars: + endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}" + uri: + url: https://{{ endpoint }}:10257/healthz + validate_certs: no + register: controller_manager_result + until: controller_manager_result.status == 200 + retries: 60 + delay: 1 + +- name: Master | wait for the apiserver to be running + uri: + url: "{{ kube_apiserver_endpoint }}/healthz" + validate_certs: no + register: result + until: result.status == 200 + retries: 60 + delay: 1 diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/meta/main.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/meta/main.yml new file mode 100644 index 0000000..2657006 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/meta/main.yml @@ -0,0 +1,11 @@ +--- +dependencies: + - role: kubernetes/tokens + when: kube_token_auth + tags: + - k8s-secrets + - role: adduser + user: "{{ addusers.etcd }}" + when: + - etcd_deployment_type == "kubeadm" + - not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml new file mode 100644 index 0000000..d01f511 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml @@ -0,0 +1,19 @@ +--- + +- name: Check which kube-control nodes are already members of the cluster + command: "{{ bin_dir }}/kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o json" + register: kube_control_planes_raw + ignore_errors: yes + changed_when: false + +- name: Set fact joined_control_planes + set_fact: + joined_control_planes: "{{ ((kube_control_planes_raw.stdout| from_json)['items'])| default([]) | map (attribute='metadata') | map (attribute='name') | list }}" + delegate_to: "{{ item }}" + loop: "{{ groups['kube_control_plane'] }}" + when: kube_control_planes_raw is succeeded + run_once: yes + +- name: Set fact first_kube_control_plane + set_fact: + first_kube_control_plane: "{{ joined_control_planes|default([]) | first | default(groups['kube_control_plane']|first) }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml
b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml new file mode 100644 index 0000000..b88f57c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml @@ -0,0 +1,42 @@ +--- +- name: Check if secret for encrypting data at rest already exist + stat: + path: "{{ kube_cert_dir }}/secrets_encryption.yaml" + get_attributes: no + get_checksum: no + get_mime: no + register: secrets_encryption_file + +- name: Slurp secrets_encryption file if it exists + slurp: + src: "{{ kube_cert_dir }}/secrets_encryption.yaml" + register: secret_file_encoded + when: secrets_encryption_file.stat.exists + +- name: Base 64 Decode slurped secrets_encryption.yaml file + set_fact: + secret_file_decoded: "{{ secret_file_encoded['content'] | b64decode | from_yaml }}" + when: secrets_encryption_file.stat.exists + +- name: Extract secret value from secrets_encryption.yaml + set_fact: + kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}" + when: secrets_encryption_file.stat.exists + +- name: Set kube_encrypt_token across master nodes + set_fact: + kube_encrypt_token: "{{ kube_encrypt_token_extracted }}" + delegate_to: "{{ item }}" + delegate_facts: true + with_inventory_hostnames: kube_control_plane + when: kube_encrypt_token_extracted is defined + +- name: Write secrets for encrypting secret data at rest + template: + src: secrets_encryption.yaml.j2 + dest: "{{ kube_cert_dir }}/secrets_encryption.yaml" + owner: root + group: "{{ kube_cert_group }}" + mode: 0640 + tags: + - kube-apiserver diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml new file mode 100644 index 0000000..36bb627 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml @@ -0,0 +1,28 @@ +--- +- name: Backup old certs and keys + copy: + src: "{{ kube_cert_dir }}/{{ item }}" + dest: "{{ kube_cert_dir }}/{{ item }}.old" + mode: preserve + remote_src: yes + with_items: + - apiserver.crt + - apiserver.key + - apiserver-kubelet-client.crt + - apiserver-kubelet-client.key + - front-proxy-client.crt + - front-proxy-client.key + ignore_errors: true # noqa ignore-errors + +- name: Backup old confs + copy: + src: "{{ kube_config_dir }}/{{ item }}" + dest: "{{ kube_config_dir }}/{{ item }}.old" + mode: preserve + remote_src: yes + with_items: + - admin.conf + - controller-manager.conf + - kubelet.conf + - scheduler.conf + ignore_errors: true # noqa ignore-errors diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-etcd.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-etcd.yml new file mode 100644 index 0000000..ae47354 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-etcd.yml @@ -0,0 +1,26 @@ +--- +- name: Calculate etcd cert serial + command: "openssl x509 -in {{ kube_cert_dir }}/apiserver-etcd-client.crt -noout -serial" + register: "etcd_client_cert_serial_result" + changed_when: false + tags: + - network + +- name: Set etcd_client_cert_serial + set_fact: + etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}" + tags: + - network + +- name: Ensure etcdctl script is installed + import_role: + name: etcdctl + when: etcd_deployment_type == "kubeadm" + +- name: Set ownership for etcd data directory + file: + path: "{{ 
etcd_data_dir }}" + owner: "{{ etcd_owner }}" + group: "{{ etcd_owner }}" + mode: 0700 + when: etcd_deployment_type == "kubeadm" diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml new file mode 100644 index 0000000..8f2f38e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml @@ -0,0 +1,24 @@ +--- + +- name: Update server field in component kubeconfigs + lineinfile: + dest: "{{ kube_config_dir }}/{{ item }}" + regexp: '^ server: https' + line: ' server: {{ kube_apiserver_endpoint }}' + backup: yes + with_items: + - admin.conf + - controller-manager.conf + - kubelet.conf + - scheduler.conf + notify: + - "Master | Restart kube-controller-manager" + - "Master | Restart kube-scheduler" + - "Master | reload kubelet" + +- name: Update etcd-servers for apiserver + lineinfile: + dest: "{{ kube_config_dir }}/manifests/kube-apiserver.yaml" + regexp: '^ - --etcd-servers=' + line: ' - --etcd-servers={{ etcd_access_addresses }}' + when: etcd_deployment_type != "kubeadm" diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml new file mode 100644 index 0000000..a4869fe --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml @@ -0,0 +1,79 @@ +--- +- name: Set kubeadm_discovery_address + set_fact: + kubeadm_discovery_address: >- + {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%} + {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} + {%- else -%} + {{ kube_apiserver_endpoint | regex_replace('https://', '') }} + {%- endif %} + tags: + - facts + +- name: Upload certificates so they are fresh and not expired + command: >- + {{ bin_dir }}/kubeadm init phase + --config {{ kube_config_dir }}/kubeadm-config.yaml + upload-certs + --upload-certs + register: kubeadm_upload_cert + when: + - inventory_hostname == first_kube_control_plane + - not kube_external_ca_mode + +- name: Parse certificate key if not set + set_fact: + kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}" + run_once: yes + when: + - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined + - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped + +- name: Create kubeadm ControlPlane config + template: + src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2" + dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml" + mode: 0640 + backup: yes + when: + - inventory_hostname != first_kube_control_plane + - not kubeadm_already_run.stat.exists + +- name: Wait for k8s apiserver + wait_for: + host: "{{ kubeadm_discovery_address.split(':')[0] }}" + port: "{{ kubeadm_discovery_address.split(':')[1] }}" + timeout: 180 + + +- name: check already run + debug: + msg: "{{ kubeadm_already_run.stat.exists }}" + +- name: Reset cert directory + shell: >- + if [ -f /etc/kubernetes/manifests/kube-apiserver.yaml ]; then + {{ bin_dir }}/kubeadm reset -f --cert-dir {{ kube_cert_dir }}; + fi + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" + when: + - inventory_hostname != first_kube_control_plane + - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists + - not kube_external_ca_mode + +- 
name: Joining control plane node to the cluster. + command: >- + {{ bin_dir }}/kubeadm join + --config {{ kube_config_dir }}/kubeadm-controlplane.yaml + --ignore-preflight-errors=all + --skip-phases={{ kubeadm_join_phases_skip | join(',') }} + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" + register: kubeadm_join_control_plane + retries: 3 + throttle: 1 + until: kubeadm_join_control_plane is succeeded + when: + - inventory_hostname != first_kube_control_plane + - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml new file mode 100644 index 0000000..d9f7304 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml @@ -0,0 +1,248 @@ +--- +- name: Install OIDC certificate + copy: + content: "{{ kube_oidc_ca_cert | b64decode }}" + dest: "{{ kube_oidc_ca_file }}" + owner: root + group: root + mode: "0644" + when: + - kube_oidc_auth + - kube_oidc_ca_cert is defined + +- name: kubeadm | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + get_attributes: no + get_checksum: no + get_mime: no + register: kubeadm_already_run + +- name: kubeadm | Backup kubeadm certs / kubeconfig + import_tasks: kubeadm-backup.yml + when: + - kubeadm_already_run.stat.exists + +- name: kubeadm | aggregate all SANs + set_fact: + apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}" + vars: + sans_base: + - "kubernetes" + - "kubernetes.default" + - "kubernetes.default.svc" + - "kubernetes.default.svc.{{ dns_domain }}" + - "{{ kube_apiserver_ip }}" + - "localhost" + - "127.0.0.1" + sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}" + sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}" + sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}" + sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}" + sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}" + sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}" + sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}" + sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}" + sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}" + sans_kube_vip_address: "{{ [kube_vip_address] if kube_vip_address is defined and kube_vip_address else [] }}" + tags: facts + +- name: Create audit-policy directory + file: + path: "{{ audit_policy_file | dirname }}" + state: directory + mode: 0640 + when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false) + +- name: Write api audit policy yaml + template: + src: apiserver-audit-policy.yaml.j2 + dest: "{{ audit_policy_file }}" + mode: 0640 + when: kubernetes_audit|default(false) 
or kubernetes_audit_webhook|default(false) + +- name: Write api audit webhook config yaml + template: + src: apiserver-audit-webhook-config.yaml.j2 + dest: "{{ audit_webhook_config_file }}" + mode: 0640 + when: kubernetes_audit_webhook|default(false) + +# Nginx LB is the default. If kubeadm_config_api_fqdn is defined, use that LB as the kubeadm controlPlaneEndpoint instead. +- name: Set kubeadm_config_api_fqdn + set_fact: + kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}" + when: loadbalancer_apiserver is defined + +- name: Set kubeadm api version to v1beta3 + set_fact: + kubeadmConfig_api_version: v1beta3 + +- name: kubeadm | Create kubeadm config + template: + src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2" + dest: "{{ kube_config_dir }}/kubeadm-config.yaml" + mode: 0640 + +- name: kubeadm | Create directory to store admission control configurations + file: + path: "{{ kube_config_dir }}/admission-controls" + state: directory + mode: 0640 + when: kube_apiserver_admission_control_config_file + +- name: kubeadm | Push admission control config file + template: + src: "admission-controls.yaml.j2" + dest: "{{ kube_config_dir }}/admission-controls/admission-controls.yaml" + mode: 0640 + when: kube_apiserver_admission_control_config_file + +- name: kubeadm | Push admission control config files + template: + src: "{{ item|lower }}.yaml.j2" + dest: "{{ kube_config_dir }}/admission-controls/{{ item|lower }}.yaml" + mode: 0640 + when: + - kube_apiserver_admission_control_config_file + - item in kube_apiserver_admission_plugins_needs_configuration + loop: "{{ kube_apiserver_enable_admission_plugins }}" + +- name: kubeadm | Check if apiserver.crt contains all needed SANs + shell: | + set -o pipefail + for IP in {{ apiserver_ips | join(' ') }}; do + openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkip $IP | grep -q 'does match certificate' || echo 'NEED-RENEW' + done + for HOST in {{ apiserver_hosts | join(' ') }}; do + openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkhost $HOST | grep -q 'does match certificate' || echo 'NEED-RENEW' + done + vars: + apiserver_ips: "{{ apiserver_sans|map('ipaddr')|reject('equalto', False)|list }}" + apiserver_hosts: "{{ apiserver_sans|difference(apiserver_ips) }}" + args: + executable: /bin/bash + register: apiserver_sans_check + changed_when: "'NEED-RENEW' in apiserver_sans_check.stdout" + when: + - kubeadm_already_run.stat.exists + - not kube_external_ca_mode + +- name: kubeadm | regenerate apiserver cert 1/2 + file: + state: absent + path: "{{ kube_cert_dir }}/{{ item }}" + with_items: + - apiserver.crt + - apiserver.key + when: + - kubeadm_already_run.stat.exists + - apiserver_sans_check.changed + - not kube_external_ca_mode + +- name: kubeadm | regenerate apiserver cert 2/2 + command: >- + {{ bin_dir }}/kubeadm + init phase certs apiserver + --config={{ kube_config_dir }}/kubeadm-config.yaml + when: + - kubeadm_already_run.stat.exists + - apiserver_sans_check.changed + - not kube_external_ca_mode + +- name: kubeadm | Create directory to store kubeadm patches + file: + path: "{{ kubeadm_patches.dest_dir }}" + state: directory + mode: 0640 + when: kubeadm_patches is defined and kubeadm_patches.enabled + +- name: kubeadm | Copy kubeadm patches from inventory files + copy: + src: "{{ kubeadm_patches.source_dir }}/" + dest: "{{ kubeadm_patches.dest_dir }}" + owner: "root" + mode: 0644 + when: kubeadm_patches is defined and kubeadm_patches.enabled + +- name: kubeadm | 
Initialize first master + command: >- + timeout -k 300s 300s + {{ bin_dir }}/kubeadm init + --config={{ kube_config_dir }}/kubeadm-config.yaml + --ignore-preflight-errors=all + --skip-phases={{ kubeadm_init_phases_skip | join(',') }} + {{ kube_external_ca_mode | ternary('', '--upload-certs') }} + register: kubeadm_init + # Retry is because upload config sometimes fails + retries: 3 + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + when: inventory_hostname == first_kube_control_plane and not kubeadm_already_run.stat.exists + failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" + notify: Master | restart kubelet + +- name: set kubeadm certificate key + set_fact: + kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}" + with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}" + when: + - kubeadm_certificate_key is not defined + - (item | trim) is match('.*--certificate-key.*') + +- name: Create hardcoded kubeadm token for joining nodes with 24h expiration (if defined) + shell: >- + {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token delete {{ kubeadm_token }} || :; + {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create {{ kubeadm_token }} + changed_when: false + when: + - inventory_hostname == first_kube_control_plane + - kubeadm_token is defined + - kubeadm_refresh_token + tags: + - kubeadm_token + +- name: Create kubeadm token for joining nodes with 24h expiration (default) + command: "{{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create" + changed_when: false + register: temp_token + retries: 5 + delay: 5 + until: temp_token is succeeded + delegate_to: "{{ first_kube_control_plane }}" + when: kubeadm_token is not defined + tags: + - kubeadm_token + +- name: Set kubeadm_token + set_fact: + kubeadm_token: "{{ temp_token.stdout }}" + when: temp_token.stdout is defined + tags: + - kubeadm_token + +- name: PodSecurityPolicy | install PodSecurityPolicy + include_tasks: psp-install.yml + when: + - podsecuritypolicy_enabled + - inventory_hostname == first_kube_control_plane + +- name: kubeadm | Join other masters + include_tasks: kubeadm-secondary.yml + +- name: kubeadm | upgrade kubernetes cluster + include_tasks: kubeadm-upgrade.yml + when: + - upgrade_cluster_setup + - kubeadm_already_run.stat.exists + +# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: []` in the YAML file. 
+- name: kubeadm | Remove taint for master with node role + command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}" + delegate_to: "{{ first_kube_control_plane }}" + with_items: + - "node-role.kubernetes.io/master:NoSchedule-" + - "node-role.kubernetes.io/control-plane:NoSchedule-" + when: inventory_hostname in groups['kube_node'] + failed_when: false diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml new file mode 100644 index 0000000..711a2e0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml @@ -0,0 +1,75 @@ +--- +- name: kubeadm | Check api is up + uri: + url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz" + validate_certs: false + when: inventory_hostname in groups['kube_control_plane'] + register: _result + retries: 60 + delay: 5 + until: _result.status == 200 + +- name: kubeadm | Upgrade first master + command: >- + timeout -k 600s 600s + {{ bin_dir }}/kubeadm + upgrade apply -y {{ kube_version }} + --certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }} + --config={{ kube_config_dir }}/kubeadm-config.yaml + --ignore-preflight-errors=all + --allow-experimental-upgrades + --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }} + --force + register: kubeadm_upgrade + # Retry is because upload config sometimes fails + retries: 3 + until: kubeadm_upgrade.rc == 0 + when: inventory_hostname == first_kube_control_plane + failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" + notify: Master | restart kubelet + +- name: kubeadm | Upgrade other masters + command: >- + timeout -k 600s 600s + {{ bin_dir }}/kubeadm + upgrade apply -y {{ kube_version }} + --certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }} + --config={{ kube_config_dir }}/kubeadm-config.yaml + --ignore-preflight-errors=all + --allow-experimental-upgrades + --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }} + --force + register: kubeadm_upgrade + when: inventory_hostname != first_kube_control_plane + failed_when: + - kubeadm_upgrade.rc != 0 + - '"field is immutable" not in kubeadm_upgrade.stderr' + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" + notify: Master | restart kubelet + +- name: kubeadm | clean kubectl cache to refresh api types + file: + path: "{{ item }}" + state: absent + with_items: + - /root/.kube/cache + - /root/.kube/http-cache + +# FIXME: https://github.com/kubernetes/kubeadm/issues/1318 +- name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode + command: >- + {{ kubectl }} + -n kube-system + scale deployment/coredns --replicas 0 + register: scale_down_coredns + retries: 6 + delay: 5 + until: scale_down_coredns is succeeded + run_once: yes + when: + - kubeadm_scale_down_coredns_enabled + - dns_mode not in ['coredns', 'coredns_dual'] + changed_when: false diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml new file mode 100644 index 0000000..7d0c1a0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml @@ -0,0 +1,18 @@ +--- +- name: Fixup kubelet client cert 
rotation 1/2 + lineinfile: + path: "{{ kube_config_dir }}/kubelet.conf" + regexp: '^ client-certificate-data: ' + line: ' client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem' + backup: yes + notify: + - "Master | reload kubelet" + +- name: Fixup kubelet client cert rotation 2/2 + lineinfile: + path: "{{ kube_config_dir }}/kubelet.conf" + regexp: '^ client-key-data: ' + line: ' client-key: /var/lib/kubelet/pki/kubelet-client-current.pem' + backup: yes + notify: + - "Master | reload kubelet" diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/main.yml new file mode 100644 index 0000000..bd8029a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/main.yml @@ -0,0 +1,104 @@ +--- +- import_tasks: pre-upgrade.yml + tags: + - k8s-pre-upgrade + +- name: Create webhook token auth config + template: + src: webhook-token-auth-config.yaml.j2 + dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml" + mode: 0640 + when: kube_webhook_token_auth|default(false) + +- name: Create webhook authorization config + template: + src: webhook-authorization-config.yaml.j2 + dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml" + mode: 0640 + when: kube_webhook_authorization|default(false) + +- name: Create kube-scheduler config + template: + src: kubescheduler-config.yaml.j2 + dest: "{{ kube_config_dir }}/kubescheduler-config.yaml" + mode: 0644 + +- import_tasks: encrypt-at-rest.yml + when: + - kube_encrypt_secret_data + +- name: Install | Copy kubectl binary from download dir + copy: + src: "{{ local_release_dir }}/kubectl-{{ kube_version }}-{{ image_arch }}" + dest: "{{ bin_dir }}/kubectl" + mode: 0755 + remote_src: true + tags: + - kubectl + - upgrade + +- name: Install kubectl bash completion + shell: "{{ bin_dir }}/kubectl completion bash >/etc/bash_completion.d/kubectl.sh" + when: ansible_os_family in ["Debian","RedHat"] + tags: + - kubectl + ignore_errors: true # noqa ignore-errors + +- name: Set kubectl bash completion file permissions + file: + path: /etc/bash_completion.d/kubectl.sh + owner: root + group: root + mode: 0755 + when: ansible_os_family in ["Debian","RedHat"] + tags: + - kubectl + - upgrade + ignore_errors: true # noqa ignore-errors + +- name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy + set_fact: + kube_apiserver_enable_admission_plugins: "{{ kube_apiserver_enable_admission_plugins | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}" + when: podsecuritypolicy_enabled + +- name: Define nodes already joined to existing cluster and first_kube_control_plane + import_tasks: define-first-kube-control.yml + +- name: Include kubeadm setup + import_tasks: kubeadm-setup.yml + +- name: Include kubeadm etcd extra tasks + include_tasks: kubeadm-etcd.yml + when: etcd_deployment_type == "kubeadm" + +- name: Include kubeadm secondary server apiserver fixes + include_tasks: kubeadm-fix-apiserver.yml + +- name: Include kubelet client cert rotation fixes + include_tasks: kubelet-fix-client-cert-rotation.yml + when: kubelet_rotate_certificates + +- name: Install script to renew K8S control plane certificates + template: + src: k8s-certs-renew.sh.j2 + dest: "{{ bin_dir }}/k8s-certs-renew.sh" + mode: 0755 + +- name: Renew K8S control plane certificates monthly 1/2 + template: + src: "{{ item }}.j2" + dest: "/etc/systemd/system/{{ item }}" + mode: 0644 + with_items: + - k8s-certs-renew.service + 
- k8s-certs-renew.timer + register: k8s_certs_units + when: auto_renew_certificates + +- name: Renew K8S control plane certificates monthly 2/2 + systemd: + name: k8s-certs-renew.timer + enabled: yes + state: started + daemon-reload: "{{ k8s_certs_units is changed }}" + when: auto_renew_certificates diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/pre-upgrade.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/pre-upgrade.yml new file mode 100644 index 0000000..27c04ea --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/pre-upgrade.yml @@ -0,0 +1,21 @@ +--- +- name: "Pre-upgrade | Delete master manifests if etcd secrets changed" + file: + path: "/etc/kubernetes/manifests/{{ item }}.manifest" + state: absent + with_items: + - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] + register: kube_apiserver_manifest_replaced + when: etcd_secret_changed|default(false) + +- name: "Pre-upgrade | Delete master containers forcefully" # noqa 503 + shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f" + args: + executable: /bin/bash + with_items: + - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] + when: kube_apiserver_manifest_replaced.changed + register: remove_master_container + retries: 10 + until: remove_master_container.rc == 0 + delay: 1 diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/psp-install.yml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/psp-install.yml new file mode 100644 index 0000000..581d128 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/tasks/psp-install.yml @@ -0,0 +1,38 @@ +--- +- name: Check AppArmor status + command: which apparmor_parser + register: apparmor_status + failed_when: false + changed_when: apparmor_status.rc != 0 + +- name: Set apparmor_enabled + set_fact: + apparmor_enabled: "{{ apparmor_status.rc == 0 }}" + +- name: Render templates for PodSecurityPolicy + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0640 + register: psp_manifests + with_items: + - {file: psp.yml, type: psp, name: psp} + - {file: psp-cr.yml, type: clusterrole, name: psp-cr} + - {file: psp-crb.yml, type: rolebinding, name: psp-crb} + +- name: Add policies, roles, bindings for PodSecurityPolicy + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + register: result + until: result is succeeded + retries: 10 + delay: 6 + with_items: "{{ psp_manifests.results }}" + environment: + KUBECONFIG: "{{ kube_config_dir }}/admin.conf" + loop_control: + label: "{{ item.item.file }}" \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 new file mode 100644 index 0000000..34f5f18 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 @@ -0,0 +1,9 @@ +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +{% for plugin in kube_apiserver_enable_admission_plugins %} +{% if plugin in kube_apiserver_admission_plugins_needs_configuration %} +- name: {{ plugin }} + path: {{ kube_config_dir }}/{{ plugin|lower }}.yaml +{% endif %} +{% endfor %} diff --git 
a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/apiserver-audit-policy.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/apiserver-audit-policy.yaml.j2 new file mode 100644 index 0000000..ca7bcf8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/apiserver-audit-policy.yaml.j2 @@ -0,0 +1,129 @@ +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: +{% if audit_policy_custom_rules is defined and audit_policy_custom_rules != "" %} +{{ audit_policy_custom_rules | indent(2, true) }} +{% else %} + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + # Ingress controller reads `configmaps/ingress-uid` through the unsecured port. + # TODO(#46983): Change this to the ingress controller service account. + users: ["system:unsecured"] + namespaces: ["kube-system"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["configmaps"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps", "serviceaccounts/token"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + # Get responses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/apiserver-audit-webhook-config.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/apiserver-audit-webhook-config.yaml.j2 new file mode 100644 index 0000000..cd8208e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/apiserver-audit-webhook-config.yaml.j2 @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Config +clusters: +- cluster: + server: {{ audit_webhook_server_url }} +{% for key in audit_webhook_server_extra_args %} + {{ key }}: "{{ audit_webhook_server_extra_args[key] }}" +{% endfor %} + name: auditsink +contexts: +- context: + cluster: auditsink + user: "" + name: default-context +current-context: default-context +preferences: {} +users: [] diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/eventratelimit.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/eventratelimit.yaml.j2 new file mode 100644 index 0000000..0d78670 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/eventratelimit.yaml.j2 @@ -0,0 +1,11 @@ +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +{% for limit in kube_apiserver_admission_event_rate_limits.values() %} +- type: {{ limit.type }} + qps: {{ limit.qps }} + burst: {{ limit.burst }} +{% if limit.cache_size is defined %} + cacheSize: {{ limit.cache_size }} +{% endif %} +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/k8s-certs-renew.service.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/k8s-certs-renew.service.j2 new file mode 100644 index 0000000..64610c2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/k8s-certs-renew.service.j2 @@ -0,0 +1,6 @@ +[Unit] +Description=Renew K8S control plane certificates + +[Service] +Type=oneshot +ExecStart={{ bin_dir }}/k8s-certs-renew.sh diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/k8s-certs-renew.sh.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/k8s-certs-renew.sh.j2 new file mode 100644 index 0000000..53bb825 --- 
/dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/k8s-certs-renew.sh.j2 @@ -0,0 +1,23 @@ +#!/bin/bash + +echo "## Expiration before renewal ##" +{{ bin_dir }}/kubeadm certs check-expiration + +echo "## Renewing certificates managed by kubeadm ##" +{{ bin_dir }}/kubeadm certs renew all + +echo "## Restarting control plane pods managed by kubeadm ##" +{% if container_manager == "docker" %} +{{ docker_bin_dir }}/docker ps -af 'name=k8s_POD_(kube-apiserver|kube-controller-manager|kube-scheduler|etcd)-*' -q | /usr/bin/xargs {{ docker_bin_dir }}/docker rm -f +{% else %} +{{ bin_dir }}/crictl pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs {{ bin_dir }}/crictl rmp -f +{% endif %} + +echo "## Updating /root/.kube/config ##" +cp {{ kube_config_dir }}/admin.conf /root/.kube/config + +echo "## Waiting for apiserver to be up again ##" +until printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done + +echo "## Expiration after renewal ##" +{{ bin_dir }}/kubeadm certs check-expiration diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 new file mode 100644 index 0000000..904f007 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 @@ -0,0 +1,8 @@ +[Unit] +Description=Timer to renew K8S control plane certificates + +[Timer] +OnCalendar={{ auto_renew_certificates_systemd_calendar }} + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 new file mode 100644 index 0000000..2fbd553 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 @@ -0,0 +1,453 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +{% if kubeadm_token is defined %} +bootstrapTokens: +- token: "{{ kubeadm_token }}" + description: "kubespray kubeadm bootstrap token" + ttl: "24h" +{% endif %} +localAPIEndpoint: + advertiseAddress: {{ ip | default(fallback_ips[inventory_hostname]) }} + bindPort: {{ kube_apiserver_port }} +{% if kubeadm_certificate_key is defined %} +certificateKey: {{ kubeadm_certificate_key }} +{% endif %} +nodeRegistration: +{% if kube_override_hostname|default('') %} + name: {{ kube_override_hostname }} +{% endif %} +{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %} + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane +{% else %} + taints: [] +{% endif %} + criSocket: {{ cri_socket }} +{% if cloud_provider is defined and cloud_provider in ["external"] %} + kubeletExtraArgs: + cloud-provider: external +{% endif %} +{% if kubeadm_patches is defined and kubeadm_patches.enabled %} +patches: + directory: {{ kubeadm_patches.dest_dir }} +{% endif %} +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +clusterName: {{ cluster_name }} +etcd: +{% if etcd_deployment_type != "kubeadm" %} + external: + endpoints: +{% for endpoint in etcd_access_addresses.split(',') %} + - {{ endpoint }} +{% endfor %} + caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }} + certFile: {{ etcd_cert_dir 
}}/{{ kube_etcd_cert_file }} + keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }} +{% elif etcd_deployment_type == "kubeadm" %} + local: + imageRepository: "{{ etcd_image_repo | regex_replace("/etcd$","") }}" + imageTag: "{{ etcd_image_tag }}" + dataDir: "{{ etcd_data_dir }}" + extraArgs: + metrics: {{ etcd_metrics }} + election-timeout: "{{ etcd_election_timeout }}" + heartbeat-interval: "{{ etcd_heartbeat_interval }}" + auto-compaction-retention: "{{ etcd_compaction_retention }}" +{% if etcd_snapshot_count is defined %} + snapshot-count: "{{ etcd_snapshot_count }}" +{% endif %} +{% if etcd_quota_backend_bytes is defined %} + quota-backend-bytes: "{{ etcd_quota_backend_bytes }}" +{% endif %} +{% if etcd_max_request_bytes is defined %} + max-request-bytes: "{{ etcd_max_request_bytes }}" +{% endif %} +{% if etcd_log_level is defined %} + log-level: "{{ etcd_log_level }}" +{% endif %} +{% for key, value in etcd_extra_vars.items() %} + {{ key }}: "{{ value }}" +{% endfor %} + serverCertSANs: +{% for san in etcd_cert_alt_names %} + - {{ san }} +{% endfor %} +{% for san in etcd_cert_alt_ips %} + - {{ san }} +{% endfor %} + peerCertSANs: +{% for san in etcd_cert_alt_names %} + - {{ san }} +{% endfor %} +{% for san in etcd_cert_alt_ips %} + - {{ san }} +{% endfor %} +{% endif %} +dns: + imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }} + imageTag: {{ coredns_image_tag }} +networking: + dnsDomain: {{ dns_domain }} + serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" +{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} + podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" +{% endif %} +{% if kubeadm_feature_gates %} +featureGates: +{% for feature in kubeadm_feature_gates %} + {{ feature|replace("=", ": ") }} +{% endfor %} +{% endif %} +kubernetesVersion: {{ kube_version }} +{% if kubeadm_config_api_fqdn is defined %} +controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }} +{% else %} +controlPlaneEndpoint: {{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }} +{% endif %} +certificatesDir: {{ kube_cert_dir }} +imageRepository: {{ kube_image_repo }} +apiServer: + extraArgs: +{% if kube_apiserver_pod_eviction_not_ready_timeout_seconds is defined %} + default-not-ready-toleration-seconds: "{{ kube_apiserver_pod_eviction_not_ready_timeout_seconds }}" +{% endif %} +{% if kube_apiserver_pod_eviction_unreachable_timeout_seconds is defined %} + default-unreachable-toleration-seconds: "{{ kube_apiserver_pod_eviction_unreachable_timeout_seconds }}" +{% endif %} +{% if kube_api_anonymous_auth is defined %} + anonymous-auth: "{{ kube_api_anonymous_auth }}" +{% endif %} + authorization-mode: {{ authorization_modes | join(',') }} + bind-address: {{ kube_apiserver_bind_address }} +{% if kube_apiserver_enable_admission_plugins|length > 0 %} + enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }} +{% endif %} +{% if kube_apiserver_admission_control_config_file %} + admission-control-config-file: {{ kube_config_dir }}/admission-controls.yaml +{% endif %} +{% if kube_apiserver_disable_admission_plugins|length > 0 %} + disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }} +{% endif %} + apiserver-count: "{{ kube_apiserver_count }}" + endpoint-reconciler-type: lease +{% if 
etcd_events_cluster_enabled %} + etcd-servers-overrides: "/events#{{ etcd_events_access_addresses_semicolon }}" +{% endif %} + service-node-port-range: {{ kube_apiserver_node_port_range }} + service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" + kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" + profiling: "{{ kube_profiling }}" + request-timeout: "{{ kube_apiserver_request_timeout }}" + enable-aggregator-routing: "{{ kube_api_aggregator_routing }}" +{% if kube_token_auth|default(true) %} + token-auth-file: {{ kube_token_dir }}/known_tokens.csv +{% endif %} +{% if kube_apiserver_service_account_lookup %} + service-account-lookup: "{{ kube_apiserver_service_account_lookup }}" +{% endif %} +{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} + oidc-issuer-url: "{{ kube_oidc_url }}" + oidc-client-id: "{{ kube_oidc_client_id }}" +{% if kube_oidc_ca_file is defined %} + oidc-ca-file: "{{ kube_oidc_ca_file }}" +{% endif %} +{% if kube_oidc_username_claim is defined %} + oidc-username-claim: "{{ kube_oidc_username_claim }}" +{% endif %} +{% if kube_oidc_groups_claim is defined %} + oidc-groups-claim: "{{ kube_oidc_groups_claim }}" +{% endif %} +{% if kube_oidc_username_prefix is defined %} + oidc-username-prefix: "{{ kube_oidc_username_prefix }}" +{% endif %} +{% if kube_oidc_groups_prefix is defined %} + oidc-groups-prefix: "{{ kube_oidc_groups_prefix }}" +{% endif %} +{% endif %} +{% if kube_webhook_token_auth|default(false) %} + authentication-token-webhook-config-file: {{ kube_config_dir }}/webhook-token-auth-config.yaml +{% endif %} +{% if kube_webhook_authorization|default(false) %} + authorization-webhook-config-file: {{ kube_config_dir }}/webhook-authorization-config.yaml +{% endif %} +{% if kube_encrypt_secret_data %} + encryption-provider-config: {{ kube_cert_dir }}/secrets_encryption.yaml +{% endif %} + storage-backend: {{ kube_apiserver_storage_backend }} +{% if kube_api_runtime_config|length > 0 %} + runtime-config: {{ kube_api_runtime_config | join(',') }} +{% endif %} + allow-privileged: "true" +{% if kubernetes_audit or kubernetes_audit_webhook %} + audit-policy-file: {{ audit_policy_file }} +{% endif %} +{% if kubernetes_audit %} + audit-log-path: "{{ audit_log_path }}" + audit-log-maxage: "{{ audit_log_maxage }}" + audit-log-maxbackup: "{{ audit_log_maxbackups }}" + audit-log-maxsize: "{{ audit_log_maxsize }}" +{% endif %} +{% if kubernetes_audit_webhook %} + audit-webhook-config-file: {{ audit_webhook_config_file }} + audit-webhook-mode: {{ audit_webhook_mode }} +{% if audit_webhook_mode == "batch" %} + audit-webhook-batch-max-size: "{{ audit_webhook_batch_max_size }}" + audit-webhook-batch-max-wait: "{{ audit_webhook_batch_max_wait }}" +{% endif %} +{% endif %} +{% for key in kube_kubeadm_apiserver_extra_args %} + {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}" +{% endfor %} +{% if kube_apiserver_feature_gates or kube_feature_gates %} + feature-gates: "{{ kube_apiserver_feature_gates | default(kube_feature_gates, true) | join(',') }}" +{% endif %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %} + cloud-provider: {{ cloud_provider }} + cloud-config: {{ kube_config_dir }}/cloud_config +{% endif %} +{% if tls_min_version is defined %} + tls-min-version: {{ tls_min_version }} +{% endif %} +{% if tls_cipher_suites is defined %} + tls-cipher-suites: {% for tls in 
tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %} + +{% endif %} +{% if event_ttl_duration is defined %} + event-ttl: {{ event_ttl_duration }} +{% endif %} +{% if kubelet_rotate_server_certificates %} + kubelet-certificate-authority: {{ kube_cert_dir }}/ca.crt +{% endif %} +{% if kubernetes_audit or kube_token_auth|default(true) or kube_webhook_token_auth|default(false) or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] ) or apiserver_extra_volumes or ssl_ca_dirs|length %} + extraVolumes: +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %} + - name: cloud-config + hostPath: {{ kube_config_dir }}/cloud_config + mountPath: {{ kube_config_dir }}/cloud_config +{% endif %} +{% if kube_token_auth|default(true) %} + - name: token-auth-config + hostPath: {{ kube_token_dir }} + mountPath: {{ kube_token_dir }} +{% endif %} +{% if kube_webhook_token_auth|default(false) %} + - name: webhook-token-auth-config + hostPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml + mountPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml +{% endif %} +{% if kube_webhook_authorization|default(false) %} + - name: webhook-authorization-config + hostPath: {{ kube_config_dir }}/webhook-authorization-config.yaml + mountPath: {{ kube_config_dir }}/webhook-authorization-config.yaml +{% endif %} +{% if kubernetes_audit or kubernetes_audit_webhook %} + - name: {{ audit_policy_name }} + hostPath: {{ audit_policy_hostpath }} + mountPath: {{ audit_policy_mountpath }} +{% if audit_log_path != "-" %} + - name: {{ audit_log_name }} + hostPath: {{ audit_log_hostpath }} + mountPath: {{ audit_log_mountpath }} + readOnly: false +{% endif %} +{% endif %} +{% if kube_apiserver_admission_control_config_file %} + - name: admission-control-configs + hostPath: {{ kube_config_dir }}/admission-controls + mountPath: {{ kube_config_dir }} + readOnly: false + pathType: DirectoryOrCreate +{% endif %} +{% for volume in apiserver_extra_volumes %} + - name: {{ volume.name }} + hostPath: {{ volume.hostPath }} + mountPath: {{ volume.mountPath }} + readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }} +{% endfor %} +{% if ssl_ca_dirs|length %} +{% for dir in ssl_ca_dirs %} + - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + hostPath: {{ dir }} + mountPath: {{ dir }} + readOnly: true +{% endfor %} +{% endif %} +{% endif %} + certSANs: +{% for san in apiserver_sans %} + - {{ san }} +{% endfor %} + timeoutForControlPlane: 5m0s +controllerManager: + extraArgs: + node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }} + node-monitor-period: {{ kube_controller_node_monitor_period }} +{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} + cluster-cidr: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" +{% endif %} + service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" +{% if enable_dual_stack_networks %} + node-cidr-mask-size-ipv4: "{{ kube_network_node_prefix }}" + node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}" +{% else %} + node-cidr-mask-size: "{{ kube_network_node_prefix }}" +{% endif %} + profiling: "{{ kube_profiling }}" + terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}" + bind-address: {{ kube_controller_manager_bind_address }} + 
leader-elect-lease-duration: {{ kube_controller_manager_leader_elect_lease_duration }} + leader-elect-renew-deadline: {{ kube_controller_manager_leader_elect_renew_deadline }} +{% if kube_controller_feature_gates or kube_feature_gates %} + feature-gates: "{{ kube_controller_feature_gates | default(kube_feature_gates, true) | join(',') }}" +{% endif %} +{% for key in kube_kubeadm_controller_extra_args %} + {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}" +{% endfor %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %} + cloud-provider: {{ cloud_provider }} + cloud-config: {{ kube_config_dir }}/cloud_config +{% endif %} +{% if kube_network_plugin is defined and kube_network_plugin not in ["cloud"] %} + configure-cloud-routes: "false" +{% endif %} +{% if kubelet_flexvolumes_plugins_dir is defined %} + flex-volume-plugin-dir: {{kubelet_flexvolumes_plugins_dir}} +{% endif %} +{% if tls_min_version is defined %} + tls-min-version: {{ tls_min_version }} +{% endif %} +{% if tls_cipher_suites is defined %} + tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %} + +{% endif %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] or controller_manager_extra_volumes %} + extraVolumes: +{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %} + - name: openstackcacert + hostPath: "{{ kube_config_dir }}/openstack-cacert.pem" + mountPath: "{{ kube_config_dir }}/openstack-cacert.pem" +{% endif %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %} + - name: cloud-config + hostPath: {{ kube_config_dir }}/cloud_config + mountPath: {{ kube_config_dir }}/cloud_config +{% endif %} +{% for volume in controller_manager_extra_volumes %} + - name: {{ volume.name }} + hostPath: {{ volume.hostPath }} + mountPath: {{ volume.mountPath }} + readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }} +{% endfor %} +{% endif %} +scheduler: + extraArgs: + bind-address: {{ kube_scheduler_bind_address }} + config: {{ kube_config_dir }}/kubescheduler-config.yaml +{% if kube_scheduler_feature_gates or kube_feature_gates %} + feature-gates: "{{ kube_scheduler_feature_gates | default(kube_feature_gates, true) | join(',') }}" +{% endif %} +{% if kube_kubeadm_scheduler_extra_args|length > 0 %} +{% for key in kube_kubeadm_scheduler_extra_args %} + {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}" +{% endfor %} +{% endif %} +{% if tls_min_version is defined %} + tls-min-version: {{ tls_min_version }} +{% endif %} +{% if tls_cipher_suites is defined %} + tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %} + +{% endif %} + extraVolumes: + - name: kubescheduler-config + hostPath: {{ kube_config_dir }}/kubescheduler-config.yaml + mountPath: {{ kube_config_dir }}/kubescheduler-config.yaml + readOnly: true +{% if scheduler_extra_volumes %} +{% for volume in scheduler_extra_volumes %} + - name: {{ volume.name }} + hostPath: {{ volume.hostPath }} + mountPath: {{ volume.mountPath }} + readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }} +{% endfor %} +{% endif %} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +bindAddress: {{ kube_proxy_bind_address }} +clientConnection: + acceptContentTypes: {{ kube_proxy_client_accept_content_types }} + burst: {{ 
kube_proxy_client_burst }} + contentType: {{ kube_proxy_client_content_type }} + kubeconfig: {{ kube_proxy_client_kubeconfig }} + qps: {{ kube_proxy_client_qps }} +{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} +clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" +{% endif %} +configSyncPeriod: {{ kube_proxy_config_sync_period }} +conntrack: + maxPerCore: {{ kube_proxy_conntrack_max_per_core }} + min: {{ kube_proxy_conntrack_min }} + tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }} + tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }} +enableProfiling: {{ kube_proxy_enable_profiling }} +healthzBindAddress: {{ kube_proxy_healthz_bind_address }} +hostnameOverride: {{ kube_override_hostname }} +iptables: + masqueradeAll: {{ kube_proxy_masquerade_all }} + masqueradeBit: {{ kube_proxy_masquerade_bit }} + minSyncPeriod: {{ kube_proxy_min_sync_period }} + syncPeriod: {{ kube_proxy_sync_period }} +ipvs: + excludeCIDRs: {{ kube_proxy_exclude_cidrs }} + minSyncPeriod: {{ kube_proxy_min_sync_period }} + scheduler: {{ kube_proxy_scheduler }} + syncPeriod: {{ kube_proxy_sync_period }} + strictARP: {{ kube_proxy_strict_arp }} + tcpTimeout: {{ kube_proxy_tcp_timeout }} + tcpFinTimeout: {{ kube_proxy_tcp_fin_timeout }} + udpTimeout: {{ kube_proxy_udp_timeout }} +metricsBindAddress: {{ kube_proxy_metrics_bind_address }} +mode: {{ kube_proxy_mode }} +nodePortAddresses: {{ kube_proxy_nodeport_addresses }} +oomScoreAdj: {{ kube_proxy_oom_score_adj }} +portRange: {{ kube_proxy_port_range }} +udpIdleTimeout: {{ kube_proxy_udp_idle_timeout }} +{% if kube_proxy_feature_gates or kube_feature_gates %} +{% set feature_gates = ( kube_proxy_feature_gates | default(kube_feature_gates, true) ) %} +featureGates: +{% for feature in feature_gates %} + {{ feature|replace("=", ": ") }} +{% endfor %} +{% endif %} +{# DNS settings for kubelet #} +{% if enable_nodelocaldns %} +{% set kubelet_cluster_dns = [nodelocaldns_ip] %} +{% elif dns_mode in ['coredns'] %} +{% set kubelet_cluster_dns = [skydns_server] %} +{% elif dns_mode == 'coredns_dual' %} +{% set kubelet_cluster_dns = [skydns_server,skydns_server_secondary] %} +{% elif dns_mode == 'manual' %} +{% set kubelet_cluster_dns = [manual_dns_server] %} +{% else %} +{% set kubelet_cluster_dns = [] %} +{% endif %} +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +clusterDNS: +{% for dns_address in kubelet_cluster_dns %} +- {{ dns_address }} +{% endfor %} +{% if kubelet_feature_gates or kube_feature_gates %} +{% set feature_gates = ( kubelet_feature_gates | default(kube_feature_gates, true) ) %} +featureGates: +{% for feature in feature_gates %} + {{ feature|replace("=", ": ") }} +{% endfor %} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 new file mode 100644 index 0000000..b4b3c5e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 @@ -0,0 +1,34 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +discovery: + bootstrapToken: +{% if kubeadm_config_api_fqdn is defined %} + apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }} +{% else %} + apiServerEndpoint: {{ kubeadm_discovery_address }} 
+{% endif %} + token: {{ kubeadm_token }} + unsafeSkipCAVerification: true + timeout: {{ discovery_timeout }} + tlsBootstrapToken: {{ kubeadm_token }} +controlPlane: + localAPIEndpoint: + advertiseAddress: {{ kube_apiserver_address }} + bindPort: {{ kube_apiserver_port }} + certificateKey: {{ kubeadm_certificate_key }} +nodeRegistration: + name: {{ kube_override_hostname|default(inventory_hostname) }} + criSocket: {{ cri_socket }} +{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %} + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane +{% else %} + taints: [] +{% endif %} +{% if kubeadm_patches is defined and kubeadm_patches.enabled %} +patches: + directory: {{ kubeadm_patches.dest_dir }} +{% endif %} \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2 new file mode 100644 index 0000000..be41418 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2 @@ -0,0 +1,25 @@ +{% set kubescheduler_config_api_version = "v1beta3" %} +apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version|d('v1') }} +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: "{{ kube_config_dir }}/scheduler.conf" +{% for key in kube_scheduler_client_conn_extra_opts %} + {{ key }}: {{ kube_scheduler_client_conn_extra_opts[key] }} +{% endfor %} +{% if kube_scheduler_extenders %} +extenders: +{{ kube_scheduler_extenders | to_nice_yaml(indent=2, width=256) }} +{% endif %} +leaderElection: + leaseDuration: {{ kube_scheduler_leader_elect_lease_duration }} + renewDeadline: {{ kube_scheduler_leader_elect_renew_deadline }} +{% for key in kube_scheduler_leader_elect_extra_opts %} + {{ key }}: {{ kube_scheduler_leader_elect_extra_opts[key] }} +{% endfor %} +{% if kube_scheduler_profiles %} +profiles: +{{ kube_scheduler_profiles | to_nice_yaml(indent=2, width=256) }} +{% endif %} +{% for key in kube_scheduler_config_extra_opts %} +{{ key }}: {{ kube_scheduler_config_extra_opts[key] }} +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2 new file mode 100644 index 0000000..5d39576 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2 @@ -0,0 +1,17 @@ +{% if kube_pod_security_use_default %} +apiVersion: pod-security.admission.config.k8s.io/v1beta1 +kind: PodSecurityConfiguration +defaults: + enforce: "{{ kube_pod_security_default_enforce }}" + enforce-version: "{{ kube_pod_security_default_enforce_version }}" + audit: "{{ kube_pod_security_default_audit }}" + audit-version: "{{ kube_pod_security_default_audit_version }}" + warn: "{{ kube_pod_security_default_warn }}" + warn-version: "{{ kube_pod_security_default_warn_version }}" +exemptions: + usernames: {{ kube_pod_security_exemptions_usernames|to_json }} + runtimeClasses: {{ kube_pod_security_exemptions_runtime_class_names|to_json }} + namespaces: {{ kube_pod_security_exemptions_namespaces|to_json }} +{% else %} +# This file is intentionally left empty as kube_pod_security_use_default={{ kube_pod_security_use_default }} +{% endif %} diff --git 
a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/psp-cr.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/psp-cr.yml.j2 new file mode 100644 index 0000000..d9f0e8d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/psp-cr.yml.j2 @@ -0,0 +1,32 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: psp:privileged + labels: + addonmanager.kubernetes.io/mode: Reconcile +rules: +- apiGroups: + - policy + resourceNames: + - privileged + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: psp:restricted + labels: + addonmanager.kubernetes.io/mode: Reconcile +rules: +- apiGroups: + - policy + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/psp-crb.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/psp-crb.yml.j2 new file mode 100644 index 0000000..7513c3c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/psp-crb.yml.j2 @@ -0,0 +1,54 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: psp:any:restricted +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted +subjects: +- kind: Group + name: system:authenticated + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: psp:kube-system:privileged + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:privileged +subjects: +- kind: Group + name: system:masters + apiGroup: rbac.authorization.k8s.io +- kind: Group + name: system:serviceaccounts:kube-system + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: psp:nodes:privileged + namespace: kube-system + annotations: + kubernetes.io/description: 'Allow nodes to create privileged pods. Should + be used in combination with the NodeRestriction admission plugin to limit + nodes to mirror pods bound to themselves.' 
+ labels: + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:privileged +subjects: + - kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:nodes + - kind: User + apiGroup: rbac.authorization.k8s.io + # Legacy node ID + name: kubelet diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/psp.yml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/psp.yml.j2 new file mode 100644 index 0000000..5da5400 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/psp.yml.j2 @@ -0,0 +1,27 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: restricted + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + {{ podsecuritypolicy_restricted_spec | to_yaml(indent=2, width=1337) | indent(width=2) }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: privileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + {{ podsecuritypolicy_privileged_spec | to_yaml(indent=2, width=1337) | indent(width=2) }} diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 new file mode 100644 index 0000000..9105bb6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 @@ -0,0 +1,11 @@ +apiVersion: apiserver.config.k8s.io/v1 +kind: EncryptionConfiguration +resources: + - resources: +{{ kube_encryption_resources|to_nice_yaml|indent(4, True) }} + providers: + - {{ kube_encryption_algorithm }}: + keys: + - name: key + secret: {{ kube_encrypt_token | b64encode }} + - identity: {} diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/webhook-authorization-config.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/webhook-authorization-config.yaml.j2 new file mode 100644 index 0000000..b5b5530 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/webhook-authorization-config.yaml.j2 @@ -0,0 +1,18 @@ +# clusters refers to the remote service. +clusters: +- name: webhook-token-authz-cluster + cluster: + server: {{ kube_webhook_authorization_url }} + insecure-skip-tls-verify: {{ kube_webhook_authorization_url_skip_tls_verify }} + +# users refers to the API server's webhook configuration. +users: +- name: webhook-token-authz-user + +# kubeconfig files require a context. Provide one for the API server. 
+current-context: webhook-token-authz +contexts: +- context: + cluster: webhook-token-authz-cluster + user: webhook-token-authz-user + name: webhook-token-authz diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/webhook-token-auth-config.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/webhook-token-auth-config.yaml.j2 new file mode 100644 index 0000000..f152d11 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/templates/webhook-token-auth-config.yaml.j2 @@ -0,0 +1,21 @@ +# clusters refers to the remote service. +clusters: +- name: webhook-token-auth-cluster + cluster: + server: {{ kube_webhook_token_auth_url }} + insecure-skip-tls-verify: {{ kube_webhook_token_auth_url_skip_tls_verify }} +{% if kube_webhook_token_auth_ca_data is defined %} + certificate-authority-data: {{ kube_webhook_token_auth_ca_data }} +{% endif %} + +# users refers to the API server's webhook configuration. +users: +- name: webhook-token-auth-user + +# kubeconfig files require a context. Provide one for the API server. +current-context: webhook-token-auth +contexts: +- context: + cluster: webhook-token-auth-cluster + user: webhook-token-auth-user + name: webhook-token-auth diff --git a/kubespray/extra_playbooks/roles/kubernetes/control-plane/vars/main.yaml b/kubespray/extra_playbooks/roles/kubernetes/control-plane/vars/main.yaml new file mode 100644 index 0000000..f888d6b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/control-plane/vars/main.yaml @@ -0,0 +1,3 @@ +--- +# list of admission plugins that needs to be configured +kube_apiserver_admission_plugins_needs_configuration: [EventRateLimit, PodSecurity] diff --git a/kubespray/extra_playbooks/roles/kubernetes/kubeadm/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes/kubeadm/defaults/main.yml new file mode 100644 index 0000000..0449b8a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/kubeadm/defaults/main.yml @@ -0,0 +1,12 @@ +--- +# discovery_timeout modifies the discovery timeout +# This value must be smaller than kubeadm_join_timeout +discovery_timeout: 60s +kubeadm_join_timeout: 120s + +# If non-empty, will use this string as identification instead of the actual hostname +kube_override_hostname: >- + {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- else -%} + {{ inventory_hostname }} + {%- endif -%} diff --git a/kubespray/extra_playbooks/roles/kubernetes/kubeadm/handlers/main.yml b/kubespray/extra_playbooks/roles/kubernetes/kubeadm/handlers/main.yml new file mode 100644 index 0000000..4c2b125 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/kubeadm/handlers/main.yml @@ -0,0 +1,15 @@ +--- +- name: Kubeadm | restart kubelet + command: /bin/true + notify: + - Kubeadm | reload systemd + - Kubeadm | reload kubelet + +- name: Kubeadm | reload systemd + systemd: + daemon_reload: true + +- name: Kubeadm | reload kubelet + service: + name: kubelet + state: restarted diff --git a/kubespray/extra_playbooks/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml b/kubespray/extra_playbooks/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml new file mode 100644 index 0000000..c87b840 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml @@ -0,0 +1,61 @@ +--- +- name: Parse certificate key if not set + set_fact: + kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_certificate_key'] }}" + when: kubeadm_certificate_key is undefined + +- name: 
Pull control plane certs down + shell: >- + {{ bin_dir }}/kubeadm join phase + control-plane-prepare download-certs + --certificate-key {{ kubeadm_certificate_key }} + --control-plane + --token {{ kubeadm_token }} + --discovery-token-unsafe-skip-ca-verification + {{ kubeadm_discovery_address }} + && + {{ bin_dir }}/kubeadm join phase + control-plane-prepare certs + --control-plane + --token {{ kubeadm_token }} + --discovery-token-unsafe-skip-ca-verification + {{ kubeadm_discovery_address }} + args: + creates: "{{ kube_cert_dir }}/apiserver-etcd-client.key" + +- name: Delete unneeded certificates + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ kube_cert_dir }}/apiserver.crt" + - "{{ kube_cert_dir }}/apiserver.key" + - "{{ kube_cert_dir }}/ca.key" + - "{{ kube_cert_dir }}/etcd/ca.key" + - "{{ kube_cert_dir }}/etcd/healthcheck-client.crt" + - "{{ kube_cert_dir }}/etcd/healthcheck-client.key" + - "{{ kube_cert_dir }}/etcd/peer.crt" + - "{{ kube_cert_dir }}/etcd/peer.key" + - "{{ kube_cert_dir }}/etcd/server.crt" + - "{{ kube_cert_dir }}/etcd/server.key" + - "{{ kube_cert_dir }}/front-proxy-ca.crt" + - "{{ kube_cert_dir }}/front-proxy-ca.key" + - "{{ kube_cert_dir }}/front-proxy-client.crt" + - "{{ kube_cert_dir }}/front-proxy-client.key" + - "{{ kube_cert_dir }}/sa.key" + - "{{ kube_cert_dir }}/sa.pub" + +- name: Calculate etcd cert serial + command: "openssl x509 -in {{ kube_cert_dir }}/apiserver-etcd-client.crt -noout -serial" + register: "etcd_client_cert_serial_result" + changed_when: false + when: + - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort + tags: + - network + +- name: Set etcd_client_cert_serial + set_fact: + etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}" + tags: + - network diff --git a/kubespray/extra_playbooks/roles/kubernetes/kubeadm/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes/kubeadm/tasks/main.yml new file mode 100644 index 0000000..a3cc862 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/kubeadm/tasks/main.yml @@ -0,0 +1,176 @@ +--- +- name: Set kubeadm_discovery_address + set_fact: + kubeadm_discovery_address: >- + {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%} + {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} + {%- else -%} + {{ kube_apiserver_endpoint | replace("https://", "") }} + {%- endif %} + tags: + - facts + +- name: Check if kubelet.conf exists + stat: + path: "{{ kube_config_dir }}/kubelet.conf" + get_attributes: no + get_checksum: no + get_mime: no + register: kubelet_conf + +- name: Check if kubeadm CA cert is accessible + stat: + path: "{{ kube_cert_dir }}/ca.crt" + get_attributes: no + get_checksum: no + get_mime: no + register: kubeadm_ca_stat + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + +- name: Calculate kubeadm CA cert hash + shell: set -o pipefail && openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //' + args: + executable: /bin/bash + register: kubeadm_ca_hash + when: + - kubeadm_ca_stat.stat is defined + - kubeadm_ca_stat.stat.exists + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + changed_when: false + +- name: Create kubeadm token for joining nodes with 24h expiration (default) + command: "{{ bin_dir }}/kubeadm token create" + register: temp_token + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: 
kubeadm_token is not defined + changed_when: false + +- name: Set kubeadm_token to generated token + set_fact: + kubeadm_token: "{{ temp_token.stdout }}" + when: kubeadm_token is not defined + +- name: Set kubeadm api version to v1beta3 + set_fact: + kubeadmConfig_api_version: v1beta3 + +- name: Create kubeadm client config + template: + src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2" + dest: "{{ kube_config_dir }}/kubeadm-client.conf" + backup: yes + mode: 0640 + when: not is_kube_master + +- name: kubeadm | Create directory to store kubeadm patches + file: + path: "{{ kubeadm_patches.dest_dir }}" + state: directory + mode: 0640 + when: kubeadm_patches is defined and kubeadm_patches.enabled + +- name: kubeadm | Copy kubeadm patches from inventory files + copy: + src: "{{ kubeadm_patches.source_dir }}/" + dest: "{{ kubeadm_patches.dest_dir }}" + owner: "root" + mode: 0644 + when: kubeadm_patches is defined and kubeadm_patches.enabled + +- name: Join to cluster if needed + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin" + when: not is_kube_master and (not kubelet_conf.stat.exists) + block: + + - name: Join to cluster + command: >- + timeout -k {{ kubeadm_join_timeout }} {{ kubeadm_join_timeout }} + {{ bin_dir }}/kubeadm join + --config {{ kube_config_dir }}/kubeadm-client.conf + --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests + --skip-phases={{ kubeadm_join_phases_skip | join(',') }} + register: kubeadm_join + changed_when: kubeadm_join is success + + rescue: + + - name: Join to cluster with ignores + command: >- + timeout -k {{ kubeadm_join_timeout }} {{ kubeadm_join_timeout }} + {{ bin_dir }}/kubeadm join + --config {{ kube_config_dir }}/kubeadm-client.conf + --ignore-preflight-errors=all + --skip-phases={{ kubeadm_join_phases_skip | join(',') }} + register: kubeadm_join + changed_when: kubeadm_join is success + + always: + + - name: Display kubeadm join stderr if any + when: kubeadm_join is failed + debug: + msg: | + Joined with warnings + {{ kubeadm_join.stderr_lines }} + +- name: Update server field in kubelet kubeconfig + lineinfile: + dest: "{{ kube_config_dir }}/kubelet.conf" + regexp: 'server:' + line: ' server: {{ kube_apiserver_endpoint }}' + backup: yes + when: + - kubeadm_config_api_fqdn is not defined + - not is_kube_master + - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "") + notify: Kubeadm | restart kubelet + +# FIXME(mattymo): Need to point to localhost, otherwise masters will all point +# incorrectly to first master, creating SPoF. 
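+# Illustrative effect of the rewrite performed by the task below (addresses are
+# examples only, assuming the default apiserver port 6443):
+#   before: server: https://192.0.2.10:6443
+#   after:  server: https://127.0.0.1:6443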
+- name: Update server field in kube-proxy kubeconfig + shell: >- + set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml + | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g' + | {{ kubectl }} replace -f - + args: + executable: /bin/bash + run_once: true + delegate_to: "{{ groups['kube_control_plane']|first }}" + delegate_facts: false + when: + - kubeadm_config_api_fqdn is not defined + - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "") + - kube_proxy_deployed + - loadbalancer_apiserver_localhost + tags: + - kube-proxy + +- name: Set ca.crt file permission + file: + path: "{{ kube_cert_dir }}/ca.crt" + owner: root + group: root + mode: "0644" + +- name: Restart all kube-proxy pods to ensure that they load the new configmap + command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0" + run_once: true + delegate_to: "{{ groups['kube_control_plane']|first }}" + delegate_facts: false + when: + - kubeadm_config_api_fqdn is not defined + - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "") + - kube_proxy_deployed + tags: + - kube-proxy + +- name: Extract etcd certs from control plane if using etcd kubeadm mode + include_tasks: kubeadm_etcd_node.yml + when: + - etcd_deployment_type == "kubeadm" + - inventory_hostname not in groups['kube_control_plane'] + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" diff --git a/kubespray/extra_playbooks/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta3.j2 b/kubespray/extra_playbooks/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta3.j2 new file mode 100644 index 0000000..64c3db9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta3.j2 @@ -0,0 +1,32 @@ +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +discovery: + bootstrapToken: +{% if kubeadm_config_api_fqdn is defined %} + apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }} +{% else %} + apiServerEndpoint: {{ kubeadm_discovery_address }} +{% endif %} + token: {{ kubeadm_token }} +{% if kubeadm_ca_hash.stdout is defined %} + caCertHashes: + - sha256:{{ kubeadm_ca_hash.stdout }} +{% else %} + unsafeSkipCAVerification: true +{% endif %} + timeout: {{ discovery_timeout }} + tlsBootstrapToken: {{ kubeadm_token }} +caCertPath: {{ kube_cert_dir }}/ca.crt +nodeRegistration: + name: '{{ kube_override_hostname }}' + criSocket: {{ cri_socket }} +{% if 'calico_rr' in group_names and 'kube_node' not in group_names %} + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/calico-rr +{% endif %} +{% if kubeadm_patches is defined and kubeadm_patches.enabled %} +patches: + directory: {{ kubeadm_patches.dest_dir }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node-label/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes/node-label/tasks/main.yml new file mode 100644 index 0000000..f91e7f4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node-label/tasks/main.yml @@ -0,0 +1,49 @@ +--- +- name: Kubernetes Apps | Wait for kube-apiserver + uri: + url: "{{ kube_apiserver_endpoint }}/healthz" + validate_certs: no + client_cert: "{{ kube_apiserver_client_cert }}" + client_key: "{{ kube_apiserver_client_key }}" + register: result + until: 
result.status == 200 + retries: 10 + delay: 6 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Set role node label to empty list + set_fact: + role_node_labels: [] + +- name: Node label for nvidia GPU nodes + set_fact: + role_node_labels: "{{ role_node_labels + [ 'nvidia.com/gpu=true' ] }}" + when: + - nvidia_gpu_nodes is defined + - nvidia_accelerator_enabled|bool + - inventory_hostname in nvidia_gpu_nodes + +- name: Set inventory node label to empty list + set_fact: + inventory_node_labels: [] + +- name: Populate inventory node label + set_fact: + inventory_node_labels: "{{ inventory_node_labels + [ '%s=%s'|format(item.key, item.value) ] }}" + loop: "{{ node_labels|d({})|dict2items }}" + when: + - node_labels is defined + - node_labels is mapping + +- debug: # noqa unnamed-task + var: role_node_labels +- debug: # noqa unnamed-task + var: inventory_node_labels + +- name: Set label to node + command: >- + {{ kubectl }} label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true + loop: "{{ role_node_labels + inventory_node_labels }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + changed_when: false +... diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes/node/defaults/main.yml new file mode 100644 index 0000000..8be6174 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/defaults/main.yml @@ -0,0 +1,235 @@ +--- +# advertised host IP for kubelet. This affects network plugin config. Take caution +kubelet_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}{{ (',' + ip6) if enable_dual_stack_networks and ip6 is defined else '' }}" + +# bind address for kubelet. Set to 0.0.0.0 to listen on all interfaces +kubelet_bind_address: "{{ ip | default('0.0.0.0') }}" + +# resolv.conf to base dns config +kube_resolv_conf: "/etc/resolv.conf" + +# Set to empty to avoid cgroup creation +kubelet_enforce_node_allocatable: "\"\"" + +# Set runtime and kubelet cgroups when using systemd as cgroup driver (default) +kubelet_runtime_cgroups: "/systemd/system.slice" +kubelet_kubelet_cgroups: "/systemd/system.slice" + +# Set runtime and kubelet cgroups when using cgroupfs as cgroup driver +kubelet_runtime_cgroups_cgroupfs: "/system.slice/containerd.service" +kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service" + +### fail with swap on (default true) +kubelet_fail_swap_on: true + +# Set systemd service hardening features +kubelet_systemd_hardening: false + +# List of secure IPs for kubelet +kubelet_secure_addresses: >- + {%- for host in groups['kube_control_plane'] -%} + {{ hostvars[host]['ip'] | default(fallback_ips[host]) }}{{ ' ' if not loop.last else '' }} + {%- endfor -%} + +# Reserve this space for kube resources +kube_memory_reserved: 256Mi +kube_cpu_reserved: 100m +# kube_ephemeral_storage_reserved: 2Gi +# kube_pid_reserved: "1000" +# Reservation for master hosts +kube_master_memory_reserved: 512Mi +kube_master_cpu_reserved: 200m +# kube_master_ephemeral_storage_reserved: 2Gi +# kube_master_pid_reserved: "1000" + +# Set to true to reserve resources for system daemons +system_reserved: false +system_memory_reserved: 512Mi +system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +# system_pid_reserved: "1000" +# Reservation for master hosts +system_master_memory_reserved: 256Mi +system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi +# system_master_pid_reserved: "1000" + +## Eviction 
Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +eviction_hard: {} +eviction_hard_control_plane: {} + +kubelet_status_update_frequency: 10s + +# kube-vip +kube_vip_version: v0.5.5 + +kube_vip_arp_enabled: false +kube_vip_interface: +kube_vip_services_interface: +kube_vip_cidr: 32 +kube_vip_controlplane_enabled: false +kube_vip_ddns_enabled: false +kube_vip_services_enabled: false +kube_vip_leader_election_enabled: "{{ kube_vip_arp_enabled }}" +kube_vip_bgp_enabled: false +kube_vip_bgp_routerid: +kube_vip_local_as: 65000 +kube_vip_bgp_peeraddress: +kube_vip_bgp_peerpass: +kube_vip_bgp_peeras: 65000 +kube_vip_bgppeers: +kube_vip_address: + +# Requests for load balancer app +loadbalancer_apiserver_memory_requests: 32M +loadbalancer_apiserver_cpu_requests: 25m + +loadbalancer_apiserver_keepalive_timeout: 5m + +# Uncomment if you need to enable deprecated runtimes +# kube_api_runtime_config: +# - apps/v1beta1=true +# - apps/v1beta2=true +# - extensions/v1beta1/daemonsets=true +# - extensions/v1beta1/deployments=true +# - extensions/v1beta1/replicasets=true +# - extensions/v1beta1/networkpolicies=true +# - extensions/v1beta1/podsecuritypolicies=true + +# A port range to reserve for services with NodePort visibility. +# Inclusive at both ends of the range. +kube_apiserver_node_port_range: "30000-32767" + +# Configure the amount of pods able to run on single node +# default is equal to application default +kubelet_max_pods: 110 + +# Sets the maximum number of processes running per Pod +# Default value -1 = unlimited +kubelet_pod_pids_limit: -1 + +## Support parameters to be passed to kubelet via kubelet-config.yaml +kubelet_config_extra_args: {} + +## Parameters to be passed to kubelet via kubelet-config.yaml when cgroupfs is used as cgroup driver +kubelet_config_extra_args_cgroupfs: + systemCgroups: /system.slice + cgroupRoot: / + +## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not masters +kubelet_node_config_extra_args: {} + +# Maximum number of container log files that can be present for a container. +kubelet_logfiles_max_nr: 5 + +# Maximum size of the container log file before it is rotated +kubelet_logfiles_max_size: 10Mi + +## Support custom flags to be passed to kubelet +kubelet_custom_flags: [] + +## Support custom flags to be passed to kubelet only on nodes, not masters +kubelet_node_custom_flags: [] + +# If non-empty, will use this string as identification instead of the actual hostname +kube_override_hostname: >- + {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- else -%} + {{ inventory_hostname }} + {%- endif -%} + +# The read-only port for the Kubelet to serve on with no authentication/authorization. +kube_read_only_port: 0 + +# Port for healthz for Kubelet +kubelet_healthz_port: 10248 + +# Bind address for healthz for Kubelet +kubelet_healthz_bind_address: 127.0.0.1 + +# sysctl_file_path to add sysctl conf to +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +# For the openstack integration kubelet will need credentials to access +# openstack apis like nova and cinder. Per default this values will be +# read from the environment. 
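+# Illustrative example of the environment expected when the OpenStack cloud
+# provider is used (placeholder values, typically sourced from an openrc file):
+#   export OS_AUTH_URL=https://keystone.example.com:5000/v3
+#   export OS_USERNAME=kubespray
+#   export OS_PASSWORD=********
+#   export OS_REGION_NAME=RegionOne
+#   export OS_PROJECT_ID=<project uuid>
+#   export OS_USER_DOMAIN_NAME=Default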
+openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +openstack_username: "{{ lookup('env','OS_USERNAME') }}" +openstack_password: "{{ lookup('env','OS_PASSWORD') }}" +openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" +openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true),true) }}" +openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" +openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" +openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" + +# For the vsphere integration, kubelet will need credentials to access +# vsphere apis +# Documentation regarding these values can be found +# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105 +vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}" +vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}" +vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}" +vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}" +vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}" +vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}" +vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}" +vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}" +vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}" + +vsphere_scsi_controller_type: pvscsi +# vsphere_public_network is name of the network the VMs are joined to +vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}" + +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_vnet_name: +# azure_route_table_name: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard +# Sku of Load Balancer and Public IP. Candidate values are: basic and standard. +azure_loadbalancer_sku: basic +# excludes master nodes from standard load balancer. +azure_exclude_master_from_standard_lb: true +# disables the outbound SNAT for public load balancer rules +azure_disable_outbound_snat: false +# use instance metadata service where possible +azure_use_instance_metadata: true +# use specific Azure API endpoints +azure_cloud: AzurePublicCloud + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. 
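+# For example, a hardened configuration could pin TLS 1.2 and a small ECDHE/GCM
+# subset (illustrative selection, not a default of this role):
+#   tls_min_version: "VersionTLS12"
+#   tls_cipher_suites:
+#     - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+#     - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+# The full set of candidate values: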
+# tls_cipher_suites: +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/handlers/main.yml b/kubespray/extra_playbooks/roles/kubernetes/node/handlers/main.yml new file mode 100644 index 0000000..512b4e8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/handlers/main.yml @@ -0,0 +1,15 @@ +--- +- name: Node | restart kubelet + command: /bin/true + notify: + - Kubelet | reload systemd + - Kubelet | restart kubelet + +- name: Kubelet | reload systemd + systemd: + daemon_reload: true + +- name: Kubelet | restart kubelet + service: + name: kubelet + state: restarted diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml new file mode 100644 index 0000000..62337fc --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml @@ -0,0 +1,82 @@ +--- +- name: check azure_tenant_id value + fail: + msg: "azure_tenant_id is missing" + when: azure_tenant_id is not defined or not azure_tenant_id + +- name: check azure_subscription_id value + fail: + msg: "azure_subscription_id is missing" + when: azure_subscription_id is not defined or not azure_subscription_id + +- name: check azure_aad_client_id value + fail: + msg: "azure_aad_client_id is missing" + when: azure_aad_client_id is not defined or not azure_aad_client_id + +- name: check azure_aad_client_secret value + fail: + msg: "azure_aad_client_secret is missing" + when: azure_aad_client_secret is not defined or not azure_aad_client_secret + +- name: check azure_resource_group value + fail: + msg: "azure_resource_group is missing" + when: azure_resource_group is not defined or not azure_resource_group + +- name: check azure_location value + fail: + msg: "azure_location is missing" + when: azure_location is not defined or not azure_location + +- name: check azure_subnet_name value + fail: + msg: "azure_subnet_name is missing" + when: azure_subnet_name is not defined or not azure_subnet_name + +- name: check azure_security_group_name value + fail: + msg: "azure_security_group_name is missing" + when: azure_security_group_name is not defined or not azure_security_group_name + +- name: check azure_vnet_name value + fail: + msg: "azure_vnet_name is missing" + when: azure_vnet_name is not defined or not azure_vnet_name + +- name: check azure_vnet_resource_group value + fail: + msg: "azure_vnet_resource_group is missing" + when: azure_vnet_resource_group is not defined or not azure_vnet_resource_group + +- name: check azure_route_table_name value + fail: + msg: 
"azure_route_table_name is missing" + when: azure_route_table_name is not defined or not azure_route_table_name + +- name: check azure_loadbalancer_sku value + fail: + msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. Supported values are 'basic', 'standard'" + when: azure_loadbalancer_sku not in ["basic", "standard"] + +- name: "check azure_exclude_master_from_standard_lb is a bool" + assert: + that: azure_exclude_master_from_standard_lb |type_debug == 'bool' + +- name: "check azure_disable_outbound_snat is a bool" + assert: + that: azure_disable_outbound_snat |type_debug == 'bool' + +- name: "check azure_use_instance_metadata is a bool" + assert: + that: azure_use_instance_metadata |type_debug == 'bool' + +- name: check azure_vmtype value + fail: + msg: "azure_vmtype is missing. Supported values are 'standard' or 'vmss'" + when: azure_vmtype is not defined or not azure_vmtype + +- name: check azure_cloud value + fail: + msg: "azure_cloud has an invalid value '{{ azure_cloud }}'. Supported values are 'AzureChinaCloud', 'AzureGermanCloud', 'AzurePublicCloud', 'AzureUSGovernmentCloud'." + when: azure_cloud not in ["AzureChinaCloud", "AzureGermanCloud", "AzurePublicCloud", "AzureUSGovernmentCloud"] diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml new file mode 100644 index 0000000..6ff1732 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml @@ -0,0 +1,34 @@ +--- +- name: check openstack_auth_url value + fail: + msg: "openstack_auth_url is missing" + when: openstack_auth_url is not defined or not openstack_auth_url + +- name: check openstack_username value + fail: + msg: "openstack_username is missing" + when: openstack_username is not defined or not openstack_username + +- name: check openstack_password value + fail: + msg: "openstack_password is missing" + when: openstack_password is not defined or not openstack_password + +- name: check openstack_region value + fail: + msg: "openstack_region is missing" + when: openstack_region is not defined or not openstack_region + +- name: check openstack_tenant_id value + fail: + msg: "one of openstack_tenant_id or openstack_trust_id must be specified" + when: + - openstack_tenant_id is not defined or not openstack_tenant_id + - openstack_trust_id is not defined + +- name: check openstack_trust_id value + fail: + msg: "one of openstack_tenant_id or openstack_trust_id must be specified" + when: + - openstack_trust_id is not defined or not openstack_trust_id + - openstack_tenant_id is not defined diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml new file mode 100644 index 0000000..873eb71 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml @@ -0,0 +1,22 @@ +--- +- name: check vsphere environment variables + fail: + msg: "{{ item.name }} is missing" + when: item.value is not defined or not item.value + with_items: + - name: vsphere_vcenter_ip + value: "{{ vsphere_vcenter_ip }}" + - name: vsphere_vcenter_port + value: "{{ vsphere_vcenter_port }}" + - name: vsphere_user + value: "{{ vsphere_user }}" + - name: vsphere_password + value: "{{ vsphere_password 
}}" + - name: vsphere_datacenter + value: "{{ vsphere_datacenter }}" + - name: vsphere_datastore + value: "{{ vsphere_datastore }}" + - name: vsphere_working_dir + value: "{{ vsphere_working_dir }}" + - name: vsphere_insecure + value: "{{ vsphere_insecure }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/facts.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/facts.yml new file mode 100644 index 0000000..97d52e8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/facts.yml @@ -0,0 +1,57 @@ +--- +- block: + - name: look up docker cgroup driver + shell: "docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'" + register: docker_cgroup_driver_result + changed_when: false + check_mode: no + + - name: set kubelet_cgroup_driver_detected fact for docker + set_fact: + kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}" + when: container_manager == 'docker' + +- block: + - name: look up crio cgroup driver + shell: "{{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'" + register: crio_cgroup_driver_result + changed_when: false + + - name: set kubelet_cgroup_driver_detected fact for crio + set_fact: + kubelet_cgroup_driver_detected: "{{ crio_cgroup_driver_result.stdout }}" + when: container_manager == 'crio' + +- name: set kubelet_cgroup_driver_detected fact for containerd + set_fact: + kubelet_cgroup_driver_detected: >- + {%- if containerd_use_systemd_cgroup -%}systemd{%- else -%}cgroupfs{%- endif -%} + when: container_manager == 'containerd' + +- name: set kubelet_cgroup_driver + set_fact: + kubelet_cgroup_driver: "{{ kubelet_cgroup_driver_detected }}" + when: kubelet_cgroup_driver is undefined + +- name: set kubelet_cgroups options when cgroupfs is used + set_fact: + kubelet_runtime_cgroups: "{{ kubelet_runtime_cgroups_cgroupfs }}" + kubelet_kubelet_cgroups: "{{ kubelet_kubelet_cgroups_cgroupfs }}" + when: kubelet_cgroup_driver == 'cgroupfs' + +- name: set kubelet_config_extra_args options when cgroupfs is used + vars: + set_fact: + kubelet_config_extra_args: "{{ kubelet_config_extra_args | combine(kubelet_config_extra_args_cgroupfs) }}" + when: kubelet_cgroup_driver == 'cgroupfs' + +- name: os specific vars + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + skip: true diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/install.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/install.yml new file mode 100644 index 0000000..cf7a1d8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/install.yml @@ -0,0 +1,22 @@ +--- +- name: install | Copy kubeadm binary from download dir + copy: + src: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}" + dest: "{{ bin_dir }}/kubeadm" + mode: 0755 + remote_src: true + tags: + - kubeadm + when: + - not inventory_hostname in groups['kube_control_plane'] + +- name: install | Copy kubelet binary from download dir + copy: + src: "{{ local_release_dir }}/kubelet-{{ kube_version }}-{{ image_arch }}" + dest: "{{ bin_dir }}/kubelet" + mode: 0755 + remote_src: true + tags: + - kubelet + - upgrade + notify: Node | 
restart kubelet diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/kubelet.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/kubelet.yml new file mode 100644 index 0000000..c551f77 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/kubelet.yml @@ -0,0 +1,52 @@ +--- +- name: Set kubelet api version to v1beta1 + set_fact: + kubeletConfig_api_version: v1beta1 + tags: + - kubelet + - kubeadm + +- name: Write kubelet environment config file (kubeadm) + template: + src: "kubelet.env.{{ kubeletConfig_api_version }}.j2" + dest: "{{ kube_config_dir }}/kubelet.env" + setype: "{{ (preinstall_selinux_state != 'disabled') | ternary('etc_t', omit) }}" + backup: yes + mode: 0640 + notify: Node | restart kubelet + tags: + - kubelet + - kubeadm + +- name: Write kubelet config file + template: + src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2" + dest: "{{ kube_config_dir }}/kubelet-config.yaml" + mode: 0640 + notify: Kubelet | restart kubelet + tags: + - kubelet + - kubeadm + +- name: Write kubelet systemd init file + template: + src: "kubelet.service.j2" + dest: "/etc/systemd/system/kubelet.service" + backup: "yes" + mode: 0644 + notify: Node | restart kubelet + tags: + - kubelet + - kubeadm + +- name: flush_handlers and reload-systemd + meta: flush_handlers + +- name: Enable kubelet + service: + name: kubelet + enabled: yes + state: started + tags: + - kubelet + notify: Kubelet | restart kubelet diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml new file mode 100644 index 0000000..c8e0108 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml @@ -0,0 +1,34 @@ +--- +- name: haproxy | Cleanup potentially deployed nginx-proxy + file: + path: "{{ kube_manifest_dir }}/nginx-proxy.yml" + state: absent + +- name: haproxy | Make haproxy directory + file: + path: "{{ haproxy_config_dir }}" + state: directory + mode: 0755 + owner: root + +- name: haproxy | Write haproxy configuration + template: + src: "loadbalancer/haproxy.cfg.j2" + dest: "{{ haproxy_config_dir }}/haproxy.cfg" + owner: root + mode: 0755 + backup: yes + +- name: haproxy | Get checksum from config + stat: + path: "{{ haproxy_config_dir }}/haproxy.cfg" + get_attributes: no + get_checksum: yes + get_mime: no + register: haproxy_stat + +- name: haproxy | Write static pod + template: + src: manifests/haproxy.manifest.j2 + dest: "{{ kube_manifest_dir }}/haproxy.yml" + mode: 0640 diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml new file mode 100644 index 0000000..e12bd9b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml @@ -0,0 +1,13 @@ +--- +- name: kube-vip | Check cluster settings for kube-vip + fail: + msg: "kube-vip require kube_proxy_strict_arp = true, see https://github.com/kube-vip/kube-vip/blob/main/docs/kubernetes/arp/index.md" + when: + - kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp + - kube_vip_arp_enabled + +- name: kube-vip | Write static pod + template: + src: manifests/kube-vip.manifest.j2 + dest: "{{ kube_manifest_dir }}/kube-vip.yml" + mode: 0640 diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml new 
file mode 100644 index 0000000..e176cb9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml @@ -0,0 +1,34 @@ +--- +- name: haproxy | Cleanup potentially deployed haproxy + file: + path: "{{ kube_manifest_dir }}/haproxy.yml" + state: absent + +- name: nginx-proxy | Make nginx directory + file: + path: "{{ nginx_config_dir }}" + state: directory + mode: 0700 + owner: root + +- name: nginx-proxy | Write nginx-proxy configuration + template: + src: "loadbalancer/nginx.conf.j2" + dest: "{{ nginx_config_dir }}/nginx.conf" + owner: root + mode: 0755 + backup: yes + +- name: nginx-proxy | Get checksum from config + stat: + path: "{{ nginx_config_dir }}/nginx.conf" + get_attributes: no + get_checksum: yes + get_mime: no + register: nginx_stat + +- name: nginx-proxy | Write static pod + template: + src: manifests/nginx-proxy.manifest.j2 + dest: "{{ kube_manifest_dir }}/nginx-proxy.yml" + mode: 0640 diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/main.yml new file mode 100644 index 0000000..59dc300 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/main.yml @@ -0,0 +1,193 @@ +--- +- import_tasks: facts.yml + tags: + - facts + +- import_tasks: pre_upgrade.yml + tags: + - kubelet + +- name: Ensure /var/lib/cni exists + file: + path: /var/lib/cni + state: directory + mode: 0755 + +- import_tasks: install.yml + tags: + - kubelet + +- import_tasks: loadbalancer/kube-vip.yml + when: + - is_kube_master + - kube_vip_enabled + tags: + - kube-vip + +- import_tasks: loadbalancer/nginx-proxy.yml + when: + - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0' + - loadbalancer_apiserver_localhost + - loadbalancer_apiserver_type == 'nginx' + tags: + - nginx + +- import_tasks: loadbalancer/haproxy.yml + when: + - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0' + - loadbalancer_apiserver_localhost + - loadbalancer_apiserver_type == 'haproxy' + tags: + - haproxy + +- name: Ensure nodePort range is reserved + sysctl: + name: net.ipv4.ip_local_reserved_ports + value: "{{ kube_apiserver_node_port_range }}" + sysctl_set: yes + sysctl_file: "{{ sysctl_file_path }}" + state: present + reload: yes + when: kube_apiserver_node_port_range is defined + tags: + - kube-proxy + +- name: Verify if br_netfilter module exists + command: "modinfo br_netfilter" + environment: + PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH's conservative path management + register: modinfo_br_netfilter + failed_when: modinfo_br_netfilter.rc not in [0, 1] + changed_when: false + check_mode: no + +- name: Verify br_netfilter module path exists + file: + path: /etc/modules-load.d + state: directory + mode: 0755 + +- name: Enable br_netfilter module + modprobe: + name: br_netfilter + state: present + when: modinfo_br_netfilter.rc == 0 + +- name: Persist br_netfilter module + copy: + dest: /etc/modules-load.d/kubespray-br_netfilter.conf + content: br_netfilter + mode: 0644 + when: modinfo_br_netfilter.rc == 0 + +# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module +- name: Check if bridge-nf-call-iptables key exists + command: "sysctl net.bridge.bridge-nf-call-iptables" + failed_when: false + changed_when: false + check_mode: no + register: sysctl_bridge_nf_call_iptables + +- name: Enable bridge-nf-call tables + sysctl: + name: "{{ item }}" + state: present + sysctl_file: "{{ sysctl_file_path }}" + 
value: "1" + reload: yes + when: sysctl_bridge_nf_call_iptables.rc == 0 + with_items: + - net.bridge.bridge-nf-call-iptables + - net.bridge.bridge-nf-call-arptables + - net.bridge.bridge-nf-call-ip6tables + +- name: Modprobe Kernel Module for IPVS + modprobe: + name: "{{ item }}" + state: present + with_items: + - ip_vs + - ip_vs_rr + - ip_vs_wrr + - ip_vs_sh + when: kube_proxy_mode == 'ipvs' + tags: + - kube-proxy + +- name: Modprobe nf_conntrack_ipv4 + modprobe: + name: nf_conntrack_ipv4 + state: present + register: modprobe_nf_conntrack_ipv4 + ignore_errors: true # noqa ignore-errors + when: + - kube_proxy_mode == 'ipvs' + tags: + - kube-proxy + +- name: Persist ip_vs modules + copy: + dest: /etc/modules-load.d/kube_proxy-ipvs.conf + mode: 0644 + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + {% if modprobe_nf_conntrack_ipv4 is success -%} + nf_conntrack_ipv4 + {%- endif -%} + when: kube_proxy_mode == 'ipvs' + tags: + - kube-proxy + +- include_tasks: "cloud-credentials/{{ cloud_provider }}-credential-check.yml" + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] + tags: + - cloud-provider + - facts + +- name: Test if openstack_cacert is a base64 string + set_fact: + openstack_cacert_is_base64: "{% if openstack_cacert is search ('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}" + when: + - cloud_provider is defined + - cloud_provider == 'openstack' + - openstack_cacert is defined + - openstack_cacert | length > 0 + + +- name: Write cacert file + copy: + src: "{{ openstack_cacert if not openstack_cacert_is_base64 else omit }}" + content: "{{ openstack_cacert | b64decode if openstack_cacert_is_base64 else omit }}" + dest: "{{ kube_config_dir }}/openstack-cacert.pem" + group: "{{ kube_cert_group }}" + mode: 0640 + when: + - cloud_provider is defined + - cloud_provider == 'openstack' + - openstack_cacert is defined + - openstack_cacert | length > 0 + tags: + - cloud-provider + +- name: Write cloud-config + template: + src: "cloud-configs/{{ cloud_provider }}-cloud-config.j2" + dest: "{{ kube_config_dir }}/cloud_config" + group: "{{ kube_cert_group }}" + mode: 0640 + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack', 'azure', 'vsphere', 'aws', 'gce' ] + notify: Node | restart kubelet + tags: + - cloud-provider + +- import_tasks: kubelet.yml + tags: + - kubelet + - kubeadm diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/tasks/pre_upgrade.yml b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/pre_upgrade.yml new file mode 100644 index 0000000..d9c2d07 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/tasks/pre_upgrade.yml @@ -0,0 +1,48 @@ +--- +- name: "Pre-upgrade | check if kubelet container exists" + shell: >- + set -o pipefail && + {% if container_manager in ['crio', 'docker'] %} + {{ docker_bin_dir }}/docker ps -af name=kubelet | grep kubelet + {% elif container_manager == 'containerd' %} + {{ bin_dir }}/crictl ps --all --name kubelet | grep kubelet + {% endif %} + args: + executable: /bin/bash + failed_when: false + changed_when: false + check_mode: no + register: kubelet_container_check + +- name: "Pre-upgrade | copy /var/lib/cni from kubelet" + command: >- + {% if container_manager in ['crio', 'docker'] %} + docker cp kubelet:/var/lib/cni /var/lib/cni + {% elif container_manager == 'containerd' %} + ctr run --rm --mount type=bind,src=/var/lib/cni,dst=/cnilibdir,options=rbind:rw kubelet kubelet-tmp sh -c 'cp /var/lib/cni/* 
/cnilibdir/' + {% endif %} + args: + creates: "/var/lib/cni" + failed_when: false + when: kubelet_container_check.rc == 0 + +- name: "Pre-upgrade | ensure kubelet container service is stopped if using host deployment" + service: + name: kubelet + state: stopped + when: kubelet_container_check.rc == 0 + +- name: "Pre-upgrade | ensure kubelet container is removed if using host deployment" + shell: >- + {% if container_manager in ['crio', 'docker'] %} + {{ docker_bin_dir }}/docker rm -fv kubelet + {% elif container_manager == 'containerd' %} + {{ bin_dir }}/crictl stop kubelet && {{ bin_dir }}/crictl rm kubelet + {% endif %} + failed_when: false + changed_when: false + register: remove_kubelet_container + retries: 4 + until: remove_kubelet_container.rc == 0 + delay: 5 + when: kubelet_container_check.rc == 0 diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/aws-cloud-config.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/aws-cloud-config.j2 new file mode 100644 index 0000000..f6d0c3d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/aws-cloud-config.j2 @@ -0,0 +1,11 @@ +[Global] +zone={{ aws_zone|default("") }} +vpc={{ aws_vpc|default("") }} +subnetId={{ aws_subnet_id|default("") }} +routeTableId={{ aws_route_table_id|default("") }} +roleArn={{ aws_role_arn|default("") }} +kubernetesClusterTag={{ aws_kubernetes_cluster_tag|default("") }} +kubernetesClusterId={{ aws_kubernetes_cluster_id|default("") }} +disableSecurityGroupIngress={{ "true" if aws_disable_security_group_ingress|default(False) else "false" }} +disableStrictZoneCheck={{ "true" if aws_disable_strict_zone_check|default(False) else "false" }} +elbSecurityGroup={{ aws_elb_security_group|default("") }} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/azure-cloud-config.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/azure-cloud-config.j2 new file mode 100644 index 0000000..2b1c101 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/azure-cloud-config.j2 @@ -0,0 +1,26 @@ +{ + "cloud": "{{ azure_cloud }}", + "tenantId": "{{ azure_tenant_id }}", + "subscriptionId": "{{ azure_subscription_id }}", + "aadClientId": "{{ azure_aad_client_id }}", + "aadClientSecret": "{{ azure_aad_client_secret }}", + "resourceGroup": "{{ azure_resource_group }}", + "location": "{{ azure_location }}", + "subnetName": "{{ azure_subnet_name }}", + "securityGroupName": "{{ azure_security_group_name }}", + "securityGroupResourceGroup": "{{ azure_security_group_resource_group | default(azure_vnet_resource_group) }}", + "vnetName": "{{ azure_vnet_name }}", + "vnetResourceGroup": "{{ azure_vnet_resource_group }}", + "routeTableName": "{{ azure_route_table_name }}", + "routeTableResourceGroup": "{{ azure_route_table_resource_group | default(azure_vnet_resource_group) }}", + "vmType": "{{ azure_vmtype }}", +{% if azure_primary_availability_set_name is defined %} + "primaryAvailabilitySetName": "{{ azure_primary_availability_set_name }}", +{%endif%} + "useInstanceMetadata": {{azure_use_instance_metadata | lower }}, +{% if azure_loadbalancer_sku == "standard" %} + "excludeMasterFromStandardLB": {{ azure_exclude_master_from_standard_lb | lower }}, + "disableOutboundSNAT": {{ azure_disable_outbound_snat | lower }}, +{% endif%} + "loadBalancerSku": "{{ azure_loadbalancer_sku }}" +} diff --git 
a/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/gce-cloud-config.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/gce-cloud-config.j2 new file mode 100644 index 0000000..f4cac50 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/gce-cloud-config.j2 @@ -0,0 +1,3 @@ +[global] +node-tags = {{ gce_node_tags }} + diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/openstack-cloud-config.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/openstack-cloud-config.j2 new file mode 100644 index 0000000..b1f8e0a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/openstack-cloud-config.j2 @@ -0,0 +1,54 @@ +[Global] +auth-url="{{ openstack_auth_url }}" +username="{{ openstack_username }}" +password="{{ openstack_password }}" +region="{{ openstack_region }}" +{% if openstack_trust_id is defined and openstack_trust_id != "" %} +trust-id="{{ openstack_trust_id }}" +{% else %} +tenant-id="{{ openstack_tenant_id }}" +{% endif %} +{% if openstack_tenant_name is defined and openstack_tenant_name != "" %} +tenant-name="{{ openstack_tenant_name }}" +{% endif %} +{% if openstack_domain_name is defined and openstack_domain_name != "" %} +domain-name="{{ openstack_domain_name }}" +{% elif openstack_domain_id is defined and openstack_domain_id != "" %} +domain-id ="{{ openstack_domain_id }}" +{% endif %} +{% if openstack_cacert is defined and openstack_cacert != "" %} +ca-file="{{ kube_config_dir }}/openstack-cacert.pem" +{% endif %} + +[BlockStorage] +{% if openstack_blockstorage_version is defined %} +bs-version={{ openstack_blockstorage_version }} +{% endif %} +{% if openstack_blockstorage_ignore_volume_az is defined and openstack_blockstorage_ignore_volume_az|bool %} +ignore-volume-az={{ openstack_blockstorage_ignore_volume_az }} +{% endif %} +{% if node_volume_attach_limit is defined and node_volume_attach_limit != "" %} +node-volume-attach-limit="{{ node_volume_attach_limit }}" +{% endif %} + +{% if openstack_lbaas_enabled and openstack_lbaas_subnet_id is defined %} +[LoadBalancer] +subnet-id={{ openstack_lbaas_subnet_id }} +{% if openstack_lbaas_floating_network_id is defined %} +floating-network-id={{ openstack_lbaas_floating_network_id }} +{% endif %} +{% if openstack_lbaas_use_octavia is defined %} +use-octavia={{ openstack_lbaas_use_octavia }} +{% endif %} +{% if openstack_lbaas_method is defined %} +lb-method={{ openstack_lbaas_method }} +{% endif %} +{% if openstack_lbaas_provider is defined %} +lb-provider={{ openstack_lbaas_provider }} +{% endif %} + +create-monitor={{ openstack_lbaas_create_monitor }} +monitor-delay={{ openstack_lbaas_monitor_delay }} +monitor-timeout={{ openstack_lbaas_monitor_timeout }} +monitor-max-retries={{ openstack_lbaas_monitor_max_retries }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/vsphere-cloud-config.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/vsphere-cloud-config.j2 new file mode 100644 index 0000000..2cda7f6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/cloud-configs/vsphere-cloud-config.j2 @@ -0,0 +1,36 @@ +[Global] +user = "{{ vsphere_user }}" +password = "{{ vsphere_password }}" +port = {{ vsphere_vcenter_port }} +insecure-flag = {{ vsphere_insecure }} + +datacenters = "{{ vsphere_datacenter }}" + +[VirtualCenter "{{ vsphere_vcenter_ip }}"] + + +[Workspace] 
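+# The Workspace section sets the placement defaults (vCenter server, datacenter,
+# VM folder, datastore) used by the in-tree vSphere cloud provider when
+# provisioning volumes; values come from the VSPHERE_* environment variables
+# read in the role defaults.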
+server = "{{ vsphere_vcenter_ip }}" +datacenter = "{{ vsphere_datacenter }}" +folder = "{{ vsphere_working_dir }}" +default-datastore = "{{ vsphere_datastore }}" +{% if vsphere_resource_pool is defined and vsphere_resource_pool != "" %} +resourcepool-path = "{{ vsphere_resource_pool }}" +{% endif %} + + +[Disk] +scsicontrollertype = {{ vsphere_scsi_controller_type }} + +{% if vsphere_public_network is defined and vsphere_public_network != "" %} +[Network] +public-network = {{ vsphere_public_network }} +{% endif %} + +[Labels] +{% if vsphere_zone_category is defined and vsphere_zone_category != "" %} +zone = {{ vsphere_zone_category }} +{% endif %} +{% if vsphere_region_category is defined and vsphere_region_category != "" %} +region = {{ vsphere_region_category }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/http-proxy.conf.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/http-proxy.conf.j2 new file mode 100644 index 0000000..e790477 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/http-proxy.conf.j2 @@ -0,0 +1,2 @@ +[Service] +Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 new file mode 100644 index 0000000..9982f62 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 @@ -0,0 +1,151 @@ +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +nodeStatusUpdateFrequency: "{{ kubelet_status_update_frequency }}" +failSwapOn: {{ kubelet_fail_swap_on|default(true) }} +authentication: + anonymous: + enabled: false + webhook: + enabled: {{ kubelet_authentication_token_webhook }} + x509: + clientCAFile: {{ kube_cert_dir }}/ca.crt +authorization: +{% if kubelet_authorization_mode_webhook %} + mode: Webhook +{% else %} + mode: AlwaysAllow +{% endif %} +{% if kubelet_enforce_node_allocatable is defined and kubelet_enforce_node_allocatable != "\"\"" %} +{% set kubelet_enforce_node_allocatable_list = kubelet_enforce_node_allocatable.split() %} +enforceNodeAllocatable: +{% for item in kubelet_enforce_node_allocatable_list %} +- {{ item }} +{% endfor %} +{% endif %} +staticPodPath: {{ kube_manifest_dir }} +cgroupDriver: {{ kubelet_cgroup_driver | default('systemd') }} +containerLogMaxFiles: {{ kubelet_logfiles_max_nr }} +containerLogMaxSize: {{ kubelet_logfiles_max_size }} +maxPods: {{ kubelet_max_pods }} +podPidsLimit: {{ kubelet_pod_pids_limit }} +address: {{ kubelet_bind_address }} +readOnlyPort: {{ kube_read_only_port }} +healthzPort: {{ kubelet_healthz_port }} +healthzBindAddress: {{ kubelet_healthz_bind_address }} +kubeletCgroups: {{ kubelet_kubelet_cgroups }} +clusterDomain: {{ dns_domain }} +{% if kubelet_protect_kernel_defaults|bool %} +protectKernelDefaults: true +{% endif %} +{% if kubelet_rotate_certificates|bool %} +rotateCertificates: true +{% endif %} +{% if kubelet_rotate_server_certificates|bool %} +serverTLSBootstrap: true +{% endif %} +{# DNS settings for kubelet #} +{% if enable_nodelocaldns %} +{% set kubelet_cluster_dns = [nodelocaldns_ip] %} +{% elif dns_mode in ['coredns'] %} +{% set kubelet_cluster_dns = [skydns_server] %} +{% elif dns_mode == 'coredns_dual' %} +{% set kubelet_cluster_dns = 
[skydns_server,skydns_server_secondary] %} +{% elif dns_mode == 'manual' %} +{% set kubelet_cluster_dns = [manual_dns_server] %} +{% else %} +{% set kubelet_cluster_dns = [] %} +{% endif %} +clusterDNS: +{% for dns_address in kubelet_cluster_dns %} +- {{ dns_address }} +{% endfor %} +{# Node reserved CPU/memory #} +kubeReserved: +{% if is_kube_master|bool %} + cpu: {{ kube_master_cpu_reserved }} + memory: {{ kube_master_memory_reserved }} +{% if kube_master_ephemeral_storage_reserved is defined %} + ephemeral-storage: {{ kube_master_ephemeral_storage_reserved }} +{% endif %} +{% if kube_master_pid_reserved is defined %} + pid: "{{ kube_master_pid_reserved }}" +{% endif %} +{% else %} + cpu: {{ kube_cpu_reserved }} + memory: {{ kube_memory_reserved }} +{% if kube_ephemeral_storage_reserved is defined %} + ephemeral-storage: {{ kube_ephemeral_storage_reserved }} +{% endif %} +{% if kube_pid_reserved is defined %} + pid: "{{ kube_pid_reserved }}" +{% endif %} +{% endif %} +{% if system_reserved is defined and system_reserved %} +systemReserved: +{% if is_kube_master|bool %} + cpu: {{ system_master_cpu_reserved }} + memory: {{ system_master_memory_reserved }} +{% if system_master_ephemeral_storage_reserved is defined %} + ephemeral-storage: {{ system_master_ephemeral_storage_reserved }} +{% endif %} +{% if system_master_pid_reserved is defined %} + pid: "{{ system_master_pid_reserved }}" +{% endif %} +{% else %} + cpu: {{ system_cpu_reserved }} + memory: {{ system_memory_reserved }} +{% if system_ephemeral_storage_reserved is defined %} + ephemeral-storage: {{ system_ephemeral_storage_reserved }} +{% endif %} +{% if system_pid_reserved is defined %} + pid: "{{ system_pid_reserved }}" +{% endif %} +{% endif %} +{% endif %} +{% if is_kube_master|bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %} +evictionHard: + {{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }} +{% elif not is_kube_master|bool and eviction_hard is defined and eviction_hard %} +evictionHard: + {{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }} +{% endif %} +resolvConf: "{{ kube_resolv_conf }}" +{% if kubelet_config_extra_args %} +{{ kubelet_config_extra_args | to_nice_yaml(indent=2) }} +{% endif %} +{% if inventory_hostname in groups['kube_node'] and kubelet_node_config_extra_args %} +{{ kubelet_node_config_extra_args | to_nice_yaml(indent=2) }} +{% endif %} +{% if kubelet_feature_gates or kube_feature_gates %} +featureGates: +{% for feature in (kubelet_feature_gates | default(kube_feature_gates, true)) %} + {{ feature|replace("=", ": ") }} +{% endfor %} +{% endif %} +{% if tls_min_version is defined %} +tlsMinVersion: {{ tls_min_version }} +{% endif %} +{% if tls_cipher_suites is defined %} +tlsCipherSuites: +{% for tls in tls_cipher_suites %} +- {{ tls }} +{% endfor %} +{% endif %} +{% if kubelet_event_record_qps %} +eventRecordQPS: {{ kubelet_event_record_qps }} +{% endif %} +shutdownGracePeriod: {{ kubelet_shutdown_grace_period }} +shutdownGracePeriodCriticalPods: {{ kubelet_shutdown_grace_period_critical_pods }} +{% if not kubelet_fail_swap_on|default(true) %} +memorySwap: + swapBehavior: {{ kubelet_swap_behavior|default("LimitedSwap") }} +{% endif %} +{% if kubelet_streaming_connection_idle_timeout is defined %} +streamingConnectionIdleTimeout: {{ kubelet_streaming_connection_idle_timeout }} +{% endif %} +{% if kubelet_make_iptables_util_chains is defined %} +makeIPTablesUtilChains: {{ kubelet_make_iptables_util_chains | bool }} +{% endif %} +{% if 
kubelet_seccomp_default is defined %} +seccompDefault: {{ kubelet_seccomp_default | bool }} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 new file mode 100644 index 0000000..9397d7a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 @@ -0,0 +1,43 @@ +KUBE_LOGTOSTDERR="--logtostderr=true" +KUBE_LOG_LEVEL="--v={{ kube_log_level }}" +KUBELET_ADDRESS="--node-ip={{ kubelet_address }}" +{% if kube_override_hostname|default('') %} +KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" +{% endif %} + +{# Base kubelet args #} +{% set kubelet_args_base -%} +{# start kubeadm specific settings #} +--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ +--config={{ kube_config_dir }}/kubelet-config.yaml \ +--kubeconfig={{ kube_config_dir }}/kubelet.conf \ +{# end kubeadm specific settings #} +--container-runtime=remote \ +--container-runtime-endpoint={{ cri_socket }} \ +--runtime-cgroups={{ kubelet_runtime_cgroups }} \ +{% endset %} + +{# Kubelet node taints for gpu #} +{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %} +{% if inventory_hostname in nvidia_gpu_nodes and node_taints is defined %} +{% set dummy = node_taints.append('nvidia.com/gpu=:NoSchedule') %} +{% elif inventory_hostname in nvidia_gpu_nodes and node_taints is not defined %} +{% set node_taints = [] %} +{% set dummy = node_taints.append('nvidia.com/gpu=:NoSchedule') %} +{% endif %} +{% endif %} + +KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube_node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}" +{% if kubelet_flexvolumes_plugins_dir is defined %} +KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}" +{% endif %} +{% if kube_network_plugin is defined and kube_network_plugin == "cloud" %} +KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet" +{% endif %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce", "external"] %} +KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config" +{% else %} +KUBELET_CLOUDPROVIDER="" +{% endif %} + +PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/kubelet.service.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/kubelet.service.j2 new file mode 100644 index 0000000..feb8374 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/kubelet.service.j2 @@ -0,0 +1,34 @@ +[Unit] +Description=Kubernetes Kubelet Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After={{ container_manager }}.service +{% if container_manager == 'docker' %} +Wants=docker.socket +{% else %} +Wants={{ container_manager }}.service +{% endif %} + +[Service] +EnvironmentFile=-{{ kube_config_dir }}/kubelet.env +ExecStart={{ bin_dir }}/kubelet \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + 
$KUBELET_API_SERVER \ + $KUBELET_ADDRESS \ + $KUBELET_PORT \ + $KUBELET_HOSTNAME \ + $KUBELET_ARGS \ + $DOCKER_SOCKET \ + $KUBELET_NETWORK_PLUGIN \ + $KUBELET_VOLUME_PLUGIN \ + $KUBELET_CLOUDPROVIDER +Restart=always +RestartSec=10s +{% if kubelet_systemd_hardening %} +# Hardening setup +IPAddressDeny=any +IPAddressAllow={{ kubelet_secure_addresses }} +{% endif %} + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 new file mode 100644 index 0000000..1d5d7d9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 @@ -0,0 +1,43 @@ +global + maxconn 4000 + log 127.0.0.1 local0 + +defaults + mode http + log global + option httplog + option dontlognull + option http-server-close + option redispatch + retries 5 + timeout http-request 5m + timeout queue 5m + timeout connect 30s + timeout client {{ loadbalancer_apiserver_keepalive_timeout }} + timeout server 15m + timeout http-keep-alive 30s + timeout check 30s + maxconn 4000 + +{% if loadbalancer_apiserver_healthcheck_port is defined -%} +frontend healthz + bind *:{{ loadbalancer_apiserver_healthcheck_port }} + mode http + monitor-uri /healthz +{% endif %} + +frontend kube_api_frontend + bind 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} + mode tcp + option tcplog + default_backend kube_api_backend + +backend kube_api_backend + mode tcp + balance leastconn + default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100 + option httpchk GET /healthz + http-check expect status 200 + {% for host in groups['kube_control_plane'] -%} + server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }} check check-ssl verify none + {% endfor -%} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 new file mode 100644 index 0000000..fd3e574 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 @@ -0,0 +1,60 @@ +error_log stderr notice; + +worker_processes 2; +worker_rlimit_nofile 130048; +worker_shutdown_timeout 10s; + +events { + multi_accept on; + use epoll; + worker_connections 16384; +} + +stream { + upstream kube_apiserver { + least_conn; + {% for host in groups['kube_control_plane'] -%} + server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }}; + {% endfor -%} + } + + server { + listen 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}; + {% if enable_dual_stack_networks -%} + listen [::]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}; + {% endif -%} + proxy_pass kube_apiserver; + proxy_timeout 10m; + proxy_connect_timeout 1s; + } +} + +http { + aio threads; + aio_write on; + tcp_nopush on; + tcp_nodelay on; + + keepalive_timeout {{ loadbalancer_apiserver_keepalive_timeout }}; + keepalive_requests 100; + reset_timedout_connection on; + server_tokens off; + autoindex off; + + {% if loadbalancer_apiserver_healthcheck_port is defined -%} + server { + listen {{ loadbalancer_apiserver_healthcheck_port }}; + {% if enable_dual_stack_networks -%} + listen [::]:{{ loadbalancer_apiserver_healthcheck_port }}; 
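    # Illustrative note (not part of the original template): a probe against this
    # listener, e.g. curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:<healthcheck_port>/healthz
    # (where <healthcheck_port> stands for loadbalancer_apiserver_healthcheck_port),
    # is expected to print 200; the /healthz and /stub_status locations defined just
    # below serve the empty 200 response and basic nginx connection counters respectively.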
+ {% endif -%} + location /healthz { + access_log off; + return 200; + } + location /stub_status { + stub_status on; + access_log off; + } + } + {% endif %} +} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 new file mode 100644 index 0000000..1efcbae --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + name: haproxy + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-haproxy + annotations: + haproxy-cfg-checksum: "{{ haproxy_stat.stat.checksum }}" +spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + containers: + - name: haproxy + image: {{ haproxy_image_repo }}:{{ haproxy_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: {{ loadbalancer_apiserver_cpu_requests }} + memory: {{ loadbalancer_apiserver_memory_requests }} + {% if loadbalancer_apiserver_healthcheck_port is defined -%} + livenessProbe: + httpGet: + path: /healthz + port: {{ loadbalancer_apiserver_healthcheck_port }} + readinessProbe: + httpGet: + path: /healthz + port: {{ loadbalancer_apiserver_healthcheck_port }} + {% endif -%} + volumeMounts: + - mountPath: /usr/local/etc/haproxy/ + name: etc-haproxy + readOnly: true + volumes: + - name: etc-haproxy + hostPath: + path: {{ haproxy_config_dir }} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 new file mode 100644 index 0000000..02887cf --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 @@ -0,0 +1,93 @@ +# Inspired by https://github.com/kube-vip/kube-vip/blob/v0.5.5/pkg/kubevip/config_generator.go#L13 +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system +spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: {{ kube_vip_arp_enabled | string | to_json }} + - name: port + value: {{ kube_apiserver_port | string | to_json }} +{% if kube_vip_interface %} + - name: vip_interface + value: {{ kube_vip_interface | string | to_json }} +{% endif %} +{% if kube_vip_services_interface %} + - name: vip_servicesinterface + value: {{ kube_vip_services_interface | string | to_json }} +{% endif %} +{% if kube_vip_cidr %} + - name: vip_cidr + value: {{ kube_vip_cidr | string | to_json }} +{% endif %} +{% if kube_vip_controlplane_enabled %} + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: {{ kube_vip_ddns_enabled | string | to_json }} +{% endif %} +{% if kube_vip_services_enabled %} + - name: svc_enable + value: "true" +{% endif %} +{% if kube_vip_leader_election_enabled %} + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" +{% endif %} +{% if kube_vip_bgp_enabled %} + - name: bgp_enable + value: "true" + - name: bgp_routerid + value: {{ kube_vip_bgp_routerid | string | to_json }} + - name: bgp_as + value: {{ kube_vip_local_as | string | to_json }} + - name: bgp_peeraddress + value: {{ kube_vip_bgp_peeraddress | to_json }} + - 
name: bgp_peerpass + value: {{ kube_vip_bgp_peerpass | to_json }} + - name: bgp_peeras + value: {{ kube_vip_bgp_peeras | string | to_json }} +{% if kube_vip_bgppeers %} + - name: bgp_peers + value: {{ kube_vip_bgppeers | join(',') | to_json }} +{% endif %} +{% endif %} + - name: address + value: {{ kube_vip_address | to_json }} + image: {{ kube_vip_image_repo }}:{{ kube_vip_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig +status: {} + diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 new file mode 100644 index 0000000..04b9b73 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx-proxy + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-nginx + annotations: + nginx-cfg-checksum: "{{ nginx_stat.stat.checksum }}" +spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + containers: + - name: nginx-proxy + image: {{ nginx_image_repo }}:{{ nginx_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: {{ loadbalancer_apiserver_cpu_requests }} + memory: {{ loadbalancer_apiserver_memory_requests }} + {% if loadbalancer_apiserver_healthcheck_port is defined -%} + livenessProbe: + httpGet: + path: /healthz + port: {{ loadbalancer_apiserver_healthcheck_port }} + readinessProbe: + httpGet: + path: /healthz + port: {{ loadbalancer_apiserver_healthcheck_port }} + {% endif -%} + volumeMounts: + - mountPath: /etc/nginx + name: etc-nginx + readOnly: true + volumes: + - name: etc-nginx + hostPath: + path: {{ nginx_config_dir }} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 b/kubespray/extra_playbooks/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 new file mode 100644 index 0000000..4b8af60 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kubelet + user: + client-certificate: {{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem + client-key: {{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem +contexts: +- context: + cluster: local + user: kubelet + name: kubelet-{{ cluster_name }} +current-context: kubelet-{{ cluster_name }} diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/vars/fedora.yml b/kubespray/extra_playbooks/roles/kubernetes/node/vars/fedora.yml new file mode 100644 index 0000000..59bc55d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/vars/fedora.yml @@ -0,0 +1,2 @@ +--- +kube_resolv_conf: "/run/systemd/resolve/resolv.conf" diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/vars/ubuntu-18.yml b/kubespray/extra_playbooks/roles/kubernetes/node/vars/ubuntu-18.yml new 
file mode 100644 index 0000000..59bc55d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/vars/ubuntu-18.yml @@ -0,0 +1,2 @@ +--- +kube_resolv_conf: "/run/systemd/resolve/resolv.conf" diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/vars/ubuntu-20.yml b/kubespray/extra_playbooks/roles/kubernetes/node/vars/ubuntu-20.yml new file mode 100644 index 0000000..59bc55d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/vars/ubuntu-20.yml @@ -0,0 +1,2 @@ +--- +kube_resolv_conf: "/run/systemd/resolve/resolv.conf" diff --git a/kubespray/extra_playbooks/roles/kubernetes/node/vars/ubuntu-22.yml b/kubespray/extra_playbooks/roles/kubernetes/node/vars/ubuntu-22.yml new file mode 100644 index 0000000..59bc55d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/node/vars/ubuntu-22.yml @@ -0,0 +1,2 @@ +--- +kube_resolv_conf: "/run/systemd/resolve/resolv.conf" diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/defaults/main.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/defaults/main.yml new file mode 100644 index 0000000..5537b52 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/defaults/main.yml @@ -0,0 +1,107 @@ +--- +# Set to true to allow pre-checks to fail and continue deployment +ignore_assert_errors: false + +epel_enabled: false +# Kubespray sets this to true after clusterDNS is running to apply changes to the host resolv.conf +dns_late: false + +common_required_pkgs: + - "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1', 'openssl') }}" + - curl + - rsync + - socat + - unzip + - e2fsprogs + - xfsprogs + - ebtables + - bash-completion + - tar + +# Set to true if your network does not support IPv6 +# This maybe necessary for pulling Docker images from +# GCE docker repository +disable_ipv6_dns: false + +kube_owner: kube +kube_cert_group: kube-cert +kube_config_dir: /etc/kubernetes +kube_cert_dir: "{{ kube_config_dir }}/ssl" +kube_cert_compat_dir: /etc/kubernetes/pki +kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +# Flatcar Container Linux by Kinvolk cloud init config file to define /etc/resolv.conf content +# for hostnet pods and infra needs +resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf + +# All inventory hostnames will be written into each /etc/hosts file. +populate_inventory_to_hosts_file: true +# K8S Api FQDN will be written into /etc/hosts file. +populate_loadbalancer_apiserver_to_hosts_file: true + +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +etc_hosts_localhost_entries: + 127.0.0.1: + expected: + - localhost + - localhost.localdomain + ::1: + expected: + - localhost6 + - localhost6.localdomain + unexpected: + - localhost + - localhost.localdomain + +# Minimal memory requirement in MB for safety checks +minimal_node_memory_mb: 1024 +minimal_master_memory_mb: 1500 + +yum_repo_dir: /etc/yum.repos.d + +# number of times package install task should be retried +pkg_install_retries: 4 + +# Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +ping_access_ip: true + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. +ntp_enabled: false +# The package to install which provides NTP functionality. +# The default is ntp for most platforms, or chrony on RHEL/CentOS 7 and later. 
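# Illustrative override only (not part of these defaults) — a typical group_vars
# snippet enabling managed chrony with these variables might look like:
#   ntp_enabled: true
#   ntp_package: chrony
#   ntp_manage_config: true
#   ntp_servers:
#     - "0.pool.ntp.org iburst"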
+# The ntp_package can be one of ['ntp','chrony'] +ntp_package: >- + {% if ansible_os_family == "RedHat" -%} + chrony + {%- else -%} + ntp + {%- endif -%} + +# Manage the NTP configuration file. +ntp_manage_config: false +# Specify the NTP servers +# Only takes effect when ntp_manage_config is true. +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" +# Restrict NTP access to these hosts. +# Only takes effect when ntp_manage_config is true. +ntp_restrict: + - "127.0.0.1" + - "::1" +# The NTP driftfile path +# Only takes effect when ntp_manage_config is true. +ntp_driftfile: /var/lib/ntp/ntp.drift +# Enable tinker panic is useful when running NTP in a VM environment. +# Only takes effect when ntp_manage_config is true. +ntp_tinker_panic: false + +# Force sync time immediately after the ntp installed, which is useful in in newly installed system. +ntp_force_sync_immediately: false + +# Set the timezone for your server. eg: "Etc/UTC","Etc/GMT-8". If not set, the timezone will not change. +ntp_timezone: "" diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/files/dhclient_nodnsupdate b/kubespray/extra_playbooks/roles/kubernetes/preinstall/files/dhclient_nodnsupdate new file mode 100644 index 0000000..03c7c99 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/files/dhclient_nodnsupdate @@ -0,0 +1,4 @@ +#!/bin/sh +make_resolv_conf() { + : +} diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/gen-gitinfos.sh b/kubespray/extra_playbooks/roles/kubernetes/preinstall/gen-gitinfos.sh new file mode 100755 index 0000000..bfab5a4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/gen-gitinfos.sh @@ -0,0 +1,73 @@ +#!/bin/sh +set -e + +# Text color variables +txtbld=$(tput bold) # Bold +bldred=${txtbld}$(tput setaf 1) # red +bldgre=${txtbld}$(tput setaf 2) # green +bldylw=${txtbld}$(tput setaf 3) # yellow +txtrst=$(tput sgr0) # Reset +err=${bldred}ERROR${txtrst} +info=${bldgre}INFO${txtrst} +warn=${bldylw}WARNING${txtrst} + +usage() +{ + cat << EOF +Generates a file which contains useful git informations + +Usage : $(basename $0) [global|diff] + ex : + Generate git information + $(basename $0) global + Generate diff from latest tag + $(basename $0) diff +EOF +} + +if [ $# != 1 ]; then + printf "\n$err : Needs 1 argument\n" + usage + exit 2 +fi; + +current_commit=$(git rev-parse HEAD) +latest_tag=$(git describe --abbrev=0 --tags) +latest_tag_commit=$(git show-ref -s ${latest_tag}) +tags_list=$(git tag --points-at "${latest_tag}") + +case ${1} in + "global") +cat<=') + msg: "The current release of Kubespray only support newer version of Kubernetes than {{ kube_version_min_required }} - You are trying to apply {{ kube_version }}" + when: not ignore_assert_errors + +# simplify this items-list when https://github.com/ansible/ansible/issues/15753 is resolved +- name: "Stop if known booleans are set as strings (Use JSON format on CLI: -e \"{'key': true }\")" + assert: + that: item.value|type_debug == 'bool' + msg: "{{ item.value }} isn't a bool" + run_once: yes + with_items: + - { name: download_run_once, value: "{{ download_run_once }}" } + - { name: deploy_netchecker, value: "{{ deploy_netchecker }}" } + - { name: download_always_pull, value: "{{ download_always_pull }}" } + - { name: helm_enabled, value: "{{ helm_enabled }}" } + - { name: openstack_lbaas_enabled, value: "{{ openstack_lbaas_enabled }}" } + when: not ignore_assert_errors + +- name: Stop if even 
number of etcd hosts + assert: + that: groups.etcd|length is not divisibleby 2 + when: + - not ignore_assert_errors + - inventory_hostname in groups.get('etcd',[]) + +- name: Stop if memory is too small for masters + assert: + that: ansible_memtotal_mb >= minimal_master_memory_mb + when: + - not ignore_assert_errors + - inventory_hostname in groups['kube_control_plane'] + +- name: Stop if memory is too small for nodes + assert: + that: ansible_memtotal_mb >= minimal_node_memory_mb + when: + - not ignore_assert_errors + - inventory_hostname in groups['kube_node'] + +# This assertion will fail on the safe side: One can indeed schedule more pods +# on a node than the CIDR-range has space for when additional pods use the host +# network namespace. It is impossible to ascertain the number of such pods at +# provisioning time, so to establish a guarantee, we factor these out. +# NOTICE: the check blatantly ignores the inet6-case +- name: Guarantee that enough network address space is available for all pods + assert: + that: "{{ (kubelet_max_pods | default(110)) | int <= (2 ** (32 - kube_network_node_prefix | int)) - 2 }}" + msg: "Do not schedule more pods on a node than inet addresses are available." + when: + - not ignore_assert_errors + - inventory_hostname in groups['k8s_cluster'] + - kube_network_node_prefix is defined + - kube_network_plugin != 'calico' + +- name: Stop if ip var does not match local ips + assert: + that: (ip in ansible_all_ipv4_addresses) or (ip in ansible_all_ipv6_addresses) + msg: "IPv4: '{{ ansible_all_ipv4_addresses }}' and IPv6: '{{ ansible_all_ipv6_addresses }}' do not contain '{{ ip }}'" + when: + - not ignore_assert_errors + - ip is defined + +- name: Ensure ping package + package: + name: >- + {%- if ansible_os_family == 'Debian' -%} + iputils-ping + {%- else -%} + iputils + {%- endif -%} + state: present + when: + - access_ip is defined + - not ignore_assert_errors + - ping_access_ip + - not is_fedora_coreos + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: Stop if access_ip is not pingable + command: ping -c1 {{ access_ip }} + when: + - access_ip is defined + - not ignore_assert_errors + - ping_access_ip + +- name: Stop if RBAC is not enabled when dashboard is enabled + assert: + that: rbac_enabled + when: + - dashboard_enabled + - not ignore_assert_errors + +- name: Stop if RBAC is not enabled when OCI cloud controller is enabled + assert: + that: rbac_enabled + when: + - cloud_provider is defined and cloud_provider == "oci" + - not ignore_assert_errors + +- name: Stop if kernel version is too low + assert: + that: ansible_kernel.split('-')[0] is version('4.9.17', '>=') + when: + - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool + - not ignore_assert_errors + +- name: Stop if bad hostname + assert: + that: inventory_hostname is match("[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") + msg: "Hostname must consist of lower case alphanumeric characters, '.' 
or '-', and must start and end with an alphanumeric character" + when: not ignore_assert_errors + +- name: check cloud_provider value + assert: + that: cloud_provider in ['gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', 'external'] + msg: "If set the 'cloud_provider' var must be set either to 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci' or 'external'" + when: + - cloud_provider is defined + - not ignore_assert_errors + tags: + - cloud-provider + - facts + +- name: "Check that kube_service_addresses is a network range" + assert: + that: + - kube_service_addresses | ipaddr('net') + msg: "kube_service_addresses = '{{ kube_service_addresses }}' is not a valid network range" + run_once: yes + +- name: "Check that kube_pods_subnet is a network range" + assert: + that: + - kube_pods_subnet | ipaddr('net') + msg: "kube_pods_subnet = '{{ kube_pods_subnet }}' is not a valid network range" + run_once: yes + +- name: "Check that kube_pods_subnet does not collide with kube_service_addresses" + assert: + that: + - kube_pods_subnet | ipaddr(kube_service_addresses) | string == 'None' + msg: "kube_pods_subnet cannot be the same network segment as kube_service_addresses" + run_once: yes + +- name: "Check that IP range is enough for the nodes" + assert: + that: + - 2 ** (kube_network_node_prefix - kube_pods_subnet | ipaddr('prefix')) >= groups['k8s_cluster'] | length + msg: "Not enough IPs are available for the desired node count." + when: kube_network_plugin != 'calico' + run_once: yes + +- name: Stop if unknown dns mode + assert: + that: dns_mode in ['coredns', 'coredns_dual', 'manual', 'none'] + msg: "dns_mode can only be 'coredns', 'coredns_dual', 'manual' or 'none'" + when: dns_mode is defined + run_once: true + +- name: Stop if unknown kube proxy mode + assert: + that: kube_proxy_mode in ['iptables', 'ipvs'] + msg: "kube_proxy_mode can only be 'iptables' or 'ipvs'" + when: kube_proxy_mode is defined + run_once: true + +- name: Stop if unknown cert_management + assert: + that: cert_management|d('script') in ['script', 'none'] + msg: "cert_management can only be 'script' or 'none'" + run_once: true + +- name: Stop if unknown resolvconf_mode + assert: + that: resolvconf_mode in ['docker_dns', 'host_resolvconf', 'none'] + msg: "resolvconf_mode can only be 'docker_dns', 'host_resolvconf' or 'none'" + when: resolvconf_mode is defined + run_once: true + +- name: Stop if etcd deployment type is not host, docker or kubeadm + assert: + that: etcd_deployment_type in ['host', 'docker', 'kubeadm'] + msg: "The etcd deployment type, 'etcd_deployment_type', must be host, docker or kubeadm" + when: + - inventory_hostname in groups.get('etcd',[]) + +- name: Stop if container manager is not docker, crio or containerd + assert: + that: container_manager in ['docker', 'crio', 'containerd'] + msg: "The container manager, 'container_manager', must be docker, crio or containerd" + run_once: true + +- name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker + assert: + that: etcd_deployment_type in ['host', 'kubeadm'] + msg: "The etcd deployment type, 'etcd_deployment_type', must be host or kubeadm when container_manager is not docker" + when: + - inventory_hostname in groups.get('etcd',[]) + - container_manager != 'docker' + +# TODO: Clean this task up when we drop backward compatibility support for `etcd_kubeadm_enabled` +- name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker and etcd_kubeadm_enabled is not defined + block: + - name: Warn 
the user if they are still using `etcd_kubeadm_enabled` + debug: + msg: > + "WARNING! => `etcd_kubeadm_enabled` is deprecated and will be removed in a future release. + You can set `etcd_deployment_type` to `kubeadm` instead of setting `etcd_kubeadm_enabled` to `true`." + changed_when: true + + - name: Stop if `etcd_kubeadm_enabled` is defined and `etcd_deployment_type` is not `kubeadm` or `host` + assert: + that: etcd_deployment_type == 'kubeadm' + msg: > + It is not possible to use `etcd_kubeadm_enabled` when `etcd_deployment_type` is set to {{ etcd_deployment_type }}. + Unset the `etcd_kubeadm_enabled` variable and set `etcd_deployment_type` to desired deployment type (`host`, `kubeadm`, `docker`) instead." + when: etcd_kubeadm_enabled + run_once: yes + when: etcd_kubeadm_enabled is defined + +- name: Stop if download_localhost is enabled but download_run_once is not + assert: + that: download_run_once + msg: "download_localhost requires enable download_run_once" + when: download_localhost + +- name: Stop if kata_containers_enabled is enabled when container_manager is docker + assert: + that: container_manager != 'docker' + msg: "kata_containers_enabled support only for containerd and crio-o. See https://github.com/kata-containers/documentation/blob/1.11.4/how-to/run-kata-with-k8s.md#install-a-cri-implementation for details" + when: kata_containers_enabled + +- name: Stop if gvisor_enabled is enabled when container_manager is not containerd + assert: + that: container_manager == 'containerd' + msg: "gvisor_enabled support only compatible with containerd. See https://github.com/kubernetes-sigs/kubespray/issues/7650 for details" + when: gvisor_enabled + +- name: Stop if download_localhost is enabled for Flatcar Container Linux + assert: + that: ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + msg: "download_run_once not supported for Flatcar Container Linux" + when: download_run_once or download_force_cache + +- name: Ensure minimum containerd version + assert: + that: containerd_version is version(containerd_min_version_required, '>=') + msg: "containerd_version is too low. Minimum version {{ containerd_min_version_required }}" + run_once: yes + when: + - containerd_version not in ['latest', 'edge', 'stable'] + - container_manager == 'containerd' + +- name: Stop if using deprecated containerd_config variable + assert: + that: containerd_config is not defined + msg: "Variable containerd_config is now deprecated. See https://github.com/kubernetes-sigs/kubespray/blob/master/inventory/sample/group_vars/all/containerd.yml for details." + when: + - containerd_config is defined + - not ignore_assert_errors + +- name: Stop if auto_renew_certificates is enabled when certificates are managed externally (kube_external_ca_mode is true) + assert: + that: not auto_renew_certificates + msg: "Variable auto_renew_certificates must be disabled when CA are managed externally: kube_external_ca_mode = true" + when: + - kube_external_ca_mode + - not ignore_assert_errors + +- name: Stop if using deprecated comma separated list for admission plugins + assert: + that: "',' not in kube_apiserver_enable_admission_plugins[0]" + msg: "Comma-separated list for kube_apiserver_enable_admission_plugins is now deprecated, use separate list items for each plugin." 
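  # Illustrative example (not part of the original assert): the supported form is a
  # proper YAML list, e.g.
  #   kube_apiserver_enable_admission_plugins:
  #     - NodeRestriction
  #     - PodSecurity
  # rather than the deprecated single entry "NodeRestriction,PodSecurity".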
+ when: + - kube_apiserver_enable_admission_plugins is defined + - kube_apiserver_enable_admission_plugins | length > 0 diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0040-set_facts.yml new file mode 100644 index 0000000..3ae8412 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0040-set_facts.yml @@ -0,0 +1,279 @@ +--- +- name: Force binaries directory for Flatcar Container Linux by Kinvolk + set_fact: + bin_dir: "/opt/bin" + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + tags: + - facts + +- name: Set os_family fact for Kylin Linux Advanced Server and openEuler + set_fact: + ansible_os_family: "RedHat" + ansible_distribution_major_version: "8" + when: ansible_distribution in ["Kylin Linux Advanced Server", "openEuler"] + tags: + - facts + +- name: check if booted with ostree + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: set is_fedora_coreos + lineinfile: + path: /etc/os-release + line: "VARIANT_ID=coreos" + state: present + check_mode: yes + register: os_variant_coreos + changed_when: false + +- name: set is_fedora_coreos + set_fact: + is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}" + +- name: check resolvconf + command: which resolvconf + register: resolvconf + failed_when: false + changed_when: false + check_mode: no + +- name: check existence of /etc/resolvconf/resolv.conf.d + stat: + path: /etc/resolvconf/resolv.conf.d + get_attributes: no + get_checksum: no + get_mime: no + failed_when: false + register: resolvconfd_path + +- name: check status of /etc/resolv.conf + stat: + path: /etc/resolv.conf + follow: no + get_attributes: no + get_checksum: no + get_mime: no + failed_when: false + register: resolvconf_stat + +- block: + + - name: get content of /etc/resolv.conf + slurp: + src: /etc/resolv.conf + register: resolvconf_slurp + + - name: get currently configured nameservers + set_fact: + configured_nameservers: "{{ resolvconf_slurp.content | b64decode | regex_findall('^nameserver\\s*(.*)', multiline=True) | ipaddr }}" + when: resolvconf_slurp.content is defined + + when: resolvconf_stat.stat.exists is defined and resolvconf_stat.stat.exists + +- name: Stop if /etc/resolv.conf not configured nameservers + assert: + that: configured_nameservers|length>0 + fail_msg: "nameserver should not empty in /etc/resolv.conf" + when: + - not ignore_assert_errors + - configured_nameservers is defined + - not (upstream_dns_servers is defined and upstream_dns_servers|length > 0) + - not (disable_host_nameservers | default(false)) + +- name: NetworkManager | Check if host has NetworkManager + # noqa 303 Should we use service_facts for this? + command: systemctl is-active --quiet NetworkManager.service + register: networkmanager_enabled + failed_when: false + changed_when: false + check_mode: false + +- name: check systemd-resolved + # noqa 303 Should we use service_facts for this? 
+ command: systemctl is-active systemd-resolved + register: systemd_resolved_enabled + failed_when: false + changed_when: false + check_mode: no + +- name: set default dns if remove_default_searchdomains is false + set_fact: + default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"] + when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0) + +- name: set dns facts + set_fact: + resolvconf: >- + {%- if resolvconf.rc == 0 and resolvconfd_path.stat.isdir is defined and resolvconfd_path.stat.isdir -%}true{%- else -%}false{%- endif -%} + bogus_domains: |- + {% for d in default_searchdomains|default([]) + searchdomains|default([]) -%} + {{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./ + {%- endfor %} + cloud_resolver: "{{ ['169.254.169.254'] if cloud_provider is defined and cloud_provider == 'gce' else + ['169.254.169.253'] if cloud_provider is defined and cloud_provider == 'aws' else + [] }}" + +- name: check if kubelet is configured + stat: + path: "{{ kube_config_dir }}/kubelet.env" + get_attributes: no + get_checksum: no + get_mime: no + register: kubelet_configured + changed_when: false + +- name: check if early DNS configuration stage + set_fact: + dns_early: "{{ not kubelet_configured.stat.exists }}" + +- name: target resolv.conf files + set_fact: + resolvconffile: /etc/resolv.conf + base: >- + {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/base{%- endif -%} + head: >- + {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%} + when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos + +- name: target temporary resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS) + set_fact: + resolvconffile: /tmp/resolveconf_cloud_init_conf + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] or is_fedora_coreos + +- name: check if /etc/dhclient.conf exists + stat: + path: /etc/dhclient.conf + get_attributes: no + get_checksum: no + get_mime: no + register: dhclient_stat + +- name: target dhclient conf file for /etc/dhclient.conf + set_fact: + dhclientconffile: /etc/dhclient.conf + when: dhclient_stat.stat.exists + +- name: check if /etc/dhcp/dhclient.conf exists + stat: + path: /etc/dhcp/dhclient.conf + get_attributes: no + get_checksum: no + get_mime: no + register: dhcp_dhclient_stat + +- name: target dhclient conf file for /etc/dhcp/dhclient.conf + set_fact: + dhclientconffile: /etc/dhcp/dhclient.conf + when: dhcp_dhclient_stat.stat.exists + +- name: target dhclient hook file for Red Hat family + set_fact: + dhclienthookfile: /etc/dhcp/dhclient.d/zdnsupdate.sh + when: ansible_os_family == "RedHat" + +- name: target dhclient hook file for Debian family + set_fact: + dhclienthookfile: /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate + when: ansible_os_family == "Debian" + +- name: generate search domains to resolvconf + set_fact: + searchentries: + search {{ (default_searchdomains|default([]) + searchdomains|default([])) | join(' ') }} + domainentry: + domain {{ dns_domain }} + supersede_search: + supersede domain-search "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join('", "') }}"; + supersede_domain: + supersede domain-name "{{ dns_domain }}"; + +- name: pick coredns cluster IP or default resolver + set_fact: + coredns_server: |- + {%- if dns_mode == 'coredns' and not dns_early|bool -%} + {{ [ skydns_server ] }} + {%- elif dns_mode == 
'coredns_dual' and not dns_early|bool -%} + {{ [ skydns_server ] + [ skydns_server_secondary ] }} + {%- elif dns_mode == 'manual' and not dns_early|bool -%} + {{ ( manual_dns_server.split(',') | list) }} + {%- elif dns_mode == 'none' and not dns_early|bool -%} + [] + {%- elif dns_early|bool -%} + {{ upstream_dns_servers|default([]) }} + {%- endif -%} + +# This task should only run after cluster/nodelocal DNS is up, otherwise all DNS lookups will timeout +- name: generate nameservers for resolvconf, including cluster DNS + set_fact: + nameserverentries: |- + {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([]) + (configured_nameservers|d([]) if not disable_host_nameservers|d()|bool else [])) | unique | join(',') }} + supersede_nameserver: + supersede domain-name-servers {{ ( ( [nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }}; + when: not dns_early or dns_late + +# This task should run instead of the above task when cluster/nodelocal DNS hasn't +# been deployed yet (like scale.yml/cluster.yml) or when it's down (reset.yml) +- name: generate nameservers for resolvconf, not including cluster DNS + set_fact: + nameserverentries: |- + {{ ( nameservers|d([]) + cloud_resolver|d([]) + configured_nameservers|d([])) | unique | join(',') }} + supersede_nameserver: + supersede domain-name-servers {{ ( nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }}; + when: dns_early and not dns_late + +- name: gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + skip: true + +- name: set etcd vars if using kubeadm mode + set_fact: + etcd_cert_dir: "{{ kube_cert_dir }}" + kube_etcd_cacert_file: "etcd/ca.crt" + kube_etcd_cert_file: "apiserver-etcd-client.crt" + kube_etcd_key_file: "apiserver-etcd-client.key" + when: + - etcd_deployment_type == "kubeadm" + +- name: check /usr readonly + stat: + path: "/usr" + get_attributes: no + get_checksum: no + get_mime: no + register: usr + +- name: set alternate flexvolume path + set_fact: + kubelet_flexvolumes_plugins_dir: /var/lib/kubelet/volumeplugins + when: not usr.stat.writeable + +- block: + - name: Ensure IPv6DualStack featureGate is set when enable_dual_stack_networks is true + set_fact: + kube_feature_gates: "{{ kube_feature_gates + [ 'IPv6DualStack=true' ] }}" + when: + - not 'IPv6DualStack=true' in kube_feature_gates + + - name: Ensure IPv6DualStack kubeadm featureGate is set when enable_dual_stack_networks is true + set_fact: + kubeadm_feature_gates: "{{ kubeadm_feature_gates + [ 'IPv6DualStack=true' ] }}" + when: + - not 'IPv6DualStack=true' in kubeadm_feature_gates + when: + - enable_dual_stack_networks + - kube_version is version('v1.24.0', '<') diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0050-create_directories.yml new file mode 100644 index 0000000..35d7e04 
--- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0050-create_directories.yml @@ -0,0 +1,105 @@ +--- +- name: Create kubernetes directories + file: + path: "{{ item }}" + state: directory + owner: "{{ kube_owner }}" + mode: 0755 + when: inventory_hostname in groups['k8s_cluster'] + become: true + tags: + - kubelet + - k8s-secrets + - kube-controller-manager + - kube-apiserver + - bootstrap-os + - apps + - network + - master + - node + with_items: + - "{{ kube_config_dir }}" + - "{{ kube_cert_dir }}" + - "{{ kube_manifest_dir }}" + - "{{ kube_script_dir }}" + - "{{ kubelet_flexvolumes_plugins_dir }}" + +- name: Create other directories + file: + path: "{{ item }}" + state: directory + owner: root + mode: 0755 + when: inventory_hostname in groups['k8s_cluster'] + become: true + tags: + - kubelet + - k8s-secrets + - kube-controller-manager + - kube-apiserver + - bootstrap-os + - apps + - network + - master + - node + with_items: + - "{{ bin_dir }}" + +- name: Check if kubernetes kubeadm compat cert dir exists + stat: + path: "{{ kube_cert_compat_dir }}" + get_attributes: no + get_checksum: no + get_mime: no + register: kube_cert_compat_dir_check + when: + - inventory_hostname in groups['k8s_cluster'] + - kube_cert_dir != kube_cert_compat_dir + +- name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498) + file: + src: "{{ kube_cert_dir }}" + dest: "{{ kube_cert_compat_dir }}" + state: link + mode: 0755 + when: + - inventory_hostname in groups['k8s_cluster'] + - kube_cert_dir != kube_cert_compat_dir + - not kube_cert_compat_dir_check.stat.exists + +- name: Create cni directories + file: + path: "{{ item }}" + state: directory + owner: "{{ kube_owner }}" + mode: 0755 + with_items: + - "/etc/cni/net.d" + - "/opt/cni/bin" + - "/var/lib/calico" + when: + - kube_network_plugin in ["calico", "weave", "canal", "flannel", "cilium", "kube-ovn", "kube-router", "macvlan"] + - inventory_hostname in groups['k8s_cluster'] + tags: + - network + - cilium + - calico + - weave + - canal + - kube-ovn + - kube-router + - bootstrap-os + +- name: Create local volume provisioner directories + file: + path: "{{ local_volume_provisioner_storage_classes[item].host_dir }}" + state: directory + owner: root + group: root + mode: "{{ local_volume_provisioner_directory_mode }}" + with_items: "{{ local_volume_provisioner_storage_classes.keys() | list }}" + when: + - inventory_hostname in groups['k8s_cluster'] + - local_volume_provisioner_enabled + tags: + - persistent_volumes diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml new file mode 100644 index 0000000..4397cdd --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml @@ -0,0 +1,58 @@ +--- +- name: create temporary resolveconf cloud init file + command: cp -f /etc/resolv.conf "{{ resolvconffile }}" + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: Add domain/search/nameservers/options to resolv.conf + blockinfile: + path: "{{ resolvconffile }}" + block: |- + {% for item in [domainentry] + [searchentries] -%} + {{ item }} + {% endfor %} + {% for item in nameserverentries.split(',') %} + nameserver {{ item }} + {% endfor %} + options ndots:{{ ndots }} timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }} + state: present + insertbefore: BOF + create: yes + backup: "{{ not 
resolvconf_stat.stat.islnk }}" + marker: "# Ansible entries {mark}" + mode: 0644 + notify: Preinstall | propagate resolvconf to k8s components + +- name: Remove search/domain/nameserver options before block + replace: + path: "{{ item[0] }}" + regexp: '^{{ item[1] }}[^#]*(?=# Ansible entries BEGIN)' + backup: "{{ not resolvconf_stat.stat.islnk }}" + with_nested: + - "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}" + - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ] + notify: Preinstall | propagate resolvconf to k8s components + +- name: Remove search/domain/nameserver options after block + replace: + path: "{{ item[0] }}" + regexp: '(# Ansible entries END\n(?:(?!^{{ item[1] }}).*\n)*)(?:^{{ item[1] }}.*\n?)+' + replace: '\1' + backup: "{{ not resolvconf_stat.stat.islnk }}" + with_nested: + - "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}" + - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ] + notify: Preinstall | propagate resolvconf to k8s components + +- name: get temporary resolveconf cloud init file content + command: cat {{ resolvconffile }} + register: cloud_config + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: persist resolvconf cloud init file + template: + dest: "{{ resolveconf_cloud_init_conf }}" + src: resolvconf.j2 + owner: root + mode: 0644 + notify: Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0061-systemd-resolved.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0061-systemd-resolved.yml new file mode 100644 index 0000000..3811358 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0061-systemd-resolved.yml @@ -0,0 +1,9 @@ +--- +- name: Write resolved.conf + template: + src: resolved.conf.j2 + dest: /etc/systemd/resolved.conf + owner: root + group: root + mode: 0644 + notify: Preinstall | Restart systemd-resolved diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml new file mode 100644 index 0000000..1cd56d4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml @@ -0,0 +1,28 @@ +--- +- name: NetworkManager | Ensure NetworkManager conf.d dir + file: + path: "/etc/NetworkManager/conf.d" + state: directory + recurse: yes + +- name: NetworkManager | Prevent NetworkManager from managing Calico interfaces (cali*/tunl*/vxlan.calico) + copy: + content: | + [keyfile] + unmanaged-devices+=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico + dest: /etc/NetworkManager/conf.d/calico.conf + mode: 0644 + when: + - kube_network_plugin == "calico" + notify: Preinstall | reload NetworkManager + +# TODO: add other network_plugin interfaces + +- name: NetworkManager | Prevent NetworkManager from managing K8S interfaces (kube-ipvs0/nodelocaldns) + copy: + content: | + [keyfile] + unmanaged-devices+=interface-name:kube-ipvs0;interface-name:nodelocaldns + dest: /etc/NetworkManager/conf.d/k8s.conf + mode: 0644 + notify: Preinstall | reload NetworkManager diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml 
b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml new file mode 100644 index 0000000..f245814 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml @@ -0,0 +1,35 @@ +--- +- name: NetworkManager | Add nameservers to NM configuration + ini_file: + path: /etc/NetworkManager/conf.d/dns.conf + section: global-dns-domain-* + option: servers + value: "{{ nameserverentries }}" + mode: '0600' + backup: yes + notify: Preinstall | update resolvconf for networkmanager + +- name: set default dns if remove_default_searchdomains is false + set_fact: + default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"] + when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0) + +- name: NetworkManager | Add DNS search to NM configuration + ini_file: + path: /etc/NetworkManager/conf.d/dns.conf + section: global-dns + option: searches + value: "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join(',') }}" + mode: '0600' + backup: yes + notify: Preinstall | update resolvconf for networkmanager + +- name: NetworkManager | Add DNS options to NM configuration + ini_file: + path: /etc/NetworkManager/conf.d/dns.conf + section: global-dns + option: options + value: "ndots:{{ ndots }};timeout:{{ dns_timeout|default('2') }};attempts:{{ dns_attempts|default('2') }};" + mode: '0600' + backup: yes + notify: Preinstall | update resolvconf for networkmanager diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0070-system-packages.yml new file mode 100644 index 0000000..b4fccfb --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0070-system-packages.yml @@ -0,0 +1,98 @@ +--- +- name: Update package management cache (zypper) - SUSE + command: zypper -n --gpg-auto-import-keys ref + register: make_cache_output + until: make_cache_output is succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + when: + - ansible_pkg_mgr == 'zypper' + tags: bootstrap-os + +- block: + - name: Add Debian Backports apt repo + apt_repository: + repo: "deb http://deb.debian.org/debian {{ ansible_distribution_release }}-backports main" + state: present + filename: debian-backports + + - name: Set libseccomp2 pin priority to apt_preferences on Debian buster + copy: + content: | + Package: libseccomp2 + Pin: release a={{ ansible_distribution_release }}-backports + Pin-Priority: 1001 + dest: "/etc/apt/preferences.d/libseccomp2" + owner: "root" + mode: 0644 + when: + - ansible_distribution == "Debian" + - ansible_distribution_version == "10" + tags: + - bootstrap-os + +- name: Update package management cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: ansible_os_family == "Debian" + tags: + - bootstrap-os + +- name: Remove legacy docker repo file + file: + path: "{{ yum_repo_dir }}/docker.repo" + state: absent + when: + - ansible_os_family == "RedHat" + - not is_fedora_coreos + +- name: Install python3-dnf for latest RedHat versions + command: dnf install -y python3-dnf + register: dnf_task_result + until: dnf_task_result is succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + when: + - ansible_distribution == "Fedora" + - ansible_distribution_major_version|int >= 30 + - not is_fedora_coreos + changed_when: False + tags: + - bootstrap-os + +- name: Install 
epel-release on RHEL derivatives + package: + name: epel-release + state: present + when: + - ansible_os_family == "RedHat" + - not is_fedora_coreos + - epel_enabled|bool + tags: + - bootstrap-os + +- name: Update common_required_pkgs with ipvsadm when kube_proxy_mode is ipvs + set_fact: + common_required_pkgs: "{{ common_required_pkgs|default([]) + ['ipvsadm', 'ipset'] }}" + when: kube_proxy_mode == 'ipvs' + +- name: Install packages requirements + package: + name: "{{ required_pkgs | default([]) | union(common_required_pkgs|default([])) }}" + state: present + register: pkgs_task_result + until: pkgs_task_result is succeeded + retries: "{{ pkg_install_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + when: not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) + tags: + - bootstrap-os + +- name: Install ipvsadm for ClearLinux + package: + name: ipvsadm + state: present + when: + - ansible_os_family in ["ClearLinux"] + - kube_proxy_mode == 'ipvs' diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml new file mode 100644 index 0000000..dafa47f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml @@ -0,0 +1,138 @@ +--- +# Todo : selinux configuration +- name: Confirm selinux deployed + stat: + path: /etc/selinux/config + get_attributes: no + get_checksum: no + get_mime: no + when: + - ansible_os_family == "RedHat" + - "'Amazon' not in ansible_distribution" + register: slc + +- name: Set selinux policy + selinux: + policy: targeted + state: "{{ preinstall_selinux_state }}" + when: + - ansible_os_family == "RedHat" + - "'Amazon' not in ansible_distribution" + - slc.stat.exists + changed_when: False + tags: + - bootstrap-os + +- name: Disable IPv6 DNS lookup + lineinfile: + dest: /etc/gai.conf + line: "precedence ::ffff:0:0/96 100" + state: present + create: yes + backup: yes + mode: 0644 + when: + - disable_ipv6_dns + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + tags: + - bootstrap-os + +- name: Clean previously used sysctl file locations + file: + path: "/etc/sysctl.d/{{ item }}" + state: absent + with_items: + - ipv4-ip_forward.conf + - bridge-nf-call.conf + +- name: Stat sysctl file configuration + stat: + path: "{{ sysctl_file_path }}" + get_attributes: no + get_checksum: no + get_mime: no + register: sysctl_file_stat + tags: + - bootstrap-os + +- name: Change sysctl file path to link source if linked + set_fact: + sysctl_file_path: "{{ sysctl_file_stat.stat.lnk_source }}" + when: + - sysctl_file_stat.stat.islnk is defined + - sysctl_file_stat.stat.islnk + tags: + - bootstrap-os + +- name: Make sure sysctl file path folder exists + file: + name: "{{ sysctl_file_path | dirname }}" + state: directory + mode: 0755 + +- name: Enable ip forwarding + sysctl: + sysctl_file: "{{ sysctl_file_path }}" + name: net.ipv4.ip_forward + value: "1" + state: present + reload: yes + +- name: Enable ipv6 forwarding + sysctl: + sysctl_file: "{{ sysctl_file_path }}" + name: net.ipv6.conf.all.forwarding + value: 1 + state: present + reload: yes + when: enable_dual_stack_networks | bool + +- name: Check if we need to set fs.may_detach_mounts + stat: + path: /proc/sys/fs/may_detach_mounts + get_attributes: no + get_checksum: no + get_mime: no + register: fs_may_detach_mounts + ignore_errors: true # noqa ignore-errors + 
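# For reference, a minimal sketch (assuming the default sysctl_file_path of
# /etc/sysctl.d/99-sysctl.conf) of what the forwarding tasks above persist:
#   net.ipv4.ip_forward = 1
#   net.ipv6.conf.all.forwarding = 1   # only when enable_dual_stack_networks is true
# The sysctl module then reloads that file so the values take effect immediately.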
+- name: Set fs.may_detach_mounts if needed + sysctl: + sysctl_file: "{{ sysctl_file_path }}" + name: fs.may_detach_mounts + value: 1 + state: present + reload: yes + when: fs_may_detach_mounts.stat.exists | d(false) + +- name: Ensure kube-bench parameters are set + sysctl: + sysctl_file: "{{ sysctl_file_path }}" + name: "{{ item.name }}" + value: "{{ item.value }}" + state: present + reload: yes + with_items: + - { name: kernel.keys.root_maxbytes, value: 25000000 } + - { name: kernel.keys.root_maxkeys, value: 1000000 } + - { name: kernel.panic, value: 10 } + - { name: kernel.panic_on_oops, value: 1 } + - { name: vm.overcommit_memory, value: 1 } + - { name: vm.panic_on_oom, value: 0 } + when: kubelet_protect_kernel_defaults|bool + +- name: Check dummy module + modprobe: + name: dummy + state: present + params: 'numdummies=0' + when: enable_nodelocaldns + +- name: Set additional sysctl variables + sysctl: + sysctl_file: "{{ sysctl_file_path }}" + name: "{{ item.name }}" + value: "{{ item.value }}" + state: present + reload: yes + with_items: "{{ additional_sysctl }}" diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml new file mode 100644 index 0000000..d80d14e --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml @@ -0,0 +1,79 @@ +--- +- name: Ensure NTP package + package: + name: + - "{{ ntp_package }}" + state: present + +- name: Disable systemd-timesyncd + service: + name: systemd-timesyncd.service + enabled: false + state: stopped + failed_when: false + +- name: Set fact NTP settings + set_fact: + ntp_config_file: >- + {% if ntp_package == "ntp" -%} + /etc/ntp.conf + {%- elif ansible_os_family in ['RedHat', 'Suse'] -%} + /etc/chrony.conf + {%- else -%} + /etc/chrony/chrony.conf + {%- endif -%} + ntp_service_name: >- + {% if ntp_package == "chrony" -%} + chronyd + {%- elif ansible_os_family == 'RedHat' -%} + ntpd + {%- else -%} + ntp + {%- endif %} + +- name: Generate NTP configuration file. 
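  # Worked example (illustrative): with ntp_package set to "chrony" on a Debian/Ubuntu
  # host, the facts above resolve to ntp_config_file=/etc/chrony/chrony.conf and
  # ntp_service_name=chronyd; with the default "ntp" package they resolve to
  # /etc/ntp.conf and the "ntp" service.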
+ template: + src: "{{ ntp_config_file | basename }}.j2" + dest: "{{ ntp_config_file }}" + mode: 0644 + notify: Preinstall | restart ntp + when: + - ntp_manage_config + +- name: Stop the NTP Deamon For Sync Immediately # `ntpd -gq`,`chronyd -q` requires the ntp daemon stop + service: + name: "{{ ntp_service_name }}" + state: stopped + when: + - ntp_force_sync_immediately + +- name: Force Sync NTP Immediately + command: >- + timeout -k 60s 60s + {% if ntp_package == "ntp" -%} + ntpd -gq + {%- else -%} + chronyd -q + {%- endif -%} + when: + - ntp_force_sync_immediately + +- name: Ensure NTP service is started and enabled + service: + name: "{{ ntp_service_name }}" + state: started + enabled: true + +- name: Ensure tzdata package + package: + name: + - tzdata + state: present + when: + - ntp_timezone + +- name: Set timezone + timezone: + name: "{{ ntp_timezone }}" + when: + - ntp_timezone diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0090-etchosts.yml new file mode 100644 index 0000000..ae4ffad --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0090-etchosts.yml @@ -0,0 +1,77 @@ +--- +- name: Hosts | create list from inventory + set_fact: + etc_hosts_inventory_block: |- + {% for item in (groups['k8s_cluster'] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%} + {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%} + {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }} + {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }} {% else %} {{ item }}.{{ dns_domain }} {{ item }} {% endif %} + + {% endif %} + {% endfor %} + delegate_to: localhost + connection: local + delegate_facts: yes + run_once: yes + +- name: Hosts | populate inventory into hosts file + blockinfile: + path: /etc/hosts + block: "{{ hostvars.localhost.etc_hosts_inventory_block }}" + state: present + create: yes + backup: yes + unsafe_writes: yes + marker: "# Ansible inventory hosts {mark}" + mode: 0644 + when: populate_inventory_to_hosts_file + +- name: Hosts | populate kubernetes loadbalancer address into hosts file + lineinfile: + dest: /etc/hosts + regexp: ".*{{ apiserver_loadbalancer_domain_name }}$" + line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name }}" + state: present + backup: yes + unsafe_writes: yes + when: + - populate_loadbalancer_apiserver_to_hosts_file + - loadbalancer_apiserver is defined + - loadbalancer_apiserver.address is defined + +- name: Hosts | Retrieve hosts file content + slurp: + src: /etc/hosts + register: etc_hosts_content + +- name: Hosts | Extract existing entries for localhost from hosts file + set_fact: + etc_hosts_localhosts_dict: >- + {%- set splitted = (item | regex_replace('[ \t]+', ' ')|regex_replace('#.*$')|trim).split( ' ') -%} + {{ etc_hosts_localhosts_dict|default({}) | combine({splitted[0]: splitted[1::] }) }} + with_items: "{{ (etc_hosts_content['content'] | b64decode).splitlines() }}" + when: + - etc_hosts_content.content is defined + - (item is match('^::1 .*') or item is match('^127.0.0.1 .*')) + +- name: Hosts | Update target hosts file entries dict with required entries + set_fact: + etc_hosts_localhosts_dict_target: >- + {%- 
set target_entries = (etc_hosts_localhosts_dict|default({})).get(item.key, []) | difference(item.value.get('unexpected' ,[])) -%} + {{ etc_hosts_localhosts_dict_target|default({}) | combine({item.key: (target_entries + item.value.expected)|unique}) }} + loop: "{{ etc_hosts_localhost_entries|dict2items }}" + +- name: Hosts | Update (if necessary) hosts file + lineinfile: + dest: /etc/hosts + line: "{{ item.key }} {{ item.value|join(' ') }}" + regexp: "^{{ item.key }}.*$" + state: present + backup: yes + unsafe_writes: yes + loop: "{{ etc_hosts_localhosts_dict_target|default({})|dict2items }}" + +# gather facts to update ansible_fqdn +- name: Update facts + setup: + gather_subset: min diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml new file mode 100644 index 0000000..50a6202 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml @@ -0,0 +1,33 @@ +--- +- name: Configure dhclient to supersede search/domain/nameservers + blockinfile: + block: |- + {% for item in [ supersede_domain, supersede_search, supersede_nameserver ] -%} + {{ item }} + {% endfor %} + path: "{{ dhclientconffile }}" + create: yes + state: present + insertbefore: BOF + backup: yes + marker: "# Ansible entries {mark}" + mode: 0644 + notify: Preinstall | propagate resolvconf to k8s components + +- name: Configure dhclient hooks for resolv.conf (non-RH) + template: + src: dhclient_dnsupdate.sh.j2 + dest: "{{ dhclienthookfile }}" + owner: root + mode: 0755 + notify: Preinstall | propagate resolvconf to k8s components + when: ansible_os_family not in [ "RedHat", "Suse" ] + +- name: Configure dhclient hooks for resolv.conf (RH-only) + template: + src: dhclient_dnsupdate_rh.sh.j2 + dest: "{{ dhclienthookfile }}" + owner: root + mode: 0755 + notify: Preinstall | propagate resolvconf to k8s components + when: ansible_os_family == "RedHat" diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml new file mode 100644 index 0000000..024e39f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml @@ -0,0 +1,18 @@ +--- + +# These tasks will undo changes done by kubespray in the past if needed (e.g. 
when upgrading from kubespray 2.0.x +# or when changing resolvconf_mode) + +- name: Remove kubespray specific config from dhclient config + blockinfile: + path: "{{ dhclientconffile }}" + state: absent + backup: yes + marker: "# Ansible entries {mark}" + notify: Preinstall | propagate resolvconf to k8s components + +- name: Remove kubespray specific dhclient hook + file: + path: "{{ dhclienthookfile }}" + state: absent + notify: Preinstall | propagate resolvconf to k8s components diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml new file mode 100644 index 0000000..598399b --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml @@ -0,0 +1,44 @@ +--- + +# Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time + +- name: install growpart + package: + name: cloud-utils-growpart + state: present + +- name: Gather mounts facts + setup: + gather_subset: 'mounts' + +- name: Search root filesystem device + vars: + query: "[?mount=='/'].device" + _root_device: "{{ ansible_mounts|json_query(query) }}" + set_fact: + device: "{{ _root_device | first | regex_replace('([^0-9]+)[0-9]+', '\\1') }}" + partition: "{{ _root_device | first | regex_replace('[^0-9]+([0-9]+)', '\\1') }}" + root_device: "{{ _root_device }}" + +- name: check if growpart needs to be run + command: growpart -N {{ device }} {{ partition }} + failed_when: False + changed_when: "'NOCHANGE:' not in growpart_needed.stdout" + register: growpart_needed + environment: + LC_ALL: C + +- name: check fs type + command: file -Ls {{ root_device }} + changed_when: False + register: fs_type + +- name: run growpart # noqa 503 + command: growpart {{ device }} {{ partition }} + when: growpart_needed.changed + environment: + LC_ALL: C + +- name: run xfs_growfs # noqa 503 + command: xfs_growfs {{ root_device }} + when: growpart_needed.changed and 'XFS' in fs_type.stdout diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/main.yml new file mode 100644 index 0000000..45fa3d1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/tasks/main.yml @@ -0,0 +1,134 @@ +--- +# Disable swap +- import_tasks: 0010-swapoff.yml + when: + - not dns_late + - disable_swap + +- import_tasks: 0020-verify-settings.yml + when: + - not dns_late + tags: + - asserts + +- import_tasks: 0040-set_facts.yml + tags: + - resolvconf + - facts + +- import_tasks: 0050-create_directories.yml + when: + - not dns_late + +- import_tasks: 0060-resolvconf.yml + when: + - dns_mode != 'none' + - resolvconf_mode == 'host_resolvconf' + - systemd_resolved_enabled.rc != 0 + - networkmanager_enabled.rc != 0 + tags: + - bootstrap-os + - resolvconf + +- import_tasks: 0061-systemd-resolved.yml + when: + - dns_mode != 'none' + - resolvconf_mode == 'host_resolvconf' + - systemd_resolved_enabled.rc == 0 + tags: + - bootstrap-os + - resolvconf + +- import_tasks: 0062-networkmanager-unmanaged-devices.yml + when: + - networkmanager_enabled.rc == 0 + tags: + - bootstrap-os + +- import_tasks: 0063-networkmanager-dns.yml + when: + - dns_mode != 'none' + - resolvconf_mode == 'host_resolvconf' + - networkmanager_enabled.rc == 0 + tags: + - bootstrap-os + - resolvconf + +- import_tasks: 0070-system-packages.yml + when: + - not dns_late + tags: + - 
bootstrap-os + +- import_tasks: 0080-system-configurations.yml + when: + - not dns_late + tags: + - bootstrap-os + +- import_tasks: 0081-ntp-configurations.yml + when: + - not dns_late + - ntp_enabled + tags: + - bootstrap-os + +- import_tasks: 0090-etchosts.yml + when: + - not dns_late + tags: + - bootstrap-os + - etchosts + +- import_tasks: 0100-dhclient-hooks.yml + when: + - dns_mode != 'none' + - resolvconf_mode == 'host_resolvconf' + - dhclientconffile is defined + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + tags: + - bootstrap-os + - resolvconf + +- import_tasks: 0110-dhclient-hooks-undo.yml + when: + - dns_mode != 'none' + - resolvconf_mode != 'host_resolvconf' + - dhclientconffile is defined + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + tags: + - bootstrap-os + - resolvconf + +# We need to make sure the network is restarted early enough so that docker can later pick up the correct system +# nameservers and search domains +- name: Flush handlers + meta: flush_handlers + +- name: Check if we are running inside a Azure VM + stat: + path: /var/lib/waagent/ + get_attributes: no + get_checksum: no + get_mime: no + register: azure_check + when: + - not dns_late + tags: + - bootstrap-os + +- import_tasks: 0120-growpart-azure-centos-7.yml + when: + - not dns_late + - azure_check.stat.exists + - ansible_os_family == "RedHat" + tags: + - bootstrap-os + +- name: Run calico checks + include_role: + name: network_plugin/calico + tasks_from: check + when: + - kube_network_plugin == 'calico' + - not ignore_assert_errors diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/ansible_git.j2 b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/ansible_git.j2 new file mode 100644 index 0000000..abf92a7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/ansible_git.j2 @@ -0,0 +1,3 @@ +; This file contains the information which identifies the deployment state relative to the git repo +[default] +{{ gitinfo.stdout }} diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/chrony.conf.j2 b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/chrony.conf.j2 new file mode 100644 index 0000000..7931f43 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/chrony.conf.j2 @@ -0,0 +1,27 @@ +# {{ ansible_managed }} + +# Specify one or more NTP servers. +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% for server in ntp_servers %} +server {{ server }} +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +{% if ntp_tinker_panic is sameas true %} +# Force time sync if the drift exceeds the threshold specified +# Useful for VMs that can be paused and much later resumed. +makestep 1.0 -1 +{% else %} +# Allow the system clock to be stepped in the first three updates +# if its offset is larger than 1 second. +makestep 1.0 3 +{% endif %} + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Specify directory for log files. 
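# Illustrative rendering (the server list is an assumed inventory value, not a shipped
# default): with ntp_servers: ['0.pool.ntp.org iburst', '1.pool.ntp.org iburst'] and
# ntp_tinker_panic left at false, this template produces roughly:
#   server 0.pool.ntp.org iburst
#   server 1.pool.ntp.org iburst
#   driftfile /var/lib/chrony/drift
#   makestep 1.0 3
#   rtcsync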
+logdir /var/log/chrony diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/dhclient_dnsupdate.sh.j2 b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/dhclient_dnsupdate.sh.j2 new file mode 100644 index 0000000..8cf8b81 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/dhclient_dnsupdate.sh.j2 @@ -0,0 +1,13 @@ +#!/bin/sh +# +# Prepend resolver options to /etc/resolv.conf after dhclient` +# regenerates the file. See man (5) resolver for more details. +# +if [ $reason = "BOUND" ]; then + if [ -n "$new_domain_search" -o -n "$new_domain_name_servers" ]; then + RESOLV_CONF=$(cat /etc/resolv.conf | sed -r '/^options (timeout|attempts|ndots).*$/d') + OPTIONS="options timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }} ndots:{{ ndots }}" + + printf "%b\n" "$RESOLV_CONF\n$OPTIONS" > /etc/resolv.conf + fi +fi diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/dhclient_dnsupdate_rh.sh.j2 b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/dhclient_dnsupdate_rh.sh.j2 new file mode 100644 index 0000000..511839f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/dhclient_dnsupdate_rh.sh.j2 @@ -0,0 +1,17 @@ +#!/bin/sh +# +# Prepend resolver options to /etc/resolv.conf after dhclient` +# regenerates the file. See man (5) resolver for more details. +# +zdnsupdate_config() { + if [ -n "$new_domain_search" -o -n "$new_domain_name_servers" ]; then + RESOLV_CONF=$(cat /etc/resolv.conf | sed -r '/^options (timeout|attempts|ndots).*$/d') + OPTIONS="options timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }} ndots:{{ ndots }}" + + echo -e "$RESOLV_CONF\n$OPTIONS" > /etc/resolv.conf + fi +} + +zdnsupdate_restore() { + : +} diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/ntp.conf.j2 b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/ntp.conf.j2 new file mode 100644 index 0000000..abeb899 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/ntp.conf.j2 @@ -0,0 +1,45 @@ +# {{ ansible_managed }} + +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +driftfile {{ ntp_driftfile }} + +{% if ntp_tinker_panic is sameas true %} +# Always reset the clock, even if the new time is more than 1000s away +# from the current system time. Useful for VMs that can be paused +# and much later resumed. +tinker panic 0 +{% endif %} + +# Specify one or more NTP servers. +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% for item in ntp_servers %} +pool {{ item }} +{% endfor %} + +# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for +# details. The web page +# might also be helpful. +# +# Note that "restrict" applies to both servers and clients, so a configuration +# that might be intended to block requests from certain clients could also end +# up blocking replies from your own upstream servers. + +# By default, exchange time with everybody, but don't allow configuration. +restrict -4 default kod notrap nomodify nopeer noquery limited +restrict -6 default kod notrap nomodify nopeer noquery limited + +# Local users may interrogate the ntp server more closely. 
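# Illustrative values (assumed, not shipped defaults): with ntp_restrict set to
# ['127.0.0.1', '::1'], the loop below renders:
#   restrict 127.0.0.1
#   restrict ::1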
+{% for item in ntp_restrict %} +restrict {{ item }} +{% endfor %} + +# Needed for adding pool entries +restrict source notrap nomodify noquery + +# Disable the monitoring facility to prevent amplification attacks using ntpdc +# monlist command when default restrict does not include the noquery flag. See +# CVE-2013-5211 for more details. +# Note: Monitoring will not be disabled with the limited restriction flag. +disable monitor diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/resolvconf.j2 b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/resolvconf.j2 new file mode 100644 index 0000000..807fdd0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/resolvconf.j2 @@ -0,0 +1,10 @@ +#cloud-config +write_files: + - path: "/etc/resolv.conf" + permissions: "0644" + owner: "root" + content: | + {% for l in cloud_config.stdout_lines %} + {{ l }} + {% endfor %} + # diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/resolved.conf.j2 b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/resolved.conf.j2 new file mode 100644 index 0000000..901fd24 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/templates/resolved.conf.j2 @@ -0,0 +1,21 @@ +[Resolve] +{% if dns_early is sameas true and dns_late is sameas false %} +#DNS= +{% else %} +DNS={{ ([nodelocaldns_ip] if enable_nodelocaldns else coredns_server )| list | join(' ') }} +{% endif %} +FallbackDNS={{ ( upstream_dns_servers|d([]) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(' ') }} +{% if remove_default_searchdomains is sameas false or (remove_default_searchdomains is sameas true and searchdomains|default([])|length==0)%} +Domains={{ ([ 'default.svc.' + dns_domain, 'svc.' 
+ dns_domain ] + searchdomains|default([])) | join(' ') }} +{% else %} +Domains={{ searchdomains|default([]) | join(' ') }} +{% endif %} +#LLMNR=no +#MulticastDNS=no +DNSSEC=no +Cache=no-negative +{% if ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] %} +DNSStubListener=no +{% else %} +#DNSStubListener=yes +{% endif %} diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/amazon.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/amazon.yml new file mode 100644 index 0000000..09c645f --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/amazon.yml @@ -0,0 +1,7 @@ +--- +required_pkgs: + - libselinux-python + - device-mapper-libs + - nss + - conntrack-tools + - libseccomp diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/centos.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/centos.yml new file mode 100644 index 0000000..2a5b6c7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/centos.yml @@ -0,0 +1,8 @@ +--- +required_pkgs: + - "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + - device-mapper-libs + - nss + - conntrack + - container-selinux + - libseccomp diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/debian-11.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/debian-11.yml new file mode 100644 index 0000000..59cbc5a --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/debian-11.yml @@ -0,0 +1,10 @@ +--- +required_pkgs: + - python3-apt + - gnupg + - apt-transport-https + - software-properties-common + - conntrack + - iptables + - apparmor + - libseccomp2 diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/debian.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/debian.yml new file mode 100644 index 0000000..51a2802 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/debian.yml @@ -0,0 +1,9 @@ +--- +required_pkgs: + - python-apt + - aufs-tools + - apt-transport-https + - software-properties-common + - conntrack + - apparmor + - libseccomp2 diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/fedora.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/fedora.yml new file mode 100644 index 0000000..d69b111 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/fedora.yml @@ -0,0 +1,8 @@ +--- +required_pkgs: + - iptables + - libselinux-python3 + - device-mapper-libs + - conntrack + - container-selinux + - libseccomp diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/redhat.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/redhat.yml new file mode 100644 index 0000000..2a5b6c7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/redhat.yml @@ -0,0 +1,8 @@ +--- +required_pkgs: + - "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + - device-mapper-libs + - nss + - conntrack + - container-selinux + - libseccomp diff --git a/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/suse.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/suse.yml new file mode 100644 index 0000000..d089ac1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/suse.yml @@ -0,0 +1,5 @@ +--- +required_pkgs: + - device-mapper + - conntrack-tools + - libseccomp2 diff --git 
a/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/ubuntu.yml b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/ubuntu.yml new file mode 100644 index 0000000..85b3f25 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/preinstall/vars/ubuntu.yml @@ -0,0 +1,8 @@ +--- +required_pkgs: + - python3-apt + - apt-transport-https + - software-properties-common + - conntrack + - apparmor + - libseccomp2 diff --git a/kubespray/extra_playbooks/roles/kubernetes/tokens/files/kube-gen-token.sh b/kubespray/extra_playbooks/roles/kubernetes/tokens/files/kube-gen-token.sh new file mode 100644 index 0000000..121b522 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/tokens/files/kube-gen-token.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +token_dir=${TOKEN_DIR:-/var/srv/kubernetes} +token_file="${token_dir}/known_tokens.csv" + +create_accounts=($@) + +if [ ! -e "${token_file}" ]; then + touch "${token_file}" +fi + +for account in "${create_accounts[@]}"; do + if grep ",${account}," "${token_file}" ; then + continue + fi + token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + echo "${token},${account},${account}" >> "${token_file}" + echo "${token}" > "${token_dir}/${account}.token" + echo "Added ${account}" +done diff --git a/kubespray/extra_playbooks/roles/kubernetes/tokens/tasks/check-tokens.yml b/kubespray/extra_playbooks/roles/kubernetes/tokens/tasks/check-tokens.yml new file mode 100644 index 0000000..ae75f0d --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/tokens/tasks/check-tokens.yml @@ -0,0 +1,41 @@ +--- +- name: "Check_tokens | check if the tokens have already been generated on first master" + stat: + path: "{{ kube_token_dir }}/known_tokens.csv" + get_attributes: no + get_checksum: yes + get_mime: no + delegate_to: "{{ groups['kube_control_plane'][0] }}" + register: known_tokens_master + run_once: true + +- name: "Check_tokens | Set default value for 'sync_tokens' and 'gen_tokens' to false" + set_fact: + sync_tokens: false + gen_tokens: false + +- name: "Check_tokens | Set 'sync_tokens' and 'gen_tokens' to true" + set_fact: + gen_tokens: true + when: not known_tokens_master.stat.exists and kube_token_auth|default(true) + run_once: true + +- name: "Check tokens | check if a cert already exists" + stat: + path: "{{ kube_token_dir }}/known_tokens.csv" + get_attributes: no + get_checksum: yes + get_mime: no + register: known_tokens + +- name: "Check_tokens | Set 'sync_tokens' to true" + set_fact: + sync_tokens: >- + {%- set tokens = {'sync': False} -%} + {%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch) + if (not hostvars[server].known_tokens.stat.exists) or + (hostvars[server].known_tokens.stat.checksum|default('') != known_tokens_master.stat.checksum|default('')) -%} + {%- set _ = tokens.update({'sync': True}) -%} + {%- endfor -%} + {{ tokens.sync }} + run_once: 
true diff --git a/kubespray/extra_playbooks/roles/kubernetes/tokens/tasks/gen_tokens.yml b/kubespray/extra_playbooks/roles/kubernetes/tokens/tasks/gen_tokens.yml new file mode 100644 index 0000000..aa1cf21 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/tokens/tasks/gen_tokens.yml @@ -0,0 +1,64 @@ +--- +- name: Gen_tokens | copy tokens generation script + copy: + src: "kube-gen-token.sh" + dest: "{{ kube_script_dir }}/kube-gen-token.sh" + mode: 0700 + run_once: yes + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: gen_tokens|default(false) + +- name: Gen_tokens | generate tokens for master components + command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" + environment: + TOKEN_DIR: "{{ kube_token_dir }}" + with_nested: + - [ "system:kubectl" ] + - "{{ groups['kube_control_plane'] }}" + register: gentoken_master + changed_when: "'Added' in gentoken_master.stdout" + run_once: yes + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: gen_tokens|default(false) + +- name: Gen_tokens | generate tokens for node components + command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" + environment: + TOKEN_DIR: "{{ kube_token_dir }}" + with_nested: + - [ 'system:kubelet' ] + - "{{ groups['kube_node'] }}" + register: gentoken_node + changed_when: "'Added' in gentoken_node.stdout" + run_once: yes + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: gen_tokens|default(false) + +- name: Gen_tokens | Get list of tokens from first master + command: "find {{ kube_token_dir }} -maxdepth 1 -type f" + register: tokens_list + check_mode: no + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + when: sync_tokens|default(false) + +- name: Gen_tokens | Gather tokens + shell: "set -o pipefail && tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0" + args: + warn: false + executable: /bin/bash + register: tokens_data + check_mode: no + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + when: sync_tokens|default(false) + +- name: Gen_tokens | Copy tokens on masters + shell: "set -o pipefail && echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /" + args: + executable: /bin/bash + when: + - inventory_hostname in groups['kube_control_plane'] + - sync_tokens|default(false) + - inventory_hostname != groups['kube_control_plane'][0] + - tokens_data.stdout diff --git a/kubespray/extra_playbooks/roles/kubernetes/tokens/tasks/main.yml b/kubespray/extra_playbooks/roles/kubernetes/tokens/tasks/main.yml new file mode 100644 index 0000000..d454a80 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubernetes/tokens/tasks/main.yml @@ -0,0 +1,19 @@ +--- + +- import_tasks: check-tokens.yml + tags: + - k8s-secrets + - k8s-gen-tokens + - facts + +- name: Make sure the tokens directory exits + file: + path: "{{ kube_token_dir }}" + state: directory + mode: 0644 + group: "{{ kube_cert_group }}" + +- import_tasks: gen_tokens.yml + tags: + - k8s-secrets + - k8s-gen-tokens diff --git a/kubespray/extra_playbooks/roles/kubespray-defaults/defaults/main.yaml b/kubespray/extra_playbooks/roles/kubespray-defaults/defaults/main.yaml new file mode 100644 index 0000000..00b7388 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubespray-defaults/defaults/main.yaml @@ -0,0 +1,680 @@ +--- +# Use proxycommand if bastion host is in group all +# This change obseletes editing ansible.cfg file depending on bastion existence +ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o 
ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p -p {{ hostvars['bastion']['ansible_port'] | default(22) }} {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}" + +# selinux state +preinstall_selinux_state: permissive + +kube_api_anonymous_auth: true + +# Default value, but will be set to true automatically if detected +is_fedora_coreos: false + +# optional disable the swap +disable_swap: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +## The minimum version working +kube_version_min_required: v1.23.0 + +## Kube Proxy mode One of ['iptables','ipvs'] +kube_proxy_mode: ipvs + +## List of kubeadm init phases that should be skipped during control plane setup +## By default 'addon/coredns' is skipped +## 'addon/kube-proxy' gets skipped for some network plugins +kubeadm_init_phases_skip_default: [ "addon/coredns" ] +kubeadm_init_phases_skip: >- + {%- if kube_network_plugin == 'kube-router' and (kube_router_run_service_proxy is defined and kube_router_run_service_proxy) -%} + {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {%- elif kube_network_plugin == 'cilium' and (cilium_kube_proxy_replacement is defined and cilium_kube_proxy_replacement == 'strict') -%} + {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {%- elif kube_network_plugin == 'calico' and (calico_bpf_enabled is defined and calico_bpf_enabled) -%} + {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {%- elif kube_proxy_remove is defined and kube_proxy_remove -%} + {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {%- else -%} + {{ kubeadm_init_phases_skip_default }} + {%- endif -%} + +# List of kubeadm phases that should be skipped when joining a new node +# You may need to set this to ['preflight'] for air-gaped deployments to avoid failing connectivity tests. +kubeadm_join_phases_skip_default: [] +kubeadm_join_phases_skip: >- + {{ kubeadm_join_phases_skip_default }} + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# Set to true to allow pre-checks to fail and continue deployment +ignore_assert_errors: false + +kube_vip_enabled: false + +# nginx-proxy configure +nginx_config_dir: "/etc/nginx" + +# haproxy configure +haproxy_config_dir: "/etc/haproxy" + +# Directory where the binaries will be installed +bin_dir: /usr/local/bin +docker_bin_dir: /usr/bin +containerd_bin_dir: "{{ bin_dir }}" +etcd_data_dir: /var/lib/etcd +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# Install epel repo on Centos/RHEL +epel_enabled: false + +# DNS configuration. 
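# Worked example (derived from the defaults below; actual values may be overridden in
# inventory): with ndots: 2 and the docker_dns_options listed below, the resolver options
# pushed into /etc/resolv.conf for host-network pods come out as roughly:
#   options ndots:2 timeout:2 attempts:2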
+# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# Default resolv.conf options +docker_dns_options: +- ndots:{{ ndots }} +- timeout:2 +- attempts:2 +# Can be coredns, coredns_dual, manual, or none +dns_mode: coredns + +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 + +# Should be set to a cluster IP if using a custom cluster DNS +manual_dns_server: "" + +# Can be host_resolvconf, docker_dns or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes DNS service (called skydns for historical reasons) +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" +docker_dns_search_domains: +- 'default.svc.{{ dns_domain }}' +- 'svc.{{ dns_domain }}' + +kube_dns_servers: + coredns: ["{{skydns_server}}"] + coredns_dual: "{{[skydns_server] + [ skydns_server_secondary ]}}" + manual: ["{{manual_dns_server}}"] + +dns_servers: "{{kube_dns_servers[dns_mode]}}" + +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local + +enable_coredns_k8s_endpoint_pod_names: false + +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# Kubectl command +# This is for consistency when using kubectl command in roles, and ensure +kubectl: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# compatibility directory for kubeadm +kube_cert_compat_dir: "/etc/kubernetes/pki" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Set to true when the CAs are managed externally. +# When true, disables all tasks manipulating certificates. Ensure before the kubespray run that: +# - Certificates and CAs are present in kube_cert_dir +# - Kubeconfig files are present in kube_config_dir +kube_external_ca_mode: false + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. 
Use cni for generic cni plugin)
+# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
+kube_network_plugin: calico
+kube_network_plugin_multus: false
+
+# Determines if calico_rr group exists
+peer_with_calico_rr: "{{ 'calico_rr' in groups and groups['calico_rr']|length > 0 }}"
+
+# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
+calico_datastore: "kdd"
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node for pod IP address allocation. Note that the number of pods per node is
+# also limited by the kubelet_max_pods variable which defaults to 110.
+#
+# Example:
+# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
+# - kube_pods_subnet: 10.233.64.0/18
+# - kube_network_node_prefix: 24
+# - kubelet_max_pods: 110
+#
+# Example:
+# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
+# - kube_pods_subnet: 10.233.64.0/18
+# - kube_network_node_prefix: 25
+# - kubelet_max_pods: 110
+kube_network_node_prefix: 24
+
+# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
+enable_dual_stack_networks: false
+
+# Kubernetes internal network for IPv6 services, unused block of space.
+# This is only used if enable_dual_stack_networks is set to true
+# This provides 4096 IPv6 IPs
+kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
+
+# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
+# This network must not already be in your network infrastructure!
+# This is only used if enable_dual_stack_networks is set to true.
+# This provides room for 256 nodes with 254 pods per node.
+kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+
+# IPv6 subnet size allocated to each node for pods.
+# This is only used if enable_dual_stack_networks is set to true
+# This provides room for 254 pods per node.
+kube_network_node_prefix_ipv6: 120
+
+# The virtual cluster IP, real host IPs and ports the API Server will be
+# listening on.
+# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
+# access IP value (automatically evaluated below)
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+
+# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost,
+# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} too.
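# Illustration only (domain, address and port are assumed placeholders): an external
# API-server load balancer is typically declared in inventory group_vars, e.g.
#   apiserver_loadbalancer_domain_name: "elb.some.domain"
#   loadbalancer_apiserver:
#     address: 1.2.3.4
#     port: 1234
# in which case the kube_apiserver_endpoint logic further down resolves to
# https://elb.some.domain:1234 instead of the first control-plane node.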
+kube_apiserver_bind_address: 0.0.0.0 + +# https +kube_apiserver_port: 6443 + +# If non-empty, will use this string as identification instead of the actual hostname +kube_override_hostname: >- + {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- else -%} + {{ inventory_hostname }} + {%- endif -%} + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# Aggregator +kube_api_aggregator_routing: false + +# Profiling +kube_profiling: false + +# Graceful Node Shutdown +kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods should be less than kubelet_shutdown_grace_period +# to give normal pods time to be gracefully evacuated +kubelet_shutdown_grace_period_critical_pods: 20s + +# Whether to deploy the container engine +deploy_container_engine: "{{ inventory_hostname in groups['k8s_cluster'] or etcd_deployment_type == 'docker' }}" + +# Container for runtime +container_manager: containerd + +# Enable Kata Containers as additional container runtime +# When enabled, it requires `container_manager` different than Docker +kata_containers_enabled: false + +# Enable gVisor as an additional container runtime +# gVisor is only supported with container_manager Docker or containerd +gvisor_enabled: false + +# Enable crun as additional container runtime +# When enabled, it requires container_manager=crio +crun_enabled: false + +# Enable youki as additional container runtime +# When enabled, it requires container_manager=crio +youki_enabled: false + +# Container on localhost (download images when download_localhost is true) +container_manager_on_localhost: "{{ container_manager }}" + +# CRI socket path +cri_socket: >- + {%- if container_manager == 'crio' -%} + unix:///var/run/crio/crio.sock + {%- elif container_manager == 'containerd' -%} + unix:///var/run/containerd/containerd.sock + {%- elif container_manager == 'docker' -%} + unix:///var/run/cri-dockerd.sock + {%- endif -%} + +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +## A list of insecure docker registries (IP address or domain name), for example +## to allow insecure-registry access to self-hosted registries. Empty by default. +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 +docker_insecure_registries: [] + +## A list of additional registry mirrors, for example China registry mirror. Empty by default. 
+# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com +docker_registry_mirrors: [] + +## If non-empty will override default system MounFlags value. +## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +# docker_options: "" + +## A list of plugins to install using 'docker plugin install --grant-all-permissions' +## Empty by default so no plugins will be installed. +docker_plugins: [] + +# Containerd options - thse are relevant when container_manager == 'containerd' +containerd_use_systemd_cgroup: true + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## Port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11:5000 +containerd_insecure_registries: [] + +# Containerd conf default dir +containerd_storage_dir: "/var/lib/containerd" +containerd_state_dir: "/run/containerd" +containerd_systemd_dir: "/etc/systemd/system/containerd.service.d" +containerd_cfg_dir: "/etc/containerd" + +# Settings for containerized control plane (etcd/kubelet/secrets) +# deployment type for legacy etcd mode +etcd_deployment_type: host +cert_management: script + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +kubeconfig_localhost: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +kubectl_localhost: false + +# Define credentials_dir here so it can be overridden +credentials_dir: "{{ inventory_dir }}/credentials" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. 
+dashboard_enabled: false + +# Addons which can be enabled +helm_enabled: false +krew_enabled: false +registry_enabled: false +metrics_server_enabled: false +enable_network_policy: true +local_path_provisioner_enabled: false +local_volume_provisioner_enabled: false +local_volume_provisioner_directory_mode: 0700 +cinder_csi_enabled: false +aws_ebs_csi_enabled: false +azure_csi_enabled: false +gcp_pd_csi_enabled: false +vsphere_csi_enabled: false +upcloud_csi_enabled: false +csi_snapshot_controller_enabled: false +persistent_volumes_enabled: false +cephfs_provisioner_enabled: false +rbd_provisioner_enabled: false +ingress_nginx_enabled: false +ingress_alb_enabled: false +cert_manager_enabled: false +expand_persistent_volumes: false +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +argocd_enabled: false + +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +openstack_blockstorage_ignore_volume_az: "{{ volume_cross_zone_attachment | default('false') }}" +# set max volumes per node (cinder-csi), default not set +# node_volume_attach_limit: 25 +# Cinder CSI topology, when false volumes can be cross-mounted between availability zones +# cinder_topology: false +# Set Cinder topology zones (can be multiple zones, default not set) +# cinder_topology_zones: +# - nova +cinder_csi_ignore_volume_az: "{{ volume_cross_zone_attachment | default('false') }}" + +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +openstack_lbaas_enabled: false +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +openstack_lbaas_create_monitor: "yes" +openstack_lbaas_monitor_delay: "1m" +openstack_lbaas_monitor_timeout: "30s" +openstack_lbaas_monitor_max_retries: "3" +openstack_cacert: "{{ lookup('env','OS_CACERT') }}" + +# Default values for the external OpenStack Cloud Controller +external_openstack_enable_ingress_hostname: false +external_openstack_ingress_hostname_suffix: "nip.io" +external_openstack_max_shared_lb: 2 +external_openstack_lbaas_create_monitor: false +external_openstack_lbaas_monitor_delay: "1m" +external_openstack_lbaas_monitor_timeout: "30s" +external_openstack_lbaas_monitor_max_retries: "3" +external_openstack_network_ipv6_disabled: false +external_openstack_lbaas_use_octavia: false +external_openstack_network_internal_networks: [] +external_openstack_network_public_networks: [] + +# Default values for the external Hcloud Cloud Controller +external_hcloud_cloud: + hcloud_api_token: "" + token_secret_name: hcloud + + service_account_name: cloud-controller-manager + + controller_image_tag: "latest" + ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset + ## Format: + ## external_hcloud_cloud.controller_extra_args: + ## arg1: "value1" + ## arg2: "value2" + controller_extra_args: {} + +## List of authorization modes that must be configured for +## the k8s cluster. Only 'AlwaysAllow', 'AlwaysDeny', 'Node' and +## 'RBAC' modes are tested. Order is important. 
+authorization_modes: ['Node', 'RBAC'] +rbac_enabled: "{{ 'RBAC' in authorization_modes }}" + +# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet's HTTPS endpoint +kubelet_authentication_token_webhook: true + +# When enabled, access to the kubelet API requires authorization by delegation to the API server +kubelet_authorization_mode_webhook: false + +# kubelet uses certificates for authenticating to the Kubernetes API +# Automatically generate a new key and request a new certificate from the Kubernetes API as the current certificate approaches expiration +kubelet_rotate_certificates: true +# kubelet can also request a new server certificate from the Kubernetes API +kubelet_rotate_server_certificates: false + +# If set to true, kubelet errors if any of kernel tunables is different than kubelet defaults +kubelet_protect_kernel_defaults: true + +# Set additional sysctl variables to modify Linux kernel variables, for example: +# additional_sysctl: +# - { name: kernel.pid_max, value: 131072 } +# +additional_sysctl: [] + +## List of key=value pairs that describe feature gates for +## the k8s cluster. +kube_feature_gates: [] +kube_apiserver_feature_gates: [] +kube_controller_feature_gates: [] +kube_scheduler_feature_gates: [] +kube_proxy_feature_gates: [] +kubelet_feature_gates: [] +kubeadm_feature_gates: [] + +# Local volume provisioner storage classes +# Levarages Ansibles string to Python datatype casting. Otherwise the dict_key isn't substituted +# see https://github.com/ansible/ansible/issues/17324 +local_volume_provisioner_storage_classes: | + { + "{{ local_volume_provisioner_storage_class | default('local-storage') }}": { + "host_dir": "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}", + "mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}", + "volume_mode": "Filesystem", + "fs_type": "ext4" + + } + } + +# weave's network password for encryption +# if null then no network encryption +# you can use --extra-vars to pass the password in command line +weave_password: EnterPasswordHere + +ssl_ca_dirs: |- + [ + {% if ansible_os_family in ['Flatcar', 'Flatcar Container Linux by Kinvolk'] -%} + '/usr/share/ca-certificates', + {% elif ansible_os_family == 'RedHat' -%} + '/etc/pki/tls', + '/etc/pki/ca-trust', + {% elif ansible_os_family == 'Debian' -%} + '/usr/share/ca-certificates', + {% endif -%} + ] + +# Vars for pointing to kubernetes api endpoints +is_kube_master: "{{ inventory_hostname in groups['kube_control_plane'] }}" +kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}" +kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}" +kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}" +first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(fallback_ips[groups['kube_control_plane'][0]])) }}" +loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}" +loadbalancer_apiserver_type: "nginx" +# applied if only external loadbalancer_apiserver is defined, otherwise ignored +apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local" +kube_apiserver_global_endpoint: |- + {% if loadbalancer_apiserver is defined -%} + https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} + {%- elif use_localhost_as_kubeapi_loadbalancer|default(False)|bool -%} + 
https://127.0.0.1:{{ kube_apiserver_port }} + {%- else -%} + https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} + {%- endif %} +kube_apiserver_endpoint: |- + {% if loadbalancer_apiserver is defined -%} + https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} + {%- elif not is_kube_master and loadbalancer_apiserver_localhost -%} + https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} + {%- elif is_kube_master -%} + https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }} + {%- else -%} + https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} + {%- endif %} +kube_apiserver_client_cert: "{{ kube_cert_dir }}/ca.crt" +kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key" + +# Set to true to deploy etcd-events cluster +etcd_events_cluster_enabled: false + +# etcd group can be empty when kubeadm manages etcd +etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}" + +# Vars for pointing to etcd endpoints +is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}" +etcd_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}" +etcd_access_address: "{{ access_ip | default(etcd_address) }}" +etcd_events_access_address: "{{ access_ip | default(etcd_address) }}" +etcd_peer_url: "https://{{ etcd_access_address }}:2380" +etcd_client_url: "https://{{ etcd_access_address }}:2379" +etcd_events_peer_url: "https://{{ etcd_events_access_address }}:2382" +etcd_events_client_url: "https://{{ etcd_events_access_address }}:2383" +etcd_access_addresses: |- + {% for item in etcd_hosts -%} + https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:2379{% if not loop.last %},{% endif %} + {%- endfor %} +etcd_events_access_addresses_list: |- + [ + {% for item in etcd_hosts -%} + 'https://{{ hostvars[item]['etcd_events_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:2383'{% if not loop.last %},{% endif %} + {%- endfor %} + ] +etcd_metrics_addresses: |- + {% for item in etcd_hosts -%} + https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %} + {%- endfor %} +etcd_events_access_addresses: "{{etcd_events_access_addresses_list | join(',')}}" +etcd_events_access_addresses_semicolon: "{{etcd_events_access_addresses_list | join(';')}}" +# user should set etcd_member_name in inventory/mycluster/hosts.ini +etcd_member_name: |- + {% for host in groups['etcd'] %} + {% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index|string) }}{% endif %} + {% endfor %} +etcd_peer_addresses: |- + {% for item in groups['etcd'] -%} + {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}=https://{{ hostvars[item].etcd_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2380{% if not loop.last %},{% endif %} + {%- endfor %} +etcd_events_peer_addresses: |- + {% for item in groups['etcd'] -%} + {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2382{% if not loop.last %},{% endif %} + {%- endfor %} + +podsecuritypolicy_enabled: false +etcd_heartbeat_interval: "250" +etcd_election_timeout: 
"5000" +etcd_snapshot_count: "10000" + +certificates_key_size: 2048 +certificates_duration: 36500 + +etcd_config_dir: /etc/ssl/etcd +etcd_events_data_dir: "/var/lib/etcd-events" +etcd_cert_dir: "{{ etcd_config_dir }}/ssl" + +typha_enabled: false + +calico_apiserver_enabled: false + +_host_architecture_groups: + x86_64: amd64 + aarch64: arm64 + armv7l: arm +host_architecture: >- + {%- if ansible_architecture in _host_architecture_groups -%} + {{ _host_architecture_groups[ansible_architecture] }} + {%- else -%} + {{ ansible_architecture }} + {%- endif -%} + +_host_os_groups: + Linux: linux + Darwin: darwin + Win32NT: windows +host_os: >- + {%- if ansible_system in _host_os_groups -%} + {{ _host_os_groups[ansible_system] }} + {%- else -%} + {{ ansible_system }} + {%- endif -%} + +# Sets the eventRecordQPS parameter in kubelet-config.yaml. The default value is 5 (see types.go) +# Setting it to 0 allows unlimited requests per second. +kubelet_event_record_qps: 5 + +proxy_env: + http_proxy: "{{ http_proxy | default ('') }}" + HTTP_PROXY: "{{ http_proxy | default ('') }}" + https_proxy: "{{ https_proxy | default ('') }}" + HTTPS_PROXY: "{{ https_proxy | default ('') }}" + no_proxy: "{{ no_proxy | default ('') }}" + NO_PROXY: "{{ no_proxy | default ('') }}" + +proxy_disable_env: + ALL_PROXY: '' + FTP_PROXY: '' + HTTPS_PROXY: '' + HTTP_PROXY: '' + NO_PROXY: '' + all_proxy: '' + ftp_proxy: '' + http_proxy: '' + https_proxy: '' + no_proxy: '' + +# krew root dir +krew_root_dir: "/usr/local/krew" + +# sysctl_file_path to add sysctl conf to +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" diff --git a/kubespray/extra_playbooks/roles/kubespray-defaults/meta/main.yml b/kubespray/extra_playbooks/roles/kubespray-defaults/meta/main.yml new file mode 100644 index 0000000..88d7024 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubespray-defaults/meta/main.yml @@ -0,0 +1,6 @@ +--- +dependencies: + - role: download + skip_downloads: true + tags: + - facts diff --git a/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/fallback_ips.yml b/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/fallback_ips.yml new file mode 100644 index 0000000..acca31c --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/fallback_ips.yml @@ -0,0 +1,31 @@ +--- +# Set 127.0.0.1 as fallback IP if we do not have host facts for host +# ansible_default_ipv4 isn't what you think. 
+# Thanks https://medium.com/opsops/ansible-default-ipv4-is-not-what-you-think-edb8ab154b10 + +- name: Gather ansible_default_ipv4 from all hosts + tags: always + include_tasks: fallback_ips_gather.yml + when: hostvars[delegate_host_to_gather_facts].ansible_default_ipv4 is not defined + loop: "{{ groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]) }}" + loop_control: + loop_var: delegate_host_to_gather_facts + run_once: yes + +- name: create fallback_ips_base + set_fact: + fallback_ips_base: | + --- + {% for item in (groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique %} + {% set found = hostvars[item].get('ansible_default_ipv4') %} + {{ item }}: "{{ found.get('address', '127.0.0.1') }}" + {% endfor %} + delegate_to: localhost + connection: local + delegate_facts: yes + become: no + run_once: yes + +- name: set fallback_ips + set_fact: + fallback_ips: "{{ hostvars.localhost.fallback_ips_base | from_yaml }}" diff --git a/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/fallback_ips_gather.yml b/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/fallback_ips_gather.yml new file mode 100644 index 0000000..2d2d000 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/fallback_ips_gather.yml @@ -0,0 +1,11 @@ +--- +# include to workaround mitogen issue +# https://github.com/dw/mitogen/issues/663 + +- name: "Gather ansible_default_ipv4 from {{ delegate_host_to_gather_facts }}" + setup: + gather_subset: '!all,network' + filter: "ansible_default_ipv4" + delegate_to: "{{ delegate_host_to_gather_facts }}" + connection: "{{ (delegate_host_to_gather_facts == 'localhost') | ternary('local', omit) }}" + delegate_facts: yes diff --git a/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/main.yaml b/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/main.yaml new file mode 100644 index 0000000..648a4af --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/main.yaml @@ -0,0 +1,33 @@ +--- +- name: Configure defaults + debug: + msg: "Check roles/kubespray-defaults/defaults/main.yml" + tags: + - always + +# do not run gather facts when bootstrap-os in roles +- name: set fallback_ips + import_tasks: fallback_ips.yml + when: + - "'bootstrap-os' not in ansible_play_role_names" + - fallback_ips is not defined + tags: + - always + +- name: set no_proxy + import_tasks: no_proxy.yml + when: + - "'bootstrap-os' not in ansible_play_role_names" + - http_proxy is defined or https_proxy is defined + - no_proxy is not defined + tags: + - always + +# TODO: Clean this task up when we drop backward compatibility support for `etcd_kubeadm_enabled` +- name: Set `etcd_deployment_type` to "kubeadm" if `etcd_kubeadm_enabled` is true + set_fact: + etcd_deployment_type: kubeadm + when: + - etcd_kubeadm_enabled is defined and etcd_kubeadm_enabled + tags: + - always diff --git a/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/no_proxy.yml b/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/no_proxy.yml new file mode 100644 index 0000000..6e6a5c9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubespray-defaults/tasks/no_proxy.yml @@ -0,0 +1,38 @@ +--- +- name: Set no_proxy to all assigned cluster IPs and hostnames + set_fact: + no_proxy_prepare: >- + {%- if loadbalancer_apiserver is defined -%} + {{ apiserver_loadbalancer_domain_name| default('') }}, + {{ loadbalancer_apiserver.address | default('') }}, + {%- endif -%} + {%- if 
no_proxy_exclude_workers | default(false) -%} + {% set cluster_or_master = 'kube_control_plane' %} + {%- else -%} + {% set cluster_or_master = 'k8s_cluster' %} + {%- endif -%} + {%- for item in (groups[cluster_or_master] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%} + {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}, + {%- if item != hostvars[item].get('ansible_hostname', '') -%} + {{ hostvars[item]['ansible_hostname'] }}, + {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }}, + {%- endif -%} + {{ item }},{{ item }}.{{ dns_domain }}, + {%- endfor -%} + {%- if additional_no_proxy is defined -%} + {{ additional_no_proxy }}, + {%- endif -%} + 127.0.0.1,localhost,{{ kube_service_addresses }},{{ kube_pods_subnet }},svc,svc.{{ dns_domain }} + delegate_to: localhost + connection: local + delegate_facts: yes + become: no + run_once: yes + +- name: Populates no_proxy to all hosts + set_fact: + no_proxy: "{{ hostvars.localhost.no_proxy_prepare }}" + proxy_env: "{{ proxy_env | combine({ + 'no_proxy': hostvars.localhost.no_proxy_prepare, + 'NO_PROXY': hostvars.localhost.no_proxy_prepare + }) }}" diff --git a/kubespray/extra_playbooks/roles/kubespray-defaults/vars/main.yml b/kubespray/extra_playbooks/roles/kubespray-defaults/vars/main.yml new file mode 100644 index 0000000..c79edf5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/kubespray-defaults/vars/main.yml @@ -0,0 +1,9 @@ +--- +# Kubespray constants + +kube_proxy_deployed: "{{ 'addon/kube-proxy' not in kubeadm_init_phases_skip }}" + +# The lowest version allowed to upgrade from (same as calico_version in the previous branch) +calico_min_version_required: "v3.19.4" + +containerd_min_version_required: "1.3.7" diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/defaults/main.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/defaults/main.yml new file mode 100644 index 0000000..0c2be2d --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/defaults/main.yml @@ -0,0 +1,165 @@ +--- +# the default value of name +calico_cni_name: k8s-pod-network + +# Enables Internet connectivity from containers +nat_outgoing: true + +# add default ippool name +calico_pool_name: "default-pool" +calico_ipv4pool_ipip: "Off" + +# Change encapsulation mode, by default we enable vxlan which is the most mature and well tested mode +calico_ipip_mode: Never # valid values are 'Always', 'Never' and 'CrossSubnet' +calico_vxlan_mode: Always # valid values are 'Always', 'Never' and 'CrossSubnet' + +calico_cni_pool: true +calico_cni_pool_ipv6: true + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# Calico doesn't support ipip tunneling for the IPv6. +calico_ipip_mode_ipv6: Never +calico_vxlan_mode_ipv6: Never + +# add default ipv6 ippool blockSize (defaults kube_network_node_prefix_ipv6) +calico_pool_blocksize_ipv6: 122 + +# Calico network backend can be 'bird', 'vxlan' and 'none' +calico_network_backend: vxlan + +calico_cert_dir: /etc/calico/certs + +# Global as_num (/calico/bgp/v1/global/as_num) +global_as_num: "64512" + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. 
+# calico_mtu: 1500 + +# Advertise Service External IPs +calico_advertise_service_external_ips: [] + +# Advertise Service LoadBalancer IPs +calico_advertise_service_loadbalancer_ips: [] + +# Calico eBPF support +calico_bpf_enabled: false +calico_bpf_log_level: "" +# Valid option for service mode: Tunnel (default), DSR=Direct Server Return +calico_bpf_service_mode: Tunnel + +# Limits for apps +calico_node_memory_limit: 500M +calico_node_cpu_limit: 300m +calico_node_memory_requests: 64M +calico_node_cpu_requests: 150m +calico_felix_chaininsertmode: Insert + +# Calico daemonset nodeselector +calico_ds_nodeselector: "kubernetes.io/os: linux" + +# Virtual network ID to use for VXLAN traffic. A value of 0 means "use the kernel default". +calico_vxlan_vni: 4096 + +# Port to use for VXLAN traffic. A value of 0 means "use the kernel default". +calico_vxlan_port: 4789 + +# Enable Prometheus Metrics endpoint for felix +calico_felix_prometheusmetricsenabled: false +calico_felix_prometheusmetricsport: 9091 +calico_felix_prometheusgometricsenabled: true +calico_felix_prometheusprocessmetricsenabled: true + +# Set the agent log level. Can be debug, warning, info or fatal +calico_loglevel: info +calico_node_startup_loglevel: error + +# Set log path for calico CNI plugin. Set to false to disable logging to disk. +calico_cni_log_file_path: /var/log/calico/cni/cni.log + +# Enable or disable usage report to 'usage.projectcalico.org' +calico_usage_reporting: false + +# Should calico ignore kernel's RPF check setting, +# see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198 +calico_node_ignorelooserpf: false + +# Define address on which Felix will respond to health requests +calico_healthhost: "localhost" + +# Configure time in seconds that calico will wait for the iptables lock +calico_iptables_lock_timeout_secs: 10 + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" (FELIX_IPTABLESBACKEND) +calico_iptables_backend: "Auto" + +# Calico Wireguard support +calico_wireguard_enabled: false +calico_wireguard_packages: [] +calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-{{ ansible_distribution_major_version }}-$basearch/ + +# If you want to use a non-default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node, set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://projectcalico.docs.tigera.io/reference/node/configuration#ip-autodetection-methods +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host's interface for MTU auto-detection.
+# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}" + +kube_etcd_cacert_file: ca.pem +kube_etcd_cert_file: node-{{ inventory_hostname }}.pem +kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# The default value for calico_datastore is set in role kubespray-defaults + +# Use typha (only with kdd) +typha_enabled: false +typha_prometheusmetricsenabled: false +typha_prometheusmetricsport: 9093 + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +typha_replicas: 1 + +# Set max typha connections +typha_max_connections_lower_limit: 300 + +# Generate certificates for typha<->calico-node communication +typha_secure: false + +calico_feature_control: {} + +# Calico default BGP port +calico_bgp_listen_port: 179 + +# Calico FelixConfiguration options +calico_felix_reporting_interval: 0s +calico_felix_log_severity_screen: Info + +# Calico container settings +calico_allow_ip_forwarding: false + +# Calico IPAM strictAffinity +calico_ipam_strictaffinity: false + +# Calico IPAM autoAllocateBlocks +calico_ipam_autoallocateblocks: true + +# Calico IPAM maxBlocksPerHost, default 0 +calico_ipam_maxblocksperhost: 0 + +# Calico apiserver (only with kdd) +calico_apiserver_enabled: false + +# Calico feature detect override, set "ChecksumOffloadBroken=true" to +# solve the https://github.com/projectcalico/calico/issues/3145 +calico_feature_detect_override: "" diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/files/openssl.conf b/kubespray/extra_playbooks/roles/network_plugin/calico/files/openssl.conf new file mode 100644 index 0000000..f4ba47d --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/files/openssl.conf @@ -0,0 +1,27 @@ +req_extensions = v3_req +distinguished_name = req_distinguished_name + +[req_distinguished_name] + +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment + +[ ssl_client ] +extendedKeyUsage = clientAuth, serverAuth +basicConstraints = CA:FALSE +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer + +[ v3_ca ] +basicConstraints = CA:TRUE +keyUsage = cRLSign, digitalSignature, keyCertSign +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid:always,issuer + +[ ssl_client_apiserver ] +extendedKeyUsage = clientAuth, serverAuth +basicConstraints = CA:FALSE +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer +subjectAltName = DNS:calico-api.calico-apiserver.svc diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/handlers/main.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/handlers/main.yml new file mode 100644 index 0000000..b4b7af8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/handlers/main.yml @@ -0,0 +1,27 @@ +--- +- name: reset_calico_cni + command: /bin/true + when: calico_cni_config is defined + notify: + - delete 10-calico.conflist + - Calico | delete calico-node docker containers + - Calico | delete calico-node crio/containerd containers + +- name: delete 10-calico.conflist + file: + path: /etc/cni/net.d/10-calico.conflist + state: absent + +- name: Calico | delete calico-node docker containers + shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" +
register: docker_calico_node_remove + until: docker_calico_node_remove is succeeded + retries: 5 + when: container_manager in ["docker"] + +- name: Calico | delete calico-node crio/containerd containers + shell: '{{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' + register: crictl_calico_node_remove + until: crictl_calico_node_remove is succeeded + retries: 5 + when: container_manager in ["crio", "containerd"] diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/rr/defaults/main.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/rr/defaults/main.yml new file mode 100644 index 0000000..dedda19 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/rr/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# Global as_num (/calico/bgp/v1/global/as_num) +# should be the same as in calico role +global_as_num: "64512" +calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}" diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/rr/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/rr/tasks/main.yml new file mode 100644 index 0000000..6164552 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/rr/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: Calico-rr | Pre-upgrade tasks + include_tasks: pre.yml + +- name: Calico-rr | Configuring node tasks + include_tasks: update-node.yml + +- name: Calico-rr | Set label for route reflector # noqa 301 + command: >- + {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} + 'i-am-a-route-reflector=true' --overwrite + changed_when: false + register: calico_rr_label + until: calico_rr_label is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/rr/tasks/pre.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/rr/tasks/pre.yml new file mode 100644 index 0000000..d8dbd80 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/rr/tasks/pre.yml @@ -0,0 +1,15 @@ +--- +- name: Calico-rr | Disable calico-rr service if it exists + service: + name: calico-rr + state: stopped + enabled: no + failed_when: false + +- name: Calico-rr | Delete obsolete files + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/calico/calico-rr.env + - /etc/systemd/system/calico-rr.service diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/rr/tasks/update-node.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/rr/tasks/update-node.yml new file mode 100644 index 0000000..7070076 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/rr/tasks/update-node.yml @@ -0,0 +1,48 @@ +--- +# Workaround to retry a block of tasks, ansible doesn't have a direct way to do it, +# you can follow the block loop request in: https://github.com/ansible/ansible/issues/46203 +- block: + - name: Set the retry count + set_fact: + retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}" + + - name: Calico | Set label for route reflector # noqa 301 305 + shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite" + changed_when: false + register: calico_rr_id_label + until: calico_rr_id_label is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 + when: calico_rr_id is defined + + - name: Calico-rr | Fetch current node object + command: "{{ bin_dir 
}}/calicoctl.sh get node {{ inventory_hostname }} -ojson" + changed_when: false + register: calico_rr_node + until: calico_rr_node is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 + + - name: Calico-rr | Set route reflector cluster ID + set_fact: + calico_rr_node_patched: >- + {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp': + { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }} + + - name: Calico-rr | Configure route reflector # noqa 301 305 + shell: "{{ bin_dir }}/calicoctl.sh replace -f-" + args: + stdin: "{{ calico_rr_node_patched | to_json }}" + + rescue: + - name: Fail if retry limit is reached + fail: + msg: Ended after 10 retries + when: retry_count|int == 10 + + - name: Retrying node configuration + debug: + msg: "Failed to configure route reflector - Retrying..." + + - name: Retry node configuration + include_tasks: update-node.yml diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml new file mode 100644 index 0000000..fc336e4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml @@ -0,0 +1,60 @@ +--- +- name: Calico | Check if calico apiserver exists + command: "{{ kubectl }} -n calico-apiserver get secret calico-apiserver-certs" + register: calico_apiserver_secret + changed_when: false + failed_when: false + +- name: Calico | Create ns manifests + template: + src: "calico-apiserver-ns.yml.j2" + dest: "{{ kube_config_dir }}/calico-apiserver-ns.yml" + mode: 0644 + +- name: Calico | Apply ns manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/calico-apiserver-ns.yml" + state: "latest" + +- name: Calico | Ensure calico certs dir + file: + path: /etc/calico/certs + state: directory + mode: 0755 + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Copy ssl script for apiserver certs + template: + src: make-ssl-calico.sh.j2 + dest: "{{ bin_dir }}/make-ssl-apiserver.sh" + mode: 0755 + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Copy ssl config for apiserver certs + copy: + src: openssl.conf + dest: /etc/calico/certs/openssl.conf + mode: 0644 + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Generate apiserver certs + command: >- + {{ bin_dir }}/make-ssl-apiserver.sh + -f /etc/calico/certs/openssl.conf + -c {{ kube_cert_dir }} + -d /etc/calico/certs + -s apiserver + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Create calico apiserver generic secrets + command: >- + {{ kubectl }} -n calico-apiserver + create secret generic {{ item.name }} + --from-file={{ item.cert }} + --from-file={{ item.key }} + with_items: + - name: calico-apiserver-certs + cert: /etc/calico/certs/apiserver.crt + key: /etc/calico/certs/apiserver.key + when: calico_apiserver_secret.rc != 0 diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/check.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/check.yml new file mode 100644 index 0000000..530985f --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/check.yml @@ -0,0 +1,194 @@ +--- +- name: Stop if legacy encapsulation variables are detected (ipip) + assert: + that: + - ipip is not defined + msg: "'ipip' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs" + run_once: True + 
delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Stop if legacy encapsulation variables are detected (ipip_mode) + assert: + that: + - ipip_mode is not defined + msg: "'ipip_mode' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Stop if legacy encapsulation variables are detected (calcio_ipam_autoallocateblocks) + assert: + that: + - calcio_ipam_autoallocateblocks is not defined + msg: "'calcio_ipam_autoallocateblocks' configuration variable is deprecated, it's a typo, please configure your inventory with 'calico_ipam_autoallocateblocks' set to 'true' or 'false' according to your specific needs" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + + +- name: Stop if incompatible network plugin and cloudprovider + assert: + that: + - calico_ipip_mode == 'Never' + - calico_vxlan_mode in ['Always', 'CrossSubnet'] + msg: "When using cloud_provider azure and network_plugin calico calico_ipip_mode must be 'Never' and calico_vxlan_mode 'Always' or 'CrossSubnet'" + when: + - cloud_provider is defined and cloud_provider == 'azure' + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Stop if supported Calico versions + assert: + that: + - "calico_version in calico_crds_archive_checksums.keys()" + msg: "Calico version not supported {{ calico_version }} not in {{ calico_crds_archive_checksums.keys() }}" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Get current calico cluster version + shell: "set -o pipefail && {{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'" + args: + executable: /bin/bash + register: calico_version_on_server + async: 10 + poll: 3 + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + changed_when: false + failed_when: false + +- name: Check that current calico version is enough for upgrade + assert: + that: + - calico_version_on_server.stdout is version(calico_min_version_required, '>=') + msg: > + Your version of calico is not fresh enough for upgrade. + Minimum version is {{ calico_min_version_required }} supported by the previous kubespray release. 
+ when: + - 'calico_version_on_server.stdout is defined' + - calico_version_on_server.stdout + - inventory_hostname == groups['kube_control_plane'][0] + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check that cluster_id is set if calico_rr enabled" + assert: + that: + - cluster_id is defined + msg: "A unique cluster_id is required if using calico_rr" + when: + - peer_with_calico_rr + - inventory_hostname == groups['kube_control_plane'][0] + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check that calico_rr nodes are in k8s_cluster group" + assert: + that: + - '"k8s_cluster" in group_names' + msg: "calico_rr must be a child group of k8s_cluster group" + when: + - '"calico_rr" in group_names' + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check vars defined correctly" + assert: + that: + - "calico_pool_name is defined" + - "calico_pool_name is match('^[a-zA-Z0-9-_\\\\.]{2,63}$')" + msg: "calico_pool_name contains invalid characters" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check calico network backend defined correctly" + assert: + that: + - "calico_network_backend in ['bird', 'vxlan', 'none']" + msg: "calico network backend is not 'bird', 'vxlan' or 'none'" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check ipip and vxlan mode defined correctly" + assert: + that: + - "calico_ipip_mode in ['Always', 'CrossSubnet', 'Never']" + - "calico_vxlan_mode in ['Always', 'CrossSubnet', 'Never']" + msg: "calico inter-host encapsulation mode is not 'Always', 'CrossSubnet' or 'Never'" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check ipip and vxlan mode if simultaneously enabled" + assert: + that: + - "calico_vxlan_mode in ['Never']" + msg: "IP-in-IP and VXLAN modes are mutually exclusive" + when: + - "calico_ipip_mode in ['Always', 'CrossSubnet']" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check ipip and vxlan mode if simultaneously enabled" + assert: + that: + - "calico_ipip_mode in ['Never']" + msg: "IP-in-IP and VXLAN modes are mutually exclusive" + when: + - "calico_vxlan_mode in ['Always', 'CrossSubnet']" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Get Calico {{ calico_pool_name }} configuration" + command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }} -o json" + failed_when: False + changed_when: False + check_mode: no + register: calico + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Set calico_pool_conf" + set_fact: + calico_pool_conf: '{{ calico.stdout | from_json }}' + when: calico.rc == 0 and calico.stdout + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check if inventory matches current cluster configuration" + assert: + that: + - calico_pool_conf.spec.blockSize|int == (calico_pool_blocksize | default(kube_network_node_prefix) | int) + - calico_pool_conf.spec.cidr == (calico_pool_cidr | default(kube_pods_subnet)) + - not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode + - not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode + msg: "Your inventory doesn't match the current cluster configuration" + when: + - calico_pool_conf is defined + run_once: True + delegate_to: "{{ 
groups['kube_control_plane'][0] }}" + +- name: "Check kdd calico_datastore if calico_apiserver_enabled" + assert: + that: calico_datastore == "kdd" + msg: "When using calico apiserver you need to use the kubernetes datastore" + when: + - calico_apiserver_enabled + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check kdd calico_datastore if typha_enabled" + assert: + that: calico_datastore == "kdd" + msg: "When using typha you need to use the kubernetes datastore" + when: + - typha_enabled + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check ipip mode is Never for calico ipv6" + assert: + that: + - "calico_ipip_mode_ipv6 in ['Never']" + msg: "Calico doesn't support ipip tunneling for the IPv6" + when: + - enable_dual_stack_networks + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/install.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/install.yml new file mode 100644 index 0000000..d55c910 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/install.yml @@ -0,0 +1,475 @@ +--- +- name: Calico | Install Wireguard packages + package: + name: "{{ item }}" + state: present + with_items: "{{ calico_wireguard_packages }}" + register: calico_package_install + until: calico_package_install is succeeded + retries: 4 + when: calico_wireguard_enabled + +- name: Calico | Copy calicoctl binary from download dir + copy: + src: "{{ local_release_dir }}/calicoctl" + dest: "{{ bin_dir }}/calicoctl" + mode: 0755 + remote_src: yes + +- name: Calico | Write Calico cni config + template: + src: "cni-calico.conflist.j2" + dest: "/etc/cni/net.d/calico.conflist.template" + mode: 0644 + owner: root + register: calico_conflist + notify: reset_calico_cni + +- name: Calico | Create calico certs directory + file: + dest: "{{ calico_cert_dir }}" + state: directory + mode: 0750 + owner: root + group: root + when: calico_datastore == "etcd" + +- name: Calico | Link etcd certificates for calico-node + file: + src: "{{ etcd_cert_dir }}/{{ item.s }}" + dest: "{{ calico_cert_dir }}/{{ item.d }}" + state: hard + mode: 0640 + force: yes + with_items: + - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"} + - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"} + - {s: "{{ kube_etcd_key_file }}", d: "key.pem"} + when: calico_datastore == "etcd" + +- name: Calico | Generate typha certs + include_tasks: typha_certs.yml + when: + - typha_secure + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Generate apiserver certs + include_tasks: calico_apiserver_certs.yml + when: + - calico_apiserver_enabled + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Install calicoctl wrapper script + template: + src: "calicoctl.{{ calico_datastore }}.sh.j2" + dest: "{{ bin_dir }}/calicoctl.sh" + mode: 0755 + owner: root + group: root + +- name: Calico | wait for etcd + uri: + url: "{{ etcd_access_addresses.split(',') | first }}/health" + validate_certs: no + client_cert: "{{ calico_cert_dir }}/cert.crt" + client_key: "{{ calico_cert_dir }}/key.pem" + register: result + until: result.status == 200 or result.status == 401 + retries: 10 + delay: 5 + run_once: true + when: calico_datastore == "etcd" + +- name: Calico | Check if calico network pool has already been configured + # noqa 306 - grep will exit 1 if no match found + shell: > + {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ 
calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l + args: + executable: /bin/bash + register: calico_conf + retries: 4 + until: calico_conf.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined + assert: + that: "[calico_pool_cidr] | ipaddr(kube_pods_subnet) | length == 1" + msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - 'calico_conf.stdout == "0"' + - calico_pool_cidr is defined + +- name: Calico | Check if calico IPv6 network pool has already been configured + # noqa 306 - grep will exit 1 if no match found + shell: > + {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l + args: + executable: /bin/bash + register: calico_conf_ipv6 + retries: 4 + until: calico_conf_ipv6.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + when: + - inventory_hostname == groups['kube_control_plane'][0] + - enable_dual_stack_networks + +- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined + assert: + that: "[calico_pool_cidr_ipv6] | ipaddr(kube_pods_subnet_ipv6) | length == 1" + msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0" + - calico_pool_cidr_ipv6 is defined + - enable_dual_stack_networks + +- block: + - name: Calico | Check if extra directory is needed + stat: + path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3','<')) else 'crd' }}" + register: kdd_path + - name: Calico | Set kdd path when calico < v3.22.3 + set_fact: + calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/kdd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}" + when: + - calico_version is version('v3.22.3', '<') + - name: Calico | Set kdd path when calico > v3.22.2 + set_fact: + calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/crd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}" + when: + - calico_version is version('v3.22.2', '>') + - name: Calico | Create calico manifests for kdd + assemble: + src: "{{ calico_kdd_path }}" + dest: "{{ kube_config_dir }}/kdd-crds.yml" + mode: 0644 + delimiter: "---\n" + regexp: ".*\\.yaml" + remote_src: true + + - name: Calico | Create Calico Kubernetes datastore resources + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/kdd-crds.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + when: + - inventory_hostname in groups['kube_control_plane'] + - calico_datastore == "kdd" + +- block: + - name: Calico | Get existing FelixConfiguration + command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json" + register: _felix_cmd + ignore_errors: True + changed_when: False + + - name: Calico | Set kubespray FelixConfiguration + set_fact: + _felix_config: > + { + "kind": "FelixConfiguration", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "default", + }, + "spec": { + "ipipEnabled": {{ calico_ipip_mode != 'Never' }}, + "reportingInterval": "{{ calico_felix_reporting_interval }}", 
+ "bpfLogLevel": "{{ calico_bpf_log_level }}", + "bpfEnabled": {{ calico_bpf_enabled | bool }}, + "bpfExternalServiceMode": "{{ calico_bpf_service_mode }}", + "wireguardEnabled": {{ calico_wireguard_enabled | bool }}, + "logSeverityScreen": "{{ calico_felix_log_severity_screen }}", + "vxlanEnabled": {{ calico_vxlan_mode != 'Never' }}, + "featureDetectOverride": "{{ calico_feature_detect_override }}" + } + } + + - name: Calico | Process FelixConfiguration + set_fact: + _felix_config: "{{ _felix_cmd.stdout | from_json | combine(_felix_config, recursive=True) }}" + when: + - _felix_cmd is success + + - name: Calico | Configure calico FelixConfiguration + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config|to_json) }}" + changed_when: False + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- block: + - name: Calico | Get existing calico network pool + command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json" + register: _calico_pool_cmd + ignore_errors: True + changed_when: False + + - name: Calico | Set kubespray calico network pool + set_fact: + _calico_pool: > + { + "kind": "IPPool", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "{{ calico_pool_name }}", + }, + "spec": { + "blockSize": {{ calico_pool_blocksize | default(kube_network_node_prefix) }}, + "cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}", + "ipipMode": "{{ calico_ipip_mode }}", + "vxlanMode": "{{ calico_vxlan_mode }}", + "natOutgoing": {{ nat_outgoing|default(false) }} + } + } + + - name: Calico | Process calico network pool + set_fact: + _calico_pool: "{{ _calico_pool_cmd.stdout | from_json | combine(_calico_pool, recursive=True) }}" + when: + - _calico_pool_cmd is success + + - name: Calico | Configure calico network pool + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool|to_json) }}" + changed_when: False + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- block: + - name: Calico | Get existing calico ipv6 network pool + command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json" + register: _calico_pool_ipv6_cmd + ignore_errors: True + changed_when: False + + - name: Calico | Set kubespray calico network pool + set_fact: + _calico_pool_ipv6: > + { + "kind": "IPPool", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "{{ calico_pool_name }}-ipv6", + }, + "spec": { + "blockSize": {{ calico_pool_blocksize_ipv6 | default(kube_network_node_prefix_ipv6) }}, + "cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}", + "ipipMode": "{{ calico_ipip_mode_ipv6 }}", + "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}", + "natOutgoing": {{ nat_outgoing_ipv6|default(false) }} + } + } + + - name: Calico | Process calico ipv6 network pool + set_fact: + _calico_pool_ipv6: "{{ _calico_pool_ipv6_cmd.stdout | from_json | combine(_calico_pool_ipv6, recursive=True) }}" + when: + - _calico_pool_ipv6_cmd is success + + - name: Calico | Configure calico ipv6 network pool + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6|to_json) }}" + changed_when: False + when: + - inventory_hostname == groups['kube_control_plane'][0] + - enable_dual_stack_networks | bool + +- name: Populate Service External IPs + set_fact: + _service_external_ips: "{{ 
_service_external_ips|default([]) + [ {'cidr': item} ] }}" + with_items: "{{ calico_advertise_service_external_ips }}" + run_once: yes + +- name: Populate Service LoadBalancer IPs + set_fact: + _service_loadbalancer_ips: "{{ _service_loadbalancer_ips|default([]) + [ {'cidr': item} ] }}" + with_items: "{{ calico_advertise_service_loadbalancer_ips }}" + run_once: yes + +- name: "Determine nodeToNodeMesh needed state" + set_fact: + nodeToNodeMeshEnabled: "false" + when: + - peer_with_router|default(false) or peer_with_calico_rr|default(false) + - inventory_hostname in groups['k8s_cluster'] + run_once: yes + +- block: + - name: Calico | Get existing BGP Configuration + command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json" + register: _bgp_config_cmd + ignore_errors: True + changed_when: False + + - name: Calico | Set kubespray BGP Configuration + set_fact: + _bgp_config: > + { + "kind": "BGPConfiguration", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "default", + }, + "spec": { + "listenPort": {{ calico_bgp_listen_port }}, + "logSeverityScreen": "Info", + {% if not calico_no_global_as_num|default(false) %}"asNumber": {{ global_as_num }},{% endif %} + "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} , + {% if calico_advertise_cluster_ips|default(false) %} + "serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %} + {% if calico_advertise_service_loadbalancer_ips|length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %} + "serviceExternalIPs": {{ _service_external_ips|default([]) }} + } + } + + - name: Calico | Process BGP Configuration + set_fact: + _bgp_config: "{{ _bgp_config_cmd.stdout | from_json | combine(_bgp_config, recursive=True) }}" + when: + - _bgp_config_cmd is success + + - name: Calico | Set up BGP Configuration + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config|to_json) }}" + changed_when: False + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Create calico manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: calico-config, file: calico-config.yml, type: cm} + - {name: calico-node, file: calico-node.yml, type: ds} + - {name: calico, file: calico-node-sa.yml, type: sa} + - {name: calico, file: calico-cr.yml, type: clusterrole} + - {name: calico, file: calico-crb.yml, type: clusterrolebinding} + - {name: kubernetes-services-endpoint, file: kubernetes-services-endpoint.yml, type: cm } + register: calico_node_manifests + when: + - inventory_hostname in groups['kube_control_plane'] + - rbac_enabled or item.type not in rbac_resources + +- name: Calico | Create calico manifests for typha + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: calico, file: calico-typha.yml, type: typha} + register: calico_node_typha_manifest + when: + - inventory_hostname in groups['kube_control_plane'] + - typha_enabled + +- name: Calico | get calico apiserver caBundle + command: "{{ bin_dir }}/kubectl get secret -n calico-apiserver calico-apiserver-certs -o jsonpath='{.data.apiserver\\.crt}'" + changed_when: false + register: calico_apiserver_cabundle + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_apiserver_enabled + +- 
name: Calico | set calico apiserver caBundle fact + set_fact: + calico_apiserver_cabundle: "{{ calico_apiserver_cabundle.stdout }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_apiserver_enabled + +- name: Calico | Create calico manifests for apiserver + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: calico, file: calico-apiserver.yml, type: calico-apiserver} + register: calico_apiserver_manifest + when: + - inventory_hostname in groups['kube_control_plane'] + - calico_apiserver_enabled + +- name: Start Calico resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ calico_node_manifests.results }}" + - "{{ calico_node_typha_manifest.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + +- name: Start Calico apiserver resources + kube: + name: "{{ item.item.name }}" + namespace: "calico-apiserver" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ calico_apiserver_manifest.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + +- name: Wait for calico kubeconfig to be created + wait_for: + path: /etc/cni/net.d/calico-kubeconfig + when: + - inventory_hostname not in groups['kube_control_plane'] + - calico_datastore == "kdd" + +- name: Calico | Create Calico ipam manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: calico, file: calico-ipamconfig.yml, type: ipam} + when: + - inventory_hostname in groups['kube_control_plane'] + - calico_datastore == "kdd" + +- name: Calico | Create ipamconfig resources + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/calico-ipamconfig.yml" + state: "latest" + register: resource_result + until: resource_result is succeeded + retries: 4 + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_datastore == "kdd" + +- include_tasks: peer_with_calico_rr.yml + when: + - peer_with_calico_rr|default(false) + +- include_tasks: peer_with_router.yml + when: + - peer_with_router|default(false) diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/main.yml new file mode 100644 index 0000000..81844fa --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- import_tasks: pre.yml + +- import_tasks: repos.yml + +- include_tasks: install.yml diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml new file mode 100644 index 0000000..efa98c5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml @@ -0,0 +1,86 @@ +--- +- name: Calico | Set lable for groups nodes # noqa 301 305 + shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite" + changed_when: false + register: 
calico_group_id_label + until: calico_group_id_label is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 + when: + - calico_group_id is defined + +- name: Calico | Configure peering with route reflectors at global scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + # revert when it's already a string + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "{{ calico_rr_id }}-to-node" + }, + "spec": { + "peerSelector": "calico-rr-id == '{{ calico_rr_id }}'", + "nodeSelector": "calico-group-id == '{{ calico_group_id }}'" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + when: + - calico_rr_id is defined + - calico_group_id is defined + - inventory_hostname in groups['calico_rr'] + +- name: Calico | Configure peering with route reflectors at global scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + # revert when it's already a string + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "peer-to-rrs" + }, + "spec": { + "nodeSelector": "!has(i-am-a-route-reflector)", + "peerSelector": "has(i-am-a-route-reflector)" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ groups['calico_rr'] | default([]) }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_rr_id is not defined or calico_group_id is not defined + +- name: Calico | Configure route reflectors to peer with each other + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + # revert when it's already a string + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "rr-mesh" + }, + "spec": { + "nodeSelector": "has(i-am-a-route-reflector)", + "peerSelector": "has(i-am-a-route-reflector)" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ groups['calico_rr'] | default([]) }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/peer_with_router.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/peer_with_router.yml new file mode 100644 index 0000000..a698ed1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/peer_with_router.yml @@ -0,0 +1,77 @@ +--- +- name: Calico | Configure peering with router(s) at global scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "global-{{ item.name | default(item.router_id|replace(':','-')) }}" + }, + "spec": { + "asNumber": "{{ item.as }}", + "peerIP": "{{ item.router_id }}" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Configure node asNumber for per node peering + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply 
-f -" + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "Node", + "metadata": { + "name": "{{ inventory_hostname }}" + }, + "spec": { + "bgp": { + "asNumber": "{{ local_as }}" + }, + "orchRefs":[{"nodeName":"{{ inventory_hostname }}","orchestrator":"k8s"}] + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + when: + - inventory_hostname in groups['k8s_cluster'] + - local_as is defined + - groups['calico_rr'] | default([]) | length == 0 + +- name: Calico | Configure peering with router(s) at node scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id|replace(':','-')) }}" + }, + "spec": { + "asNumber": "{{ item.as }}", + "node": "{{ inventory_hostname }}", + "peerIP": "{{ item.router_id }}", + "sourceAddress": "{{ item.sourceaddress|default('UseNodeIP') }}" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: + - inventory_hostname in groups['k8s_cluster'] diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/pre.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/pre.yml new file mode 100644 index 0000000..162aca1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/pre.yml @@ -0,0 +1,46 @@ +--- +- name: Slurp CNI config + slurp: + src: /etc/cni/net.d/10-calico.conflist + register: calico_cni_config_slurp + failed_when: false + +- block: + - name: Set fact calico_cni_config from slurped CNI config + set_fact: + calico_cni_config: "{{ calico_cni_config_slurp['content'] | b64decode | from_json }}" + - name: Set fact calico_datastore to etcd if needed + set_fact: + calico_datastore: etcd + when: + - "'plugins' in calico_cni_config" + - "'etcd_endpoints' in calico_cni_config.plugins.0" + when: calico_cni_config_slurp.content is defined + +- name: Calico | Get kubelet hostname + shell: >- + set -o pipefail && {{ kubectl }} get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address' + | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1 + args: + executable: /bin/bash + register: calico_kubelet_name + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: + - "cloud_provider is defined" + +- name: Calico | Gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + skip: true + tags: + - facts diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/repos.yml 
b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/repos.yml new file mode 100644 index 0000000..dd29f45 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/repos.yml @@ -0,0 +1,21 @@ +--- +- name: Calico | Add wireguard yum repo + when: + - calico_wireguard_enabled + block: + + - name: Calico | Add wireguard yum repo + yum_repository: + name: copr:copr.fedorainfracloud.org:jdoss:wireguard + file: _copr:copr.fedorainfracloud.org:jdoss:wireguard + description: Copr repo for wireguard owned by jdoss + baseurl: "{{ calico_wireguard_repo }}" + gpgcheck: yes + gpgkey: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/pubkey.gpg + skip_if_unavailable: yes + enabled: yes + repo_gpgcheck: no + when: + - ansible_os_family in ['RedHat'] + - ansible_distribution not in ['Fedora'] + - ansible_facts['distribution_major_version'] | int < 9 diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/reset.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/reset.yml new file mode 100644 index 0000000..48d2e5a --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/reset.yml @@ -0,0 +1,30 @@ +--- +- name: reset | check vxlan.calico network device + stat: + path: /sys/class/net/vxlan.calico + get_attributes: no + get_checksum: no + get_mime: no + register: vxlan + +- name: reset | remove the network vxlan.calico device created by calico + command: ip link del vxlan.calico + when: vxlan.stat.exists + +- name: reset | check dummy0 network device + stat: + path: /sys/class/net/dummy0 + get_attributes: no + get_checksum: no + get_mime: no + register: dummy0 + +- name: reset | remove the network device created by calico + command: ip link del dummy0 + when: dummy0.stat.exists + +- name: reset | get and remove remaining routes set by bird + shell: set -o pipefail && ip route show proto bird | xargs -i bash -c "ip route del {} proto bird " + args: + executable: /bin/bash + changed_when: false diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/typha_certs.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/typha_certs.yml new file mode 100644 index 0000000..5d3f279 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/tasks/typha_certs.yml @@ -0,0 +1,51 @@ +--- +- name: Calico | Check if typha-server exists + command: "{{ kubectl }} -n kube-system get secret typha-server" + register: typha_server_secret + changed_when: false + failed_when: false + +- name: Calico | Ensure calico certs dir + file: + path: /etc/calico/certs + state: directory + mode: 0755 + when: typha_server_secret.rc != 0 + +- name: Calico | Copy ssl script for typha certs + template: + src: make-ssl-calico.sh.j2 + dest: "{{ bin_dir }}/make-ssl-typha.sh" + mode: 0755 + when: typha_server_secret.rc != 0 + +- name: Calico | Copy ssl config for typha certs + copy: + src: openssl.conf + dest: /etc/calico/certs/openssl.conf + mode: 0644 + when: typha_server_secret.rc != 0 + +- name: Calico | Generate typha certs + command: >- + {{ bin_dir }}/make-ssl-typha.sh + -f /etc/calico/certs/openssl.conf + -c {{ kube_cert_dir }} + -d /etc/calico/certs + -s typha + when: typha_server_secret.rc != 0 + +- name: Calico | Create typha tls secrets + command: >- + {{ kubectl }} -n kube-system + create secret tls {{ item.name }} + --cert {{ item.cert }} + --key {{ item.key }} + with_items: + - name: typha-server + cert: /etc/calico/certs/typha-server.crt + key: /etc/calico/certs/typha-server.key + - name: 
typha-client + cert: /etc/calico/certs/typha-client.crt + key: /etc/calico/certs/typha-client.key + when: typha_server_secret.rc != 0 diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 new file mode 100644 index 0000000..a1bdfcb --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 @@ -0,0 +1,10 @@ +# This is a tech-preview manifest which installs the Calico API server. Note that this manifest is liable to change +# or be removed in future releases without further warning. +# +# Namespace and namespace-scoped resources. +apiVersion: v1 +kind: Namespace +metadata: + labels: + name: calico-apiserver + name: calico-apiserver diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 new file mode 100644 index 0000000..dabc7a3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 @@ -0,0 +1,287 @@ +# Policy to ensure the API server isn't cut off. Can be modified, but ensure +# that the main API server is always able to reach the Calico API server. +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-apiserver + namespace: calico-apiserver +spec: + podSelector: + matchLabels: + apiserver: "true" + ingress: + - ports: + - protocol: TCP + port: 5443 + +--- + +apiVersion: v1 +kind: Service +metadata: + name: calico-api + namespace: calico-apiserver +spec: + ports: + - name: apiserver + port: 443 + protocol: TCP + targetPort: 5443 + selector: + apiserver: "true" + type: ClusterIP + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + apiserver: "true" + k8s-app: calico-apiserver + name: calico-apiserver + namespace: calico-apiserver +spec: + replicas: 1 + selector: + matchLabels: + apiserver: "true" + strategy: + type: Recreate + template: + metadata: + labels: + apiserver: "true" + k8s-app: calico-apiserver + name: calico-apiserver + namespace: calico-apiserver + spec: + containers: + - args: + - --secure-port=5443 + env: + - name: DATASTORE_TYPE + value: kubernetes + image: {{ calico_apiserver_image_repo }}:{{ calico_apiserver_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + livenessProbe: + httpGet: + path: /version + port: 5443 + scheme: HTTPS + initialDelaySeconds: 90 + periodSeconds: 10 + name: calico-apiserver + readinessProbe: + exec: + command: + - /code/filecheck + failureThreshold: 5 + initialDelaySeconds: 5 + periodSeconds: 10 + securityContext: + privileged: false + runAsUser: 0 + volumeMounts: + - mountPath: /code/apiserver.local.config/certificates + name: calico-apiserver-certs + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + restartPolicy: Always + serviceAccount: calico-apiserver + serviceAccountName: calico-apiserver + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + volumes: + - name: calico-apiserver-certs + secret: + secretName: calico-apiserver-certs + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-apiserver + namespace: calico-apiserver + +--- + +# Cluster-scoped resources below here. 
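+# The APIService defined below registers the v3 Calico API with the Kubernetes aggregation
+# layer, using the caBundle taken from the calico-apiserver-certs secret generated earlier
+# in this role. An illustrative post-install sanity check (not part of this manifest):
+#   kubectl get apiservice v3.projectcalico.org
+#   kubectl get ippools.projectcalico.org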
+apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v3.projectcalico.org +spec: + group: projectcalico.org + groupPriorityMinimum: 1500 + caBundle: {{ calico_apiserver_cabundle }} + service: + name: calico-api + namespace: calico-apiserver + port: 443 + version: v3 + versionPriority: 200 + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: calico-crds +rules: +- apiGroups: + - extensions + - networking.k8s.io + - "" + resources: + - networkpolicies + - nodes + - namespaces + - pods + - serviceaccounts + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - globalnetworkpolicies + - networkpolicies + - clusterinformations + - hostendpoints + - globalnetworksets + - networksets + - bgpconfigurations + - bgppeers + - felixconfigurations + - kubecontrollersconfigurations + - ippools + - ipreservations + - ipamblocks + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - policy + resourceNames: + - calico-apiserver + resources: + - podsecuritypolicies + verbs: + - use + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: calico-extension-apiserver-auth-access +rules: +- apiGroups: + - "" + resourceNames: + - extension-apiserver-authentication + resources: + - configmaps + verbs: + - list + - watch + - get +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + - roles + - rolebindings + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: calico-webhook-reader +rules: +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-apiserver-access-crds +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-crds +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-apiserver-delegate-auth +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-apiserver-webhook-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-webhook-reader +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-extension-apiserver-auth-access +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-extension-apiserver-auth-access +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-config.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-config.yml.j2 new file mode 100644 index 0000000..568cc00 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-config.yml.j2 @@ -0,0 +1,27 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: 
calico-config + namespace: kube-system +data: +{% if calico_datastore == "etcd" %} + etcd_endpoints: "{{ etcd_access_addresses }}" + etcd_ca: "/calico-secrets/ca_cert.crt" + etcd_cert: "/calico-secrets/cert.crt" + etcd_key: "/calico-secrets/key.pem" +{% elif calico_datastore == "kdd" and typha_enabled %} + # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas + # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is + # essential. + typha_service_name: "calico-typha" +{% endif %} +{% if calico_network_backend == 'bird' %} + cluster_type: "kubespray,bgp" + calico_backend: "bird" +{% else %} + cluster_type: "kubespray" + calico_backend: "{{ calico_network_backend }}" +{% endif %} +{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router|default(false) %} + as: "{{ local_as|default(global_as_num) }}" +{% endif -%} diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-cr.yml.j2 new file mode 100644 index 0000000..b911b87 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-cr.yml.j2 @@ -0,0 +1,168 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node + namespace: kube-system +rules: + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + - configmaps + verbs: + - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + - watch + - list +{% if calico_datastore == "kdd" %} + # Used to discover Typhas. + - get +{% endif %} + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch +{% if calico_datastore == "etcd" %} + - apiGroups: + - policy + resourceNames: + - privileged + resources: + - podsecuritypolicies + verbs: + - use +{% elif calico_datastore == "kdd" %} + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico must update some CRDs. + - apiGroups: [ "crd.projectcalico.org" ] + resources: + - caliconodestatuses + verbs: + - update + # Calico stores some configuration information on the node. 
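+  # (For instance, in kdd mode calico-node records data such as the
+  # projectcalico.org/IPv4Address annotation on its own Node object, hence the
+  # read access granted below.)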
+ - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get +{% endif %} + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-node + verbs: + - create diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-crb.yml.j2 new file mode 100644 index 0000000..f747bfd --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 new file mode 100644 index 0000000..af7e211 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 @@ -0,0 +1,8 @@ +apiVersion: crd.projectcalico.org/v1 +kind: IPAMConfig +metadata: + name: default +spec: + autoAllocateBlocks: {{ calico_ipam_autoallocateblocks }} + strictAffinity: {{ calico_ipam_strictaffinity }} + maxBlocksPerHost: {{ calico_ipam_maxblocksperhost }} diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 new file mode 100644 index 0000000..ea721b3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-node.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-node.yml.j2 new file mode 100644 index 0000000..3af01c8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -0,0 +1,464 @@ +--- +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. 
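+# Note: the image references, resource requests/limits and most Felix settings in
+# this template are driven by role variables (for example calico_node_image_repo,
+# calico_node_image_tag and calico_node_cpu_limit), so they are normally tuned via
+# the role's variables rather than by editing this file directly.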
+kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + labels: + k8s-app: calico-node + annotations: +{% if calico_datastore == "etcd" %} + kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}" +{% endif %} +{% if calico_felix_prometheusmetricsenabled %} + prometheus.io/scrape: 'true' + prometheus.io/port: "{{ calico_felix_prometheusmetricsport }}" +{% endif %} + spec: + nodeSelector: + {{ calico_ds_nodeselector }} + priorityClassName: system-node-critical + hostNetwork: true + serviceAccountName: calico-node + tolerations: + - operator: Exists + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + initContainers: +{% if calico_datastore == "kdd" %} + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true +{% endif %} + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # Install CNI binaries + - name: UPDATE_CNI_BINARIES + value: "true" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG_FILE + value: "/host/etc/cni/net.d/calico.conflist.template" + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" +{% if calico_datastore == "kdd" %} + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{% endif %} + volumeMounts: + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. 
+ - name: flexvol-driver + image: {{ calico_flexvol_image_repo }}:{{ calico_flexvol_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # The location of the Calico etcd cluster. +{% if calico_datastore == "etcd" %} + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert +{% elif calico_datastore == "kdd" %} + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" +{% if typha_enabled %} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: calico-config + key: typha_service_name +{% if typha_secure %} + - name: FELIX_TYPHACN + value: typha-server + - name: FELIX_TYPHACAFILE + value: /etc/typha-ca/ca.crt + - name: FELIX_TYPHACERTFILE + value: /etc/typha-client/typha-client.crt + - name: FELIX_TYPHAKEYFILE + value: /etc/typha-client/typha-client.key +{% endif %} +{% endif %} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" +{% endif %} +{% if calico_network_backend == 'vxlan' %} + - name: FELIX_VXLANVNI + value: "{{ calico_vxlan_vni }}" + - name: FELIX_VXLANPORT + value: "{{ calico_vxlan_port }}" +{% endif %} + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + valueFrom: + configMapKeyRef: + name: calico-config + key: cluster_type + # Set noderef for node controller. + - name: CALICO_K8S_NODE_REF + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{ calico_endpoint_to_host_action|default('RETURN') }}" + - name: FELIX_HEALTHHOST + value: "{{ calico_healthhost }}" +{% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %} + - name: FELIX_KUBENODEPORTRANGES + value: "{{ kube_apiserver_node_port_range.split('-')[0] }}:{{ kube_apiserver_node_port_range.split('-')[1] }}" +{% endif %} + - name: FELIX_IPTABLESBACKEND + value: "{{ calico_iptables_backend }}" + - name: FELIX_IPTABLESLOCKTIMEOUTSECS + value: "{{ calico_iptables_lock_timeout_secs }}" +# should be set in etcd before deployment +# # Configure the IP Pool from which Pod IPs will be chosen. 
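+# For reference, such a pool can be created ahead of time with calicoctl (wrapper
+# script templates for calicoctl ship with this role); the values below are only
+# illustrative, the real CIDR normally comes from calico_pool_cidr / kube_pods_subnet:
+#
+#   calicoctl apply -f - <<EOF
+#   apiVersion: projectcalico.org/v3
+#   kind: IPPool
+#   metadata:
+#     name: default-pool
+#   spec:
+#     cidr: 10.233.64.0/18
+#     ipipMode: Always
+#     natOutgoing: true
+#   EOF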
+# - name: CALICO_IPV4POOL_CIDR +# value: "{{ calico_pool_cidr | default(kube_pods_subnet) }}" + - name: CALICO_IPV4POOL_IPIP + value: "{{ calico_ipv4pool_ipip }}" + - name: FELIX_IPV6SUPPORT + value: "{{ enable_dual_stack_networks | default(false) }}" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "{{ calico_loglevel }}" + # Set Calico startup logging to "error" + - name: CALICO_STARTUP_LOGLEVEL + value: "{{ calico_node_startup_loglevel }}" + # Enable or disable usage report + - name: FELIX_USAGEREPORTINGENABLED + value: "{{ calico_usage_reporting }}" + # Set MTU for tunnel device used if ipip is enabled +{% if calico_mtu is defined %} + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + value: "{{ calico_veth_mtu | default(calico_mtu) }}" + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + value: "{{ calico_veth_mtu | default(calico_mtu) }}" + # Set MTU for the Wireguard tunnel device. + - name: FELIX_WIREGUARDMTU + value: "{{ calico_veth_mtu | default(calico_mtu) }}" +{% endif %} + - name: FELIX_CHAININSERTMODE + value: "{{ calico_felix_chaininsertmode }}" + - name: FELIX_PROMETHEUSMETRICSENABLED + value: "{{ calico_felix_prometheusmetricsenabled }}" + - name: FELIX_PROMETHEUSMETRICSPORT + value: "{{ calico_felix_prometheusmetricsport }}" + - name: FELIX_PROMETHEUSGOMETRICSENABLED + value: "{{ calico_felix_prometheusgometricsenabled }}" + - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED + value: "{{ calico_felix_prometheusprocessmetricsenabled }}" +{% if calico_ip_auto_method is defined %} + - name: IP_AUTODETECTION_METHOD + value: "{{ calico_ip_auto_method }}" +{% else %} + - name: NODEIP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: IP_AUTODETECTION_METHOD + value: "can-reach=$(NODEIP)" +{% endif %} + - name: IP + value: "autodetect" +{% if calico_ip6_auto_method is defined and enable_dual_stack_networks %} + - name: IP6_AUTODETECTION_METHOD + value: "{{ calico_ip6_auto_method }}" +{% endif %} +{% if calico_felix_mtu_iface_pattern is defined %} + - name: FELIX_MTUIFACEPATTERN + value: "{{ calico_felix_mtu_iface_pattern }}" +{% endif %} +{% if enable_dual_stack_networks %} + - name: IP6 + value: autodetect +{% endif %} +{% if calico_use_default_route_src_ipaddr|default(false) %} + - name: FELIX_DEVICEROUTESOURCEADDRESS + valueFrom: + fieldRef: + fieldPath: status.hostIP +{% endif %} + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: FELIX_HEALTHENABLED + value: "true" + - name: FELIX_IGNORELOOSERPF + value: "{{ calico_node_ignorelooserpf }}" + - name: CALICO_MANAGE_CNI + value: "true" +{% if calico_node_extra_envs is defined %} +{% for key in calico_node_extra_envs %} + - name: {{ key }} + value: "{{ calico_node_extra_envs[key] }}" +{% endfor %} +{% endif %} + securityContext: + privileged: true + resources: + limits: + cpu: {{ calico_node_cpu_limit }} + memory: {{ calico_node_memory_limit }} + requests: + cpu: {{ calico_node_cpu_requests }} + memory: {{ calico_node_memory_requests }} + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live +{% if calico_network_backend == "bird" %} + - -bird-live +{% endif %} + periodSeconds: 10 + initialDelaySeconds: 10 + timeoutSeconds: {{ calico_node_livenessprobe_timeout | default(10) }} + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node +{% if calico_network_backend == "bird" %} + - -bird-ready +{% endif %} + - 
-felix-ready + periodSeconds: 10 + timeoutSeconds: {{ calico_node_readinessprobe_timeout | default(10) }} + failureThreshold: 6 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false +{% if calico_datastore == "etcd" %} + - mountPath: /calico-secrets + name: etcd-certs + readOnly: true +{% endif %} + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + # For maintaining CNI plugin API credentials. + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false +{% if typha_secure %} + - name: typha-client + mountPath: /etc/typha-client + readOnly: true + - name: typha-cacert + subPath: ca.crt + mountPath: /etc/typha-ca/ca.crt + readOnly: true +{% endif %} + - name: policysync + mountPath: /var/run/nodeagent +{% if calico_bpf_enabled %} + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. + - name: sysfs + mountPath: /sys/fs/ + # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host. + # If the host is known to mount that filesystem already then Bidirectional can be omitted. + mountPropagation: Bidirectional +{% endif %} + - name: cni-log-dir + mountPath: /var/log/calico/cni + readOnly: true + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + # Used to install CNI. + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin +{% if calico_datastore == "etcd" %} + # Mount in the etcd TLS secrets. + - name: etcd-certs + hostPath: + path: "{{ calico_cert_dir }}" +{% endif %} + # Mount the global iptables lock file, used by calico/node + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +{% if calico_datastore == "kdd" %} + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks +{% endif %} +{% if typha_enabled and typha_secure %} + - name: typha-client + secret: + secretName: typha-client + items: + - key: tls.crt + path: typha-client.crt + - key: tls.key + path: typha-client.key + - name: typha-cacert + hostPath: + path: "/etc/kubernetes/ssl/" +{% endif %} +{% if calico_bpf_enabled %} + - name: sysfs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate +{% endif %} + # Used to access CNI logs. 
+ - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: "{{ kubelet_flexvolumes_plugins_dir | default('/usr/libexec/kubernetes/kubelet-plugins/volume/exec') }}/nodeagent~uds" + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-typha.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-typha.yml.j2 new file mode 100644 index 0000000..22d2f2c --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calico-typha.yml.j2 @@ -0,0 +1,190 @@ +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. + +apiVersion: v1 +kind: Service +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha +{% if typha_prometheusmetricsenabled %} + - port: {{ typha_prometheusmetricsport }} + protocol: TCP + targetPort: http-metrics + name: metrics +{% endif %} + selector: + k8s-app: calico-typha + +--- + +# This manifest creates a Deployment of Typha to back the above service. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the calico-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ typha_replicas }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + template: + metadata: + labels: + k8s-app: calico-typha + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' +{% if typha_prometheusmetricsenabled %} + prometheus.io/scrape: 'true' + prometheus.io/port: "{{ typha_prometheusmetricsport }}" +{% endif %} + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. 
+ serviceAccountName: calico-node + priorityClassName: system-cluster-critical + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: {{ calico_typha_image_repo }}:{{ calico_typha_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP +{% if typha_prometheusmetricsenabled %} + - containerPort: {{ typha_prometheusmetricsport }} + name: http-metrics + protocol: TCP +{% endif %} + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + - name: TYPHA_MAXCONNECTIONSLOWERLIMIT + value: "{{ typha_max_connections_lower_limit }}" +{% if typha_secure %} + - name: TYPHA_CAFILE + value: /etc/ca/ca.crt + - name: TYPHA_CLIENTCN + value: typha-client + - name: TYPHA_SERVERCERTFILE + value: /etc/typha/server_certificate.pem + - name: TYPHA_SERVERKEYFILE + value: /etc/typha/server_key.pem +{% endif %} +{% if typha_prometheusmetricsenabled %} + # Since Typha is host-networked, + # this opens a port on the host, which may need to be secured. + - name: TYPHA_PROMETHEUSMETRICSENABLED + value: "true" + - name: TYPHA_PROMETHEUSMETRICSPORT + value: "{{ typha_prometheusmetricsport }}" +{% endif %} +{% if typha_secure %} + volumeMounts: + - mountPath: /etc/typha + name: typha-server + readOnly: true + - mountPath: /etc/ca/ca.crt + subPath: ca.crt + name: cacert + readOnly: true +{% endif %} + # Needed for version >=3.7 when the 'host-local' ipam is used + # Should never happen given templates/cni-calico.conflist.j2 + # Configure route aggregation based on pod CIDR. 
+ # - name: USE_POD_CIDR + # value: "true" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 +{% if typha_secure %} + volumes: + - name: typha-server + secret: + secretName: typha-server + items: + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + - name: cacert + hostPath: + path: "{{ kube_cert_dir }}" +{% endif %} + +--- + +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 new file mode 100644 index 0000000..fcde4a5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 @@ -0,0 +1,6 @@ +#!/bin/bash +ETCD_ENDPOINTS={{ etcd_access_addresses }} \ +ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \ +ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \ +ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \ +{{ bin_dir }}/calicoctl --allow-version-mismatch "$@" diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 new file mode 100644 index 0000000..ef89f39 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 @@ -0,0 +1,8 @@ +#!/bin/bash +DATASTORE_TYPE=kubernetes \ +{% if inventory_hostname in groups['kube_control_plane'] %} +KUBECONFIG=/etc/kubernetes/admin.conf \ +{% else %} +KUBECONFIG=/etc/cni/net.d/calico-kubeconfig \ +{% endif %} +{{ bin_dir }}/calicoctl --allow-version-mismatch "$@" diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/cni-calico.conflist.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/cni-calico.conflist.j2 new file mode 100644 index 0000000..5cdf1ac --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/cni-calico.conflist.j2 @@ -0,0 +1,86 @@ +{ + "name": "{{ calico_cni_name }}", + "cniVersion":"0.3.1", + "plugins":[ + { +{% if calico_datastore == "kdd" %} + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", +{% else %} +{% if cloud_provider is defined %} + "nodename": "{{ calico_kubelet_name.stdout }}", +{% else %} + "nodename": "{{ calico_baremetal_nodename }}", +{% endif %} +{% endif %} + "type": "calico", + "log_level": "info", +{% if calico_cni_log_file_path %} + "log_file_path": "{{ calico_cni_log_file_path }}", +{% endif %} +{% if calico_datastore == "etcd" %} + "etcd_endpoints": "{{ etcd_access_addresses }}", + "etcd_cert_file": "{{ calico_cert_dir }}/cert.crt", + "etcd_key_file": "{{ calico_cert_dir }}/key.pem", + "etcd_ca_cert_file": "{{ calico_cert_dir }}/ca_cert.crt", +{% endif %} +{% if calico_ipam_host_local is defined %} + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, +{% else %} + "ipam": { + "type": "calico-ipam", +{% if enable_dual_stack_networks %} + "assign_ipv6": "true", +{% if calico_cni_pool_ipv6 %} + "ipv6_pools": ["{{ calico_pool_cidr_ipv6 | 
default(kube_pods_subnet_ipv6) }}"], +{% endif %} +{% endif %} +{% if calico_cni_pool %} + "ipv4_pools": ["{{ calico_pool_cidr | default(kube_pods_subnet) }}"], +{% endif %} + "assign_ipv4": "true" + }, +{% endif %} +{% if calico_allow_ip_forwarding %} + "container_settings": { + "allow_ip_forwarding": true + }, +{% endif %} +{% if (calico_feature_control is defined) and (calico_feature_control|length > 0) %} + "feature_control": { + {% for fc in calico_feature_control -%} + {% set fcval = calico_feature_control[fc] -%} + "{{ fc }}": {{ (fcval | string | lower) if (fcval == true or fcval == false) else "\"" + fcval + "\"" }}{{ "," if not loop.last else "" }} + {% endfor -%} + {{- "" }} + }, +{% endif %} +{% if enable_network_policy %} + "policy": { + "type": "k8s" + }, +{% endif %} +{% if calico_mtu is defined and calico_mtu is number %} + "mtu": {{ calico_mtu }}, +{% endif %} + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type":"portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type":"bandwidth", + "capabilities": { + "bandwidth": true + } + } + ] +} diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 new file mode 100644 index 0000000..7ececd4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: kube-system + name: kubernetes-services-endpoint +data: +{% if calico_bpf_enabled %} +{% if loadbalancer_apiserver is defined %} + KUBERNETES_SERVICE_HOST: "{{ apiserver_loadbalancer_domain_name }}" + KUBERNETES_SERVICE_PORT: "{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}" +{%- elif use_localhost_as_kubeapi_loadbalancer|default(False)|bool %} + KUBERNETES_SERVICE_HOST: "127.0.0.1" + KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_port }}" +{%- else %} + KUBERNETES_SERVICE_HOST: "{{ first_kube_control_plane_address }}" + KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_port }}" +{%- endif %} +{% endif %} diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 new file mode 100644 index 0000000..94b2022 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 @@ -0,0 +1,102 @@ +#!/bin/bash + +# Author: Smana smainklh@gmail.com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
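+#
+# Example invocation (script name and paths are only illustrative):
+#
+#     bash make-ssl-calico.sh -f openssl.conf -d /etc/calico/certs -c /etc/kubernetes/ssl -s typha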
+
+set -o errexit
+set -o pipefail
+usage()
+{
+    cat << EOF
+Create self-signed certificates
+
+Usage : $(basename $0) -f <config> [-d <ssldir>]
+      -h | --help    : Show this message
+      -f | --config  : OpenSSL configuration file
+      -d | --ssldir  : Directory where the certificates will be installed
+      -c | --cadir   : Directory where the existing CA is located
+      -s | --service : Service for the ca
+
+      ex :
+      $(basename $0) -f openssl.conf -d /srv/ssl
+EOF
+}
+
+# Options parsing
+while (($#)); do
+    case "$1" in
+        -h | --help)    usage; exit 0;;
+        -f | --config)  CONFIG=${2}; shift 2;;
+        -d | --ssldir)  SSLDIR="${2}"; shift 2;;
+        -c | --cadir)   CADIR="${2}"; shift 2;;
+        -s | --service) SERVICE="${2}"; shift 2;;
+        *)
+            usage
+            echo "ERROR : Unknown option"
+            exit 3
+        ;;
+    esac
+done
+
+if [ -z ${CONFIG} ]; then
+    echo "ERROR: the openssl configuration file is missing. option -f"
+    exit 1
+fi
+if [ -z ${SSLDIR} ]; then
+    SSLDIR="/etc/calico/certs"
+fi
+
+tmpdir=$(mktemp -d /tmp/calico_${SERVICE}_certs.XXXXXX)
+trap 'rm -rf "${tmpdir}"' EXIT
+cd "${tmpdir}"
+
+mkdir -p ${SSLDIR} ${CADIR}
+
+# Root CA
+if [ -e "$CADIR/ca.key" ]; then
+    # Reuse existing CA
+    cp $CADIR/{ca.crt,ca.key} .
+else
+    openssl genrsa -out ca.key {{certificates_key_size}} > /dev/null 2>&1
+    openssl req -x509 -new -nodes -key ca.key -days {{certificates_duration}} -out ca.crt -subj "/CN=calico-${SERVICE}-ca" > /dev/null 2>&1
+fi
+
+if [ $SERVICE == "typha" ]; then
+    # Typha server
+    openssl genrsa -out typha-server.key {{certificates_key_size}} > /dev/null 2>&1
+    openssl req -new -key typha-server.key -out typha-server.csr -subj "/CN=typha-server" -config ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in typha-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-server.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+
+    # Typha client
+    openssl genrsa -out typha-client.key {{certificates_key_size}} > /dev/null 2>&1
+    openssl req -new -key typha-client.key -out typha-client.csr -subj "/CN=typha-client" -config ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in typha-client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-client.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+
+elif [ $SERVICE == "apiserver" ]; then
+    # calico-apiserver
+    openssl genrsa -out apiserver.key {{certificates_key_size}} > /dev/null 2>&1
+    openssl req -new -key apiserver.key -out apiserver.csr -subj "/CN=calico-apiserver" -config ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out apiserver.crt -days {{certificates_duration}} -extensions ssl_client_apiserver -extfile ${CONFIG} > /dev/null 2>&1
+else
+    echo "ERROR: unsupported service for
option -s" + exit 1 +fi + +# Install certs +if [ -e "$CADIR/ca.key" ]; then + # No pass existing CA + rm -f ca.crt ca.key +fi + +mv {*.crt,*.key} ${SSLDIR}/ diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/vars/amazon.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/amazon.yml new file mode 100644 index 0000000..83efdcd --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/amazon.yml @@ -0,0 +1,5 @@ +--- +calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-7-$basearch/ +calico_wireguard_packages: + - wireguard-dkms + - wireguard-tools diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/vars/centos-9.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/centos-9.yml new file mode 100644 index 0000000..43df545 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/centos-9.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/vars/debian.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/debian.yml new file mode 100644 index 0000000..baf603c --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/debian.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/vars/fedora.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/fedora.yml new file mode 100644 index 0000000..43df545 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/fedora.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/vars/opensuse.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/opensuse.yml new file mode 100644 index 0000000..43df545 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/opensuse.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/vars/redhat-9.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/redhat-9.yml new file mode 100644 index 0000000..43df545 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/redhat-9.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/vars/redhat.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/redhat.yml new file mode 100644 index 0000000..a83a8a5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/redhat.yml @@ -0,0 +1,4 @@ +--- +calico_wireguard_packages: + - wireguard-dkms + - wireguard-tools diff --git a/kubespray/extra_playbooks/roles/network_plugin/calico/vars/rocky-9.yml b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/rocky-9.yml new file mode 100644 index 0000000..43df545 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/calico/vars/rocky-9.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/defaults/main.yml b/kubespray/extra_playbooks/roles/network_plugin/canal/defaults/main.yml new file mode 100644 index 0000000..419cc36 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/defaults/main.yml @@ -0,0 +1,33 @@ +--- +# The interface used by canal for host <-> host 
communication. +# If left blank, then the interface is choosing using the node's +# default route. +canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +canal_masquerade: "true" + +# Etcd SSL dirs +canal_cert_dir: /etc/canal/certs + +# Canal Network Policy directory +canal_policy_dir: /etc/kubernetes/policy + +# Limits for apps +calico_node_memory_limit: 500M +calico_node_cpu_limit: 200m +calico_node_memory_requests: 64M +calico_node_cpu_requests: 50m +flannel_memory_limit: 500M +flannel_cpu_limit: 200m +flannel_memory_requests: 64M +flannel_cpu_requests: 50m + +# etcd cert filenames +kube_etcd_cacert_file: ca.pem +kube_etcd_cert_file: node-{{ inventory_hostname }}.pem +kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem + +# Set log path for calico CNI plugin. Set to false to disable logging to disk. +calico_cni_log_file_path: /var/log/calico/cni/cni.log diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/handlers/main.yml b/kubespray/extra_playbooks/roles/network_plugin/canal/handlers/main.yml new file mode 100644 index 0000000..7769b99 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/handlers/main.yml @@ -0,0 +1,14 @@ +--- +- name: reset_canal_cni + command: /bin/true + notify: + - delete 10-canal.conflist + - delete canal-node containers + +- name: delete 10-canal.conflist + file: + path: /etc/canal/10-canal.conflist + state: absent + +- name: delete canal-node containers + shell: "docker ps -af name=k8s_POD_canal-node* -q | xargs --no-run-if-empty docker rm -f" diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/canal/tasks/main.yml new file mode 100644 index 0000000..4117d1c --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/tasks/main.yml @@ -0,0 +1,103 @@ +--- +- name: Canal | Write Canal cni config + template: + src: "cni-canal.conflist.j2" + dest: "/etc/cni/net.d/canal.conflist.template" + mode: 0644 + owner: "{{ kube_owner }}" + register: canal_conflist + notify: reset_canal_cni + +- name: Canal | Create canal certs directory + file: + dest: "{{ canal_cert_dir }}" + state: directory + mode: 0750 + owner: root + group: root + +- name: Canal | Link etcd certificates for canal-node + file: + src: "{{ etcd_cert_dir }}/{{ item.s }}" + dest: "{{ canal_cert_dir }}/{{ item.d }}" + state: hard + mode: 0640 + force: yes + with_items: + - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"} + - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"} + - {s: "{{ kube_etcd_key_file }}", d: "key.pem"} + +- name: Slurp etcd cacert file + slurp: + src: "{{ canal_cert_dir }}/ca_cert.crt" + register: etcd_ca_cert_file + failed_when: false + +- name: Slurp etcd cert file + slurp: + src: "{{ canal_cert_dir }}/cert.crt" + register: etcd_cert_file + failed_when: false + +- name: Slurp etcd key file + slurp: + src: "{{ canal_cert_dir }}/key.pem" + register: etcd_key_file + failed_when: false + +# Flannel need etcd v2 API +- name: Canal | Set Flannel etcd configuration + command: |- + {{ bin_dir }}/etcdctl set /coreos.com/network/config \ + '{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }' + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + delegate_to: "{{ groups['etcd'][0] }}" + changed_when: false + run_once: true + environment: + ETCDCTL_API: 2 + ETCDCTL_CA_FILE: "{{ 
kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}" + ETCDCTL_CERT_FILE: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'][0] + '.pem' }}" + ETCDCTL_KEY_FILE: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'][0] + '-key.pem' }}" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- name: Canal | Create canal node manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: canal-calico-etcd-secret, file: canal-secret-calico-etcd.yml, type: secret} + - {name: canal-config, file: canal-config.yaml, type: cm} + - {name: canal-node, file: canal-node.yaml, type: ds} + - {name: canal-kube-controllers, file: canal-calico-kube-controllers.yml, type: deployment} + - {name: canal-cr, file: canal-cr.yml, type: clusterrole} + - {name: canal, file: canal-node-sa.yml, type: sa} + - {name: calico-cr, file: canal-cr-calico-node.yml, type: clusterrole} + - {name: calico-kube-cr, file: canal-cr-calico-kube-controllers.yml, type: clusterrole} + - {name: calico-crd, file: canal-crd-calico.yml, type: crd} + - {name: flannel, file: canal-cr-flannel.yml, type: clusterrole} + - {name: canal, file: canal-crb-canal.yml, type: clusterrolebinding} + - {name: canal-calico, file: canal-crb-calico.yml, type: clusterrolebinding} + - {name: canal-flannel, file: canal-crb-flannel.yml, type: clusterrolebinding} + register: canal_manifests + when: + - inventory_hostname in groups['kube_control_plane'] + +- name: Canal | Install calicoctl wrapper script + template: + src: calicoctl.sh.j2 + dest: "{{ bin_dir }}/calicoctl.sh" + mode: 0755 + owner: root + group: root + +- name: Canal | Create network policy directory + file: + path: "{{ canal_policy_dir }}" + state: directory + mode: 0755 diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/calicoctl.sh.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/calicoctl.sh.j2 new file mode 100644 index 0000000..8343ef8 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/calicoctl.sh.j2 @@ -0,0 +1,6 @@ +#!/bin/bash +ETCD_ENDPOINTS={{ etcd_access_addresses }} \ +ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \ +ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \ +ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \ +{{ bin_dir }}/calicoctl "$@" diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-calico-kube-controllers.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-calico-kube-controllers.yml.j2 new file mode 100644 index 0000000..1417022 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-calico-kube-controllers.yml.j2 @@ -0,0 +1,96 @@ +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. 
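+  # (The "Recreate" strategy below ensures the existing replica is stopped
+  # before a new one is started, so two instances never run concurrently.)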
+ replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + nodeSelector: + kubernetes.io/os: linux + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + # The controllers must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + containers: + - name: calico-kube-controllers + image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + # The location of the etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: policy,namespace,serviceaccount,workloadendpoint,node + volumeMounts: + # Mount in the etcd TLS secrets. + - mountPath: /calico-secrets + name: etcd-certs + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 + volumes: + # Mount in the etcd TLS secrets with mode 400. + # See https://kubernetes.io/docs/concepts/configuration/secret/ + - name: etcd-certs + secret: + secretName: calico-etcd-secrets + defaultMode: 0440 diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-config.yaml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-config.yaml.j2 new file mode 100644 index 0000000..8aab6fb --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-config.yaml.j2 @@ -0,0 +1,80 @@ +# This ConfigMap can be used to configure a self-hosted Canal installation. +# See `canal.yaml` for an example of a Canal deployment which uses +# the config in this ConfigMap. +kind: ConfigMap +apiVersion: v1 +metadata: + name: canal-config + namespace: kube-system +data: + # Configure this with the location of your etcd cluster. + etcd_endpoints: "{{ etcd_access_addresses }}" + # If you're using TLS enabled etcd uncomment the following. + # You must also populate the Secret below with these files. + etcd_ca: "/calico-secrets/etcd-ca" + etcd_cert: "/calico-secrets/etcd-cert" + etcd_key: "/calico-secrets/etcd-key" + + # Typha is disabled. + typha_service_name: "none" + + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + canal_iface: "{{ canal_iface }}" + + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: "{{ canal_masquerade }}" + + # Configure the MTU to use for workload interfaces and tunnels. 
+ # By default, MTU is auto-detected, and explicitly setting this field should not be required. + # You can override auto-detection by providing a non-zero value. + veth_mtu: "0" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "canal", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "type": "calico", + "include_default_routes": true, + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "etcd_key_file": "__ETCD_KEY_FILE__", + "etcd_cert_file": "__ETCD_CERT_FILE__", + "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", + "log_level": "info", +{% if calico_cni_log_file_path %} + "log_file_path": "{{ calico_cni_log_file_path }}", +{% endif %} + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + } + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + } + ] + } + # Flannel network configuration. Mounted into the flannel container. + net-conf.json: | + { + "Network": "{{ kube_pods_subnet }}", + "Backend": { + "Type": "vxlan" + } + } + diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr-calico-kube-controllers.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr-calico-kube-controllers.yml.j2 new file mode 100644 index 0000000..e3c03c4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr-calico-kube-controllers.yml.j2 @@ -0,0 +1,83 @@ +# Source: calico/templates/calico-kube-controllers-rbac.yaml + +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are watched to check for existence as part of IPAM controller. + - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipreservations + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch + # Pools are watched to maintain a mapping of blocks to IP pools. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - watch + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr-calico-node.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr-calico-node.yml.j2 new file mode 100644 index 0000000..d80a7e0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr-calico-node.yml.j2 @@ -0,0 +1,133 @@ +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - canal + verbs: + - create + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico must update some CRDs. + - apiGroups: [ "crd.projectcalico.org" ] + resources: + - caliconodestatuses + verbs: + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 new file mode 100644 index 0000000..b2236d1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 @@ -0,0 +1,23 @@ +# Flannel ClusterRole +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: + - apiGroups: [""] + resources: + - pods + verbs: + - get + - apiGroups: [""] + resources: + - nodes + verbs: + - list + - watch + - apiGroups: [""] + resources: + - nodes/status + verbs: + - patch diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr.yml.j2 new file mode 100644 index 0000000..1209c7b --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-cr.yml.j2 @@ -0,0 +1,30 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: canal +rules: + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + verbs: + - create + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 new file mode 100644 index 0000000..415a2a2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 @@ -0,0 +1,27 @@ +--- +# Bind the calico ClusterRole to the canal ServiceAccount. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: canal-calico +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crb-canal.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crb-canal.yml.j2 new file mode 100644 index 0000000..9fcb0fc --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crb-canal.yml.j2 @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: canal +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: canal +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 new file mode 100644 index 0000000..5960139 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 @@ -0,0 +1,14 @@ +--- +# Bind the flannel ClusterRole to the canal ServiceAccount. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: canal-flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crd-calico.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crd-calico.yml.j2 new file mode 100644 index 0000000..4f0653a --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-crd-calico.yml.j2 @@ -0,0 +1,3929 @@ +--- +# Source: calico/templates/kdd-crds.yaml + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. 
+ properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. [Default: + 64512]' + format: int32 + type: integer + bindMode: + description: BindMode indicates whether to listen for BGP connections + on all addresses (None) or only on the node's canonical IP address + Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen + for BGP connections on all addresses. + type: string + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeMeshMaxRestartTime: + description: Time to allow for software restart for node-to-mesh peerings. When + specified, this is configured as the graceful restart timeout. When + not specified, the BIRD default of 120s is used. This field can + only be set on the default BGPConfiguration instance and requires + that NodeMesh is enabled + type: string + nodeMeshPassword: + description: Optional BGP password for full node-to-mesh peerings. + This field can only be set on the default BGPConfiguration instance + and requires that NodeMesh is enabled + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. 
+ items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + maxRestartTime: + description: Time to allow for software restart. When specified, + this is configured as the graceful restart timeout. When not specified, + the BIRD default of 120s is used. + type: string + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. 
+ type: string + numAllowedLocalASNumbers: + description: Maximum number of local AS numbers that are allowed in + the AS path for received routes. This removes BGP loop prevention + and should only be used if absolutely necesssary. + format: int32 + type: integer + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + sourceAddress: + description: Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. 
+ This field is a string for compatibility with older releases that + mistakenly treat this field as a string. + type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: caliconodestatuses.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: CalicoNodeStatus + listKind: CalicoNodeStatusList + plural: caliconodestatuses + singular: caliconodestatus + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus + resource. + properties: + classes: + description: Classes declares the types of information to monitor + for this calico/node, and allows for selective status reporting + about certain subsets of information. + items: + type: string + type: array + node: + description: The node name identifies the Calico node instance for + node status. + type: string + updatePeriodSeconds: + description: UpdatePeriodSeconds is the period at which CalicoNodeStatus + should be updated. Set to 0 to disable CalicoNodeStatus refresh. + Maximum update period is one day. + format: int32 + type: integer + type: object + status: + description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + No validation needed for status since it is updated by Calico. + properties: + agent: + description: Agent holds agent status on the node. + properties: + birdV4: + description: BIRDV4 represents the latest observed status of bird4. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + birdV6: + description: BIRDV6 represents the latest observed status of bird6. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. 
+ type: string + version: + description: Version of the BGP daemon + type: string + type: object + type: object + bgp: + description: BGP holds node BGP status. + properties: + numberEstablishedV4: + description: The total number of IPv4 established bgp sessions. + type: integer + numberEstablishedV6: + description: The total number of IPv6 established bgp sessions. + type: integer + numberNotEstablishedV4: + description: The total number of IPv4 non-established bgp sessions. + type: integer + numberNotEstablishedV6: + description: The total number of IPv6 non-established bgp sessions. + type: integer + peersV4: + description: PeersV4 represents IPv4 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + peersV6: + description: PeersV6 represents IPv6 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + required: + - numberEstablishedV4 + - numberEstablishedV6 + - numberNotEstablishedV4 + - numberNotEstablishedV6 + type: object + lastUpdated: + description: LastUpdated is a timestamp representing the server time + when CalicoNodeStatus object last updated. It is represented in + RFC3339 form and is in UTC. + format: date-time + nullable: true + type: string + routes: + description: Routes reports routes known to the Calico BGP daemon + on the node. + properties: + routesV4: + description: RoutesV4 represents IPv4 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + routesV6: + description: RoutesV6 represents IPv6 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. 
+ type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. + properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enable" or "Disable". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). + type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. [Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. + [Default: false]' + type: boolean + bpfEnforceRPF: + description: 'BPFEnforceRPF enforce strict RPF on all interfaces with + BPF programs regardless of what is the per-interfaces or global + setting. Possible values are Disabled or Strict. [Default: Strict]' + type: string + bpfExtToServiceConnmark: + description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit + mark that is set on connections from an external client to a local + service. This mark allows us to control how packets of that connection + are routed within the host and how is routing intepreted by RPF + check. [Default: 0]' + type: integer + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. 
If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + bpfMapSizeConntrack: + description: 'BPFMapSizeConntrack sets the size for the conntrack + map. This map must be large enough to hold an entry for each active + connection. Warning: changing the size of the conntrack map can + cause disruption.' + type: integer + bpfMapSizeIPSets: + description: BPFMapSizeIPSets sets the size for ipsets map. The IP + sets map must be large enough to hold an entry for each endpoint + matched by every selector in the source/destination matches in network + policy. Selectors such as "all()" can result in large numbers of + entries (one entry per endpoint in that case). + type: integer + bpfMapSizeNATAffinity: + type: integer + bpfMapSizeNATBackend: + description: BPFMapSizeNATBackend sets the size for nat back end map. + This is the total number of endpoints. This is mostly more than + the size of the number of services. + type: integer + bpfMapSizeNATFrontend: + description: BPFMapSizeNATFrontend sets the size for nat front end + map. FrontendMap should be large enough to hold an entry for each + nodeport, external IP and each port in each service. + type: integer + bpfMapSizeRoute: + description: BPFMapSizeRoute sets the size for the routes map. The + routes map should be large enough to hold one entry per workload + and a handful of entries per host (enough to cover its own IPs and + tunnel IPs). + type: integer + bpfPSNATPorts: + anyOf: + - type: integer + - type: string + description: 'BPFPSNATPorts sets the range from which we randomly + pick a port if there is a source port collision. This should be + within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating + systems. Linux uses 32768–60999, while others mostly use the IANA + defined range 49152–65535. It is not necessarily a problem if this + range overlaps with the operating systems. Both ends of the range + are inclusive. [Default: 20000:29999]' + pattern: ^.* + x-kubernetes-int-or-string: true + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernel''s + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. 
insert is the safe default + since it prevents Calico''s rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + description: DataplaneDriver filename of the external dataplane driver + to use. Only used if UseInternalDataplaneDriver is set to false. + type: string + dataplaneWatchdogTimeout: + description: 'DataplaneWatchdogTimeout is the readiness/liveness timeout + used for Felix''s (internal) dataplane driver. Increase this value + if you experience spurious non-ready or non-live events when Felix + is under heavy load. Decrease the value to get felix to report non-live + or non-ready more quickly. [Default: 90s]' + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the IPv4 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + deviceRouteSourceAddressIPv6: + description: This is the IPv6 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all inbound host ports, use the value + none. The default value allows ssh access and DHCP. 
[Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow outgoing traffic from host endpoints + to irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all outbound host ports, use the value + none. The default value opens etcd''s standard ports to ensure that + Felix does not get cut off from etcd as well as allowing DHCP and + DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, + tcp:6667, udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override the feature + detection. Values are specified in a comma separated list with no + spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". + "true" or "false" will force the feature, empty or omitted values + are auto-detected. + type: string + floatingIPs: + default: Disabled + description: FloatingIPs configures whether or not Felix will program + floating IP addresses. + enum: + - Enabled + - Disabled + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the ''cali'' value, and our OpenStack integration + sets the ''tap'' value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. 
+ type: string + ipipEnabled: + description: 'IPIPEnabled overrides whether Felix should configure + an IPIP interface on the host. Optional as Felix determines this + based on the existing IP pools. [Default: nil (unset)]' + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is legacy. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felix''s + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. [Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. [Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesn''t + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calico''s rules. Set to 0 to disable IP + sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that + was fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. [Default: 10s]' + type: string + ipv6Support: + description: IPv6Support controls whether Felix enables support for + IPv6 (if supported by the in-use dataplane). + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. 
Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logDebugFilenameRegex: + description: LogDebugFilenameRegex controls which source code files + have their Debug log output included in the logs. Only logs from + files with names that match the given regular expression are included. The + filter only applies to Debug level logs. + type: string + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not ''None''), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' + type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). + type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. 
+ [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusWireGuardMetricsEnabled: + description: 'PrometheusWireGuardMetricsEnabled disables wireguard + metrics collection, which the Prometheus client does by default, + when set to false. This reduces the number of metrics reported, + reducing Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. [Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calico''s rules. Set to 0 to disable route refresh. + [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + type: string + routeTableRange: + description: Deprecated in favor of RouteTableRanges. Calico programs + additional Linux route tables for various purposes. RouteTableRange + specifies the indices of the route tables that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + routeTableRanges: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRanges specifies a set of table index ranges + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. + items: + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + type: array + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. 
Unless set to "Disabled", + in which case such routing loops continue to be allowed. [Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + description: UseInternalDataplaneDriver, if true, Felix will use its + internal dataplane programming logic. If false, it will launch + an external dataplane driver and communicate with it over protobuf. + type: boolean + vxlanEnabled: + description: 'VXLANEnabled overrides whether Felix should create the + VXLAN tunnel device for VXLAN networking. Optional as Felix determines + this based on the existing IP pools. [Default: nil (unset)]' + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel + device. See Configuring MTU [Default: 1410]' + type: integer + vxlanMTUV6: + description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel + device. See Configuring MTU [Default: 1390]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled. + [Default: false]' + type: boolean + wireguardHostEncryptionEnabled: + description: 'WireguardHostEncryptionEnabled controls whether Wireguard + host-to-host encryption is enabled. [Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the Wireguard interface. [Default: wg.calico]' + type: string + wireguardKeepAlive: + description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive + option. Set 0 to disable. [Default: 0]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by Wireguard. [Default: 51820]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the Wireguard interface. + See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + workloadSourceSpoofing: + description: WorkloadSourceSpoofing controls whether pods can use + the allowedSourcePrefixes annotation to send traffic with a source + IP address that is not theirs. This is disabled by default. When + set to "Any", pods can request any prefix. + type: string + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. 
[Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). 
\n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." 
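+ # Illustrative note, not part of the generated schema: the two negation
+ # styles described above differ in scope, e.g.
+ #   selector: "!has(my_label)"    -> Calico-controlled endpoints without the label
+ #   notSelector: "has(my_label)"  -> anything that is not a Calico endpoint carrying
+ #                                    the label, including non-Calico sources
+ # ("my_label" is just the placeholder label used in the description above.)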
+ type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
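+ # Illustrative only (label names are hypothetical): when both selectors are
+ # set on the same rule they are combined, e.g.
+ #   namespaceSelector: name == "monitoring"
+ #   selector: app == "prometheus"
+ # matches only endpoints labelled app=prometheus in namespaces labelled name=monitoring.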
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). 
\n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." 
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ required:
+ - action
+ type: object
+ type: array
+ namespaceSelector:
+ description: NamespaceSelector is an optional field for an expression
+ used to select a pod based on namespaces.
+ type: string
+ order:
+ description: Order is an optional field that specifies the order in
+ which the policy is applied. Policies with higher "order" are applied
+ after those with lower order. If the order is omitted, it may be
+ considered to be "infinite" - i.e. the policy will be applied last. Policies
+ with identical order will be applied in alphanumerical order based
+ on the Policy "Name".
+ type: number
+ preDNAT:
+ description: PreDNAT indicates to apply the rules in this policy before
+ any DNAT.
+ type: boolean
+ selector:
+ description: "The selector is an expression used to pick out
+ the endpoints that the policy should be applied to. \n Selector
+ expressions follow this syntax: \n \tlabel == \"string_literal\"
+ \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
+ \ -> not equal; also matches if label is not present \tlabel in
+ { \"a\", \"b\", \"c\", ... } -> true if the value of label X is
+ one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
+ ... } -> true if the value of label X is not one of \"a\", \"b\",
+ \"c\" \thas(label_name) -> True if that label is present \t! expr
+ -> negation of expr \texpr && expr -> Short-circuit and \texpr
+ || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
+ or the empty selector -> matches all endpoints. \n Label names are
+ allowed to contain alphanumerics, -, _ and /. String literals are
+ more permissive but they do not support escape characters. \n Examples
+ (with made-up labels): \n \ttype == \"webserver\" && deployment
+ == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
+ \"dev\" \t! 
has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. (If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. 
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + description: Affinity of the block, if this block has one. If set, + it will be of the form "host:". If not set, this block + is not affine to a host. + type: string + allocations: + description: Array of allocations in-use within this block. nil entries + mean the allocation is free. For non-nil entries at index i, the + index is the ordinal of the allocation within this block and the + value is the index of the associated attributes in the Attributes + array. + items: + type: integer + # TODO: This nullable is manually added in. We should update controller-gen + # to handle []*int properly itself. + nullable: true + type: array + attributes: + description: Attributes is an array of arbitrary metadata associated + with allocations in the block. To find attributes for a given allocation, + use the value of the allocation's entry in the Allocations array + as the index of the element in this array. + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + description: The block's CIDR. + type: string + deleted: + description: Deleted is an internal boolean used to workaround a limitation + in the Kubernetes API whereby deletion will not return a conflict + error if the block has been updated. It should not be set manually. + type: boolean + sequenceNumber: + default: 0 + description: We store a sequence number that is updated each time + the block is written. Each allocation will also store the sequence + number of the block at the time of its creation. When releasing + an IP, passing the sequence number associated with the allocation + allows us to protect against a race condition and ensure the IP + hasn't been released and re-allocated since the release request. + format: int64 + type: integer + sequenceNumberForAllocation: + additionalProperties: + format: int64 + type: integer + description: Map of allocated ordinal within the block to sequence + number of the block at the time of allocation. Kubernetes does not + allow numerical keys for maps, so the key is cast to a string. 
+ type: object + strictAffinity: + description: StrictAffinity on the IPAMBlock is deprecated and no + longer used by the code. Use IPAMConfig StrictAffinity instead. + type: boolean + unallocated: + description: Unallocated is an ordered list of allocations which are + free in the block. + items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. 
+ properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + allowedUses: + description: AllowedUse controls what the IP pool will be used for. If + not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + items: + type: string + type: array + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 122 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disableBGPExport: + description: 'Disable exporting routes from this IP Pool''s CIDR over + BGP. [Default: false]' + type: boolean + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + type: boolean + natOutgoing: + description: When nat-outgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. 
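+ # Illustrative only: all() matches every node, while a label expression such as
+ #   nodeSelector: rack == "rack-1"
+ # restricts this pool to nodes carrying that (hypothetical) label.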
+ type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). + type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ipreservations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPReservation + listKind: IPReservationList + plural: ipreservations + singular: ipreservation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPReservationSpec contains the specification for an IPReservation + resource. + properties: + reservedCIDRs: + description: ReservedCIDRs is a list of CIDRs and/or IP addresses + that Calico IPAM will exclude from new allocations. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. 
+ properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the controller + to determine if an IP address has been leaked. Set to 0 + to disable IP garbage collection. [Default: 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: 9094]' + type: integer + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. 
+ type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the + controller to determine if an IP address has been leaked. + Set to 0 to disable IP garbage collection. [Default: + 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. 
[Default: + 9094]' + type: integer + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. 
Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. 
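+ # Illustrative only (hypothetical names): matching a Service rather than a
+ # label selector looks like
+ #   services:
+ #     name: backend-api
+ #     namespace: prod
+ # and, per the description above, cannot be combined with Selector, NotSelector,
+ # NamespaceSelector, Nets, NotNets or ServiceAccounts on the same rule.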
+ type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. 
When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. 
+ items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. 
+ properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. 
When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. 
+ items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. 
+ \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 new file mode 100644 index 0000000..954f6d7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: canal + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-node.yaml.j2 new file mode 100644 index 0000000..529d4b9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-node.yaml.j2 @@ -0,0 +1,418 @@ +# Source: calico/templates/calico-node.yaml +# This manifest installs the canal container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: canal + namespace: kube-system + labels: + k8s-app: canal +spec: + selector: + matchLabels: + k8s-app: canal + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + template: + metadata: + labels: + k8s-app: canal + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure canal gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: canal + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Set the serviceaccount name to use for the Calico CNI plugin. + # We use canal-node instead of calico-node when using flannel networking. + - name: CALICO_CNI_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-canal.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: canal-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # The location of the etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: canal-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - mountPath: /calico-secrets + name: etcd-certs + securityContext: + privileged: true + # This init container mounts the necessary filesystems needed by the BPF data plane + # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed + # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. + - name: "mount-bpffs" + image: "{{ calico_node_image_repo }}:{{ calico_node_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["calico-node", "-init", "-best-effort"] + volumeMounts: + - mountPath: /sys/fs + name: sys-fs + # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + - mountPath: /var/run/calico + name: var-run-calico + # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + # Mount /proc/ from host which usually is an init program at /nodeproc. 
It's needed by mountns binary, + # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. + - mountPath: /nodeproc + name: nodeproc + readOnly: true + securityContext: + privileged: true + containers: + # Runs canal container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: "{{ calico_node_image_repo }}:{{ calico_node_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # The location of the etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # Set noderef for node controller. + - name: CALICO_K8S_NODE_REF + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Set the serviceaccount name to use for the Calico CNI plugin. + # We use canal-node instead of calico-node when using flannel networking. + - name: CALICO_CNI_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + # Don't enable BGP. + - name: CALICO_NETWORKING_BACKEND + value: "none" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,canal" + # Period, in seconds, at which felix re-applies all iptables state + - name: FELIX_IPTABLESREFRESHINTERVAL + value: "60" + # No IP address needed. + - name: IP + value: "" + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + limits: + cpu: {{ calico_node_cpu_limit }} + memory: {{ calico_node_memory_limit }} + requests: + cpu: {{ calico_node_cpu_requests }} + memory: {{ calico_node_memory_requests }} + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + host: localhost + periodSeconds: 10 + timeoutSeconds: 10 + volumeMounts: + # For maintaining CNI plugin API credentials. 
+ - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - mountPath: /calico-secrets + name: etcd-certs + - name: policysync + mountPath: /var/run/nodeagent + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. + - name: bpffs + mountPath: /sys/fs/bpf + - name: cni-log-dir + mountPath: /var/log/calico/cni + readOnly: true + # Runs the flannel daemon to enable vxlan networking between + # container hosts. + - name: flannel + image: "{{ flannel_image_repo }}:{{ flannel_image_tag }}" + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"] + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + # The location of the etcd cluster. + - name: FLANNELD_ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # Location of the CA certificate for etcd. + - name: FLANNELD_ETCD_CAFILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: FLANNELD_ETCD_KEYFILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. + - name: FLANNELD_ETCD_CERTFILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # The interface flannel should run on. + - name: FLANNELD_IFACE + valueFrom: + configMapKeyRef: + name: canal-config + key: canal_iface + # Perform masquerade on traffic leaving the pod cidr. + - name: FLANNELD_IP_MASQ + valueFrom: + configMapKeyRef: + name: canal-config + key: masquerade + # Write the subnet.env file to the mounted directory. + - name: FLANNELD_SUBNET_FILE + value: "/run/flannel/subnet.env" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/resolv.conf + name: resolv + - mountPath: /run/flannel + name: run-flannel + - mountPath: /calico-secrets + name: etcd-certs + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: flannel-cfg + configMap: + name: canal-config + # Used by canal-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: sys-fs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + - name: bpffs + hostPath: + path: /sys/fs/bpf + type: Directory + # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. + - name: nodeproc + hostPath: + path: /proc + # Used by flannel. 
+ - name: run-flannel + hostPath: + path: /run/flannel + - name: resolv + hostPath: + path: /etc/resolv.conf + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to access CNI logs. + - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Mount in the etcd TLS secrets with mode 400. + # See https://kubernetes.io/docs/concepts/configuration/secret/ + - name: etcd-certs + secret: + secretName: calico-etcd-secrets + defaultMode: 0400 + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-secret-calico-etcd.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-secret-calico-etcd.yml.j2 new file mode 100644 index 0000000..bed51c7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/canal-secret-calico-etcd.yml.j2 @@ -0,0 +1,18 @@ +# Source: calico/templates/calico-etcd-secrets.yaml +# The following contains k8s Secrets for use with a TLS enabled etcd cluster. +# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: calico-etcd-secrets + namespace: kube-system +data: + # Populate the following with etcd TLS configuration if desired, but leave blank if + # not using TLS for etcd. + # The keys below should be uncommented and the values populated with the base64 + # encoded contents of each file that would be associated with the TLS data. + # Example command for encoding a file contents: cat | base64 -w 0 + etcd-key: {{ etcd_key_file.content }} + etcd-cert: {{ etcd_cert_file.content }} + etcd-ca: {{ etcd_ca_cert_file.content }} diff --git a/kubespray/extra_playbooks/roles/network_plugin/canal/templates/cni-canal.conflist.j2 b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/cni-canal.conflist.j2 new file mode 100644 index 0000000..3902a81 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/canal/templates/cni-canal.conflist.j2 @@ -0,0 +1,34 @@ + { + "name": "canal", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "type": "calico", + "include_default_routes": true, + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "etcd_key_file": "__ETCD_KEY_FILE__", + "etcd_cert_file": "__ETCD_CERT_FILE__", + "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", + "log_level": "info", +{% if calico_cni_log_file_path %} + "log_file_path": "{{ calico_cni_log_file_path }}", +{% endif %} + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + } + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + } + ] + } diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/defaults/main.yml b/kubespray/extra_playbooks/roles/network_plugin/cilium/defaults/main.yml new file mode 100644 index 0000000..b58b39e --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/defaults/main.yml @@ -0,0 +1,256 @@ +--- +cilium_min_version_required: "1.10" +# Log-level +cilium_debug: false + +cilium_mtu: "" +cilium_enable_ipv4: true +cilium_enable_ipv6: false + +# Cilium agent health port +cilium_agent_health_port: "{%- if cilium_version | 
regex_replace('v') is version('1.11.6', '>=') -%}9879 {%- else -%} 9876 {%- endif -%}"
+
+# Identity allocation mode selects how identities are shared between cilium
+# nodes by setting how they are stored. The options are "crd" or "kvstore".
+# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
+#   These can be queried with:
+#     `kubectl get ciliumid`
+# - "kvstore" stores identities in an etcd kvstore.
+# - In order to support External Workloads, "crd" is required
+#   - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
+# - KVStore operations are only required when cilium-operator is running with any of the below options:
+#   - --synchronize-k8s-services
+#   - --synchronize-k8s-nodes
+#   - --identity-allocation-mode=kvstore
+#   - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
+cilium_identity_allocation_mode: kvstore
+
+# Etcd SSL dirs
+cilium_cert_dir: /etc/cilium/certs
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
+
+# Limits for apps
+cilium_memory_limit: 500M
+cilium_cpu_limit: 500m
+cilium_memory_requests: 64M
+cilium_cpu_requests: 100m
+
+# Overlay Network Mode
+cilium_tunnel_mode: vxlan
+# Optional features
+cilium_enable_prometheus: false
+# Enable if you want to make use of hostPort mappings
+cilium_enable_portmap: false
+# Monitor aggregation level (none/low/medium/maximum)
+cilium_monitor_aggregation: medium
+# Kube Proxy Replacement mode (strict/probe/partial)
+cilium_kube_proxy_replacement: probe
+
+# If upgrading from Cilium < 1.5, you may want to override some of these options
+# to prevent service disruptions. See also:
+# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
+cilium_preallocate_bpf_maps: false
+
+# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
+cilium_tofqdns_enable_poller: false
+
+# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
+cilium_enable_legacy_services: false
+
+# Deploy cilium even if kube_network_plugin is not cilium.
+# This makes it possible to deploy Cilium alongside another CNI to replace kube-proxy.
+cilium_deploy_additionally: false
+
+# Auto direct node routes can be used to advertise pod routes in your cluster
+# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`).
+# This works only if you have L2 connectivity between all your nodes.
+# You will also have to specify the variable `cilium_native_routing_cidr` to
+# make this work. Please refer to the Cilium documentation for more
+# information about this kind of setup.
+cilium_auto_direct_node_routes: false
+
+# Allows to explicitly specify the IPv4 CIDR for native routing.
+# When specified, Cilium assumes networking for this CIDR is preconfigured and
+# hands traffic destined for that range to the Linux network stack without
+# applying any SNAT.
+# Generally speaking, specifying a native routing CIDR implies that Cilium can
+# depend on the underlying networking stack to route packets to their
+# destination. To offer a concrete example, if Cilium is configured to use
+# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+# the user must configure the routes to reach pods, either manually or by
+# setting the auto-direct-node-routes flag.
+cilium_native_routing_cidr: ""
+
+# Allows to explicitly specify the IPv6 CIDR for native routing.
+cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +cilium_wireguard_userspace_fallback: false + +# Enable Bandwidth Manager +# Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation. +# Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies. +# In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. +# Bandwidth Manager requires a v5.1.x or more recent Linux kernel. +cilium_enable_bandwidth_manager: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +cilium_non_masquerade_cidrs: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + - 100.64.0.0/10 + - 192.0.0.0/24 + - 192.0.2.0/24 + - 192.88.99.0/24 + - 198.18.0.0/15 + - 198.51.100.0/24 + - 203.0.113.0/24 + - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +cilium_enable_hubble: false +### Enable Hubble Metrics +cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. 
+# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
+cilium_ipam_mode: kubernetes
+
+# Extra arguments for the Cilium agent
+cilium_agent_custom_args: []
+
+# For adding and mounting extra volumes to the cilium agent
+cilium_agent_extra_volumes: []
+cilium_agent_extra_volume_mounts: []
+
+cilium_agent_extra_env_vars: []
+
+cilium_operator_replicas: 2
+
+# The address at which the cilium operator binds its health check API
+cilium_operator_api_serve_addr: "127.0.0.1:9234"
+
+## A dictionary of extra config variables to add to cilium-config, formatted like:
+##   cilium_config_extra_vars:
+##     var1: "value1"
+##     var2: "value2"
+cilium_config_extra_vars: {}
+
+# For adding and mounting extra volumes to the cilium operator
+cilium_operator_extra_volumes: []
+cilium_operator_extra_volume_mounts: []
+
+# Extra arguments for the Cilium Operator
+cilium_operator_custom_args: []
+
+# Name of the cluster. Only relevant when building a mesh of clusters.
+cilium_cluster_name: default
+
+# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
+# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime.
+# Available for Cilium v1.10 and up.
+cilium_cni_exclusive: true
+
+# Configure the log file for CNI logging with retention policy of 7 days.
+# Disable CNI file logging by setting this field to empty explicitly.
+# Available for Cilium v1.12 and up.
+cilium_cni_log_file: "/var/run/cilium/cilium-cni.log"
+
+# -- Configure cgroup related configuration
+# -- Enable auto mount of cgroup2 filesystem.
+# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at
+# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod.
+# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted
+# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the
+# volume will be mounted inside the cilium agent pod at the same path.
+# Available for Cilium v1.11 and up
+cilium_cgroup_auto_mount: true
+# -- Configure cgroup root where cgroup2 filesystem is mounted on the host
+cilium_cgroup_host_root: "/run/cilium/cgroupv2"
+
+# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
+# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
+cilium_bpf_map_dynamic_size_ratio: "0.0025"
+
+# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
+# Available for Cilium v1.10 and up
+cilium_enable_ipv4_masquerade: true
+# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
+# Available for Cilium v1.10 and up
+cilium_enable_ipv6_masquerade: true
+
+# -- Enable native IP masquerade support in eBPF
+cilium_enable_bpf_masquerade: false
+
+# -- Configure whether direct routing mode should route traffic via
+# host stack (true) or directly and more efficiently out of BPF (false) if
+# the kernel supports it. The latter has the implication that it will also
+# bypass netfilter in the host namespace.
+cilium_enable_host_legacy_routing: true
+
+# -- Enable use of the remote node identity.
+# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
+cilium_enable_remote_node_identity: true
+
+# -- Enable the use of well-known identities.
+cilium_enable_well_known_identities: false
+
+# The monitor aggregation flags determine which TCP flags, upon the
+# first observation, cause monitor notifications to be generated.
+# +# Only effective when monitor aggregation is set to "medium" or higher. +cilium_monitor_aggregation_flags: "all" + +cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. +cilium_disable_cnp_status_updates: true + +# Configure how long to wait for the Cilium DaemonSet to be ready again +cilium_rolling_restart_wait_retries_count: 30 +cilium_rolling_restart_wait_retries_delay_seconds: 10 + +# Cilium changed the default metrics exporter ports in 1.12 +cilium_agent_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9962', '9090') }}" +cilium_operator_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9963', '6942') }}" +cilium_hubble_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9965', '9091') }}" diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/meta/main.yml b/kubespray/extra_playbooks/roles/network_plugin/cilium/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/apply.yml b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/apply.yml new file mode 100644 index 0000000..b977c21 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/apply.yml @@ -0,0 +1,33 @@ +--- +- name: Cilium | Start Resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.name }}-{{ item.item.file }}" + state: "latest" + loop: "{{ cilium_node_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped + +- name: Cilium | Wait for pods to run + command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 + register: pods_not_ready + until: pods_not_ready.stdout.find("cilium")==-1 + retries: "{{ cilium_rolling_restart_wait_retries_count | int }}" + delay: "{{ cilium_rolling_restart_wait_retries_delay_seconds | int }}" + failed_when: false + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Cilium | Hubble install + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/hubble/{{ item.item.name }}-{{ item.item.file }}" + state: "latest" + loop: "{{ cilium_hubble_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] and not item is skipped + - cilium_enable_hubble and cilium_hubble_install diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/check.yml b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/check.yml new file mode 100644 index 0000000..c65591f --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/check.yml @@ -0,0 +1,63 @@ +--- +- name: Cilium | Check Cilium encryption `cilium_ipsec_key` for ipsec + assert: + that: + - "cilium_ipsec_key is defined" + msg: "cilium_ipsec_key should be defined to enable encryption using ipsec" + when: + - cilium_encryption_enabled + - cilium_encryption_type == "ipsec" + - cilium_tunnel_mode in ['vxlan'] + +# TODO: Clean this task up when we drop 
backward compatibility support for `cilium_ipsec_enabled`
+- name: Stop if `cilium_ipsec_enabled` is defined and `cilium_encryption_type` is not `ipsec`
+  assert:
+    that: cilium_encryption_type == 'ipsec'
+    msg: >
+      It is not possible to use `cilium_ipsec_enabled` when `cilium_encryption_type` is set to {{ cilium_encryption_type }}.
+  when:
+    - cilium_ipsec_enabled is defined
+    - cilium_ipsec_enabled
+    - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool
+
+- name: Stop if kernel version is too low for Cilium Wireguard encryption
+  assert:
+    that: ansible_kernel.split('-')[0] is version('5.6.0', '>=')
+  when:
+    - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool
+    - cilium_encryption_enabled
+    - cilium_encryption_type == "wireguard"
+    - not ignore_assert_errors
+
+- name: Stop if bad Cilium identity allocation mode
+  assert:
+    that: cilium_identity_allocation_mode in ['crd', 'kvstore']
+    msg: "cilium_identity_allocation_mode must be either 'crd' or 'kvstore'"
+
+- name: Stop if bad Cilium Cluster ID
+  assert:
+    that:
+      - cilium_cluster_id <= 255
+      - cilium_cluster_id >= 0
+    msg: "'cilium_cluster_id' must be between 0 and 255"
+  when: cilium_cluster_id is defined
+
+- name: Stop if bad encryption type
+  assert:
+    that: cilium_encryption_type in ['ipsec', 'wireguard']
+    msg: "cilium_encryption_type must be either 'ipsec' or 'wireguard'"
+  when: cilium_encryption_enabled
+
+- name: Stop if cilium_version is < v1.10.0
+  assert:
+    that: cilium_version | regex_replace('v') is version(cilium_min_version_required, '>=')
+    msg: "cilium_version is too low. Minimum version {{ cilium_min_version_required }}"
+
+# TODO: Clean this task up when we drop backward compatibility support for `cilium_ipsec_enabled`
+- name: Set `cilium_encryption_type` to "ipsec" if `cilium_ipsec_enabled` is true
+  set_fact:
+    cilium_encryption_type: ipsec
+    cilium_encryption_enabled: true
+  when:
+    - cilium_ipsec_enabled is defined
+    - cilium_ipsec_enabled
diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/install.yml b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/install.yml
new file mode 100644
index 0000000..9e89b7b
--- /dev/null
+++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/install.yml
@@ -0,0 +1,97 @@
+---
+- name: Cilium | Ensure BPFFS mounted
+  mount:
+    fstype: bpf
+    path: /sys/fs/bpf
+    src: bpffs
+    state: mounted
+
+- name: Cilium | Create Cilium certs directory
+  file:
+    dest: "{{ cilium_cert_dir }}"
+    state: directory
+    mode: 0750
+    owner: root
+    group: root
+  when:
+    - cilium_identity_allocation_mode == "kvstore"
+
+- name: Cilium | Link etcd certificates for cilium
+  file:
+    src: "{{ etcd_cert_dir }}/{{ item.s }}"
+    dest: "{{ cilium_cert_dir }}/{{ item.d }}"
+    mode: 0644
+    state: hard
+    force: yes
+  loop:
+    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
+    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
+    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
+  when:
+    - cilium_identity_allocation_mode == "kvstore"
+
+- name: Cilium | Create hubble dir
+  file:
+    path: "{{ kube_config_dir }}/addons/hubble"
+    state: directory
+    owner: root
+    group: root
+    mode: 0755
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
+    - cilium_hubble_install
+
+- name: Cilium | Create Cilium node manifests
+  template:
+    src: "{{ item.name }}/{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}"
+    mode: 0644
+  loop:
+    - {name: cilium, file: config.yml,
type: cm} + - {name: cilium-operator, file: crb.yml, type: clusterrolebinding} + - {name: cilium-operator, file: cr.yml, type: clusterrole} + - {name: cilium, file: crb.yml, type: clusterrolebinding} + - {name: cilium, file: cr.yml, type: clusterrole} + - {name: cilium, file: secret.yml, type: secret, when: "{{ cilium_encryption_enabled and cilium_encryption_type == 'ipsec' }}"} + - {name: cilium, file: ds.yml, type: ds} + - {name: cilium-operator, file: deploy.yml, type: deploy} + - {name: cilium-operator, file: sa.yml, type: sa} + - {name: cilium, file: sa.yml, type: sa} + register: cilium_node_manifests + when: + - inventory_hostname in groups['kube_control_plane'] + - item.when | default(True) | bool + +- name: Cilium | Create Cilium Hubble manifests + template: + src: "{{ item.name }}/{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/hubble/{{ item.name }}-{{ item.file }}" + mode: 0644 + loop: + - {name: hubble, file: config.yml, type: cm} + - {name: hubble, file: crb.yml, type: clusterrolebinding} + - {name: hubble, file: cr.yml, type: clusterrole} + - {name: hubble, file: cronjob.yml, type: cronjob, when: "{{ cilium_hubble_tls_generate }}"} + - {name: hubble, file: deploy.yml, type: deploy} + - {name: hubble, file: job.yml, type: job, when: "{{ cilium_hubble_tls_generate }}"} + - {name: hubble, file: sa.yml, type: sa} + - {name: hubble, file: service.yml, type: service} + register: cilium_hubble_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + - cilium_enable_hubble and cilium_hubble_install + - item.when | default(True) | bool + +- name: Cilium | Enable portmap addon + template: + src: 000-cilium-portmap.conflist.j2 + dest: /etc/cni/net.d/000-cilium-portmap.conflist + mode: 0644 + when: cilium_enable_portmap + +- name: Cilium | Copy Ciliumcli binary from download dir + copy: + src: "{{ local_release_dir }}/cilium" + dest: "{{ bin_dir }}/cilium" + mode: 0755 + remote_src: yes diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/main.yml new file mode 100644 index 0000000..63c99dc --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- import_tasks: check.yml + +- include_tasks: install.yml + +- include_tasks: apply.yml diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/reset.yml b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/reset.yml new file mode 100644 index 0000000..432df8a --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/reset.yml @@ -0,0 +1,9 @@ +--- +- name: reset | check and remove devices if still present + include_tasks: reset_iface.yml + vars: + iface: "{{ item }}" + loop: + - cilium_host + - cilium_net + - cilium_vxlan diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/reset_iface.yml b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/reset_iface.yml new file mode 100644 index 0000000..d84a065 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/tasks/reset_iface.yml @@ -0,0 +1,12 @@ +--- +- name: "reset | check if network device {{ iface }} is present" + stat: + path: "/sys/class/net/{{ iface }}" + get_attributes: no + get_checksum: no + get_mime: no + register: device_remains + +- name: "reset | remove network device {{ iface }}" + command: "ip link del {{ iface }}" + when: device_remains.stat.exists diff --git 
a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/000-cilium-portmap.conflist.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/000-cilium-portmap.conflist.j2 new file mode 100644 index 0000000..982a7c9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/000-cilium-portmap.conflist.j2 @@ -0,0 +1,13 @@ +{ + "cniVersion": "0.3.1", + "name": "cilium-portmap", + "plugins": [ + { + "type": "cilium-cni" + }, + { + "type": "portmap", + "capabilities": { "portMappings": true } + } + ] +} diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 new file mode 100644 index 0000000..8a40a66 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 @@ -0,0 +1,146 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator +rules: +- apiGroups: + - "" + resources: + # to automatically delete [core|kube]dns pods so that are starting to being + # managed by Cilium + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + # To remove node taints + - nodes + # To set NetworkUnavailable false on startup + - nodes/status + verbs: + - patch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform LB IP allocation for BGP + - services/status + verbs: + - update +- apiGroups: + - "" + resources: + # to perform the translation of a CNP that contains `ToGroup` to its endpoints + - services + - endpoints + # to check apiserver connectivity + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/status + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers +{% if cilium_version | regex_replace('v') is version('1.11', '>=') %} + - ciliumendpointslices +{% endif %} +{% if cilium_version | regex_replace('v') is version('1.12', '>=') %} + - ciliumbgploadbalancerippools + - ciliumbgppeeringpolicies + - ciliumenvoyconfigs +{% endif %} + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - update + - watch +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between multiple running instances. +# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". 
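# Illustrative aside, not part of the upstream template: when cilium_operator_replicas > 1, the
# elected operator instance holds a coordination.k8s.io/v1 Lease in kube-system and keeps renewing
# it; a standby replica only takes over once the lease expires. A rough sketch of the acquired
# lock (the object name, holder identity and timings below are assumptions for illustration only):
#
#   apiVersion: coordination.k8s.io/v1
#   kind: Lease
#   metadata:
#     name: cilium-operator-resource-lock
#     namespace: kube-system
#   spec:
#     holderIdentity: cilium-operator-6d9f4bcd77-abcde
#     leaseDurationSeconds: 15
#     renewTime: "2023-01-01T00:00:00.000000Z"
#
# The coordination.k8s.io rule that follows is what grants create/get/update on that Lease.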
+- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +{% if cilium_version | regex_replace('v') is version('1.12', '>=') %} +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - update + resourceNames: + - ciliumbgploadbalancerippools.cilium.io + - ciliumbgppeeringpolicies.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumegressnatpolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumexternalworkloads.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io +{% endif %} diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 new file mode 100644 index 0000000..00f0835 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: cilium-operator + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 new file mode 100644 index 0000000..5a5bd4a --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 @@ -0,0 +1,166 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cilium-operator + namespace: kube-system + labels: + io.cilium/app: operator + name: cilium-operator +spec: + replicas: {{ cilium_operator_replicas }} + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: +{% if cilium_enable_prometheus %} + annotations: + prometheus.io/port: "{{ cilium_operator_scrape_port }}" + prometheus.io/scrape: "true" +{% endif %} + labels: + io.cilium/app: operator + name: cilium-operator + spec: + containers: + - name: cilium-operator + image: "{{ cilium_operator_image_repo }}:{{ cilium_operator_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - cilium-operator + args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) +{% if cilium_operator_custom_args is string %} + - {{ cilium_operator_custom_args }} +{% else %} +{% for flag in cilium_operator_custom_args %} + - {{ flag }} +{% endfor %} +{% endif %} + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_ACCESS_KEY_ID + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_SECRET_ACCESS_KEY + optional: true + - name: AWS_DEFAULT_REGION + valueFrom: + secretKeyRef: + 
name: cilium-aws + key: AWS_DEFAULT_REGION + optional: true +{% if cilium_kube_proxy_replacement == 'strict' %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} +{% if cilium_enable_prometheus %} + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: prometheus + containerPort: {{ cilium_operator_scrape_port }} + hostPort: {{ cilium_operator_scrape_port }} + protocol: TCP +{% endif %} + livenessProbe: + httpGet: +{% if cilium_enable_ipv4 %} + host: 127.0.0.1 +{% else %} + host: '::1' +{% endif %} + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true +{% if cilium_identity_allocation_mode == "kvstore" %} + - name: etcd-config-path + mountPath: /var/lib/etcd-config + readOnly: true + - name: etcd-secrets + mountPath: "{{cilium_cert_dir}}" + readOnly: true +{% endif %} +{% for volume_mount in cilium_operator_extra_volume_mounts %} + - {{ volume_mount | to_nice_yaml(indent=2) | indent(14) }} +{% endfor %} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + # In HA mode, cilium-operator pods must not be scheduled on the same + # node as they will clash with each other. + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator + tolerations: + - operator: Exists + volumes: + - name: cilium-config-path + configMap: + name: cilium-config +{% if cilium_identity_allocation_mode == "kvstore" %} + # To read the etcd config stored in config maps + - name: etcd-config-path + configMap: + name: cilium-config + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: "{{cilium_cert_dir}}" +{% endif %} +{% for volume in cilium_operator_extra_volumes %} + - {{ volume | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 new file mode 100644 index 0000000..c5d1893 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-operator + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/config.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/config.yml.j2 new file mode 100644 index 0000000..7a524c6 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/config.yml.j2 @@ -0,0 +1,248 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cilium-config + namespace: kube-system +data: + identity-allocation-mode: {{ cilium_identity_allocation_mode }} + +{% if cilium_identity_allocation_mode == "kvstore" %} + # This etcd-config contains the etcd endpoints of your cluster. 
If you use + # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config + etcd-config: |- + --- + endpoints: +{% for ip_addr in etcd_access_addresses.split(',') %} + - {{ ip_addr }} +{% endfor %} + + # In case you want to use TLS in etcd, uncomment the 'ca-file' line + # and create a kubernetes secret by following the tutorial in + # https://cilium.link/etcd-config + ca-file: "{{ cilium_cert_dir }}/ca_cert.crt" + + # In case you want client to server authentication, uncomment the following + # lines and create a kubernetes secret by following the tutorial in + # https://cilium.link/etcd-config + key-file: "{{ cilium_cert_dir }}/key.pem" + cert-file: "{{ cilium_cert_dir }}/cert.crt" + + # kvstore + # https://docs.cilium.io/en/latest/cmdref/kvstore/ + kvstore: etcd + kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' +{% endif %} + + # If you want metrics enabled in all of your Cilium agents, set the port for + # which the Cilium agents will have their metrics exposed. + # This option deprecates the "prometheus-serve-addr" in the + # "cilium-metrics-config" ConfigMap + # NOTE that this will open the port on ALL nodes where Cilium pods are + # scheduled. +{% if cilium_enable_prometheus %} + prometheus-serve-addr: ":{{ cilium_agent_scrape_port }}" + operator-prometheus-serve-addr: ":{{ cilium_operator_scrape_port }}" + enable-metrics: "true" +{% endif %} + + # If you want to run cilium in debug mode change this value to true + debug: "{{ cilium_debug }}" + enable-ipv4: "{{ cilium_enable_ipv4 }}" + enable-ipv6: "{{ cilium_enable_ipv6 }}" + # If a serious issue occurs during Cilium startup, this + # invasive option may be set to true to remove all persistent + # state. Endpoints will not be restored using knowledge from a + # prior Cilium run, so they may receive new IP addresses upon + # restart. This also triggers clean-cilium-bpf-state. + clean-cilium-state: "false" + # If you want to clean cilium BPF state, set this to true; + # Removes all BPF maps from the filesystem. Upon restart, + # endpoints are restored with the same IP addresses, however + # any ongoing connections may be disrupted briefly. + # Loadbalancing decisions will be reset, so any ongoing + # connections via a service may be loadbalanced to a different + # backend after restart. + clean-cilium-bpf-state: "false" + + # Users who wish to specify their own custom CNI configuration file must set + # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. + custom-cni-conf: "false" + + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". The higher the level, the less packets + # that will be seen in monitor output. + monitor-aggregation: "{{ cilium_monitor_aggregation }}" + + # ct-global-max-entries-* specifies the maximum number of connections + # supported across all endpoints, split by protocol: tcp or other. One pair + # of maps uses these values for IPv4 connections, and another pair of maps + # use these values for IPv6 connections. + # + # If these values are modified, then during the next Cilium startup the + # tracking of ongoing connections may be disrupted. This may lead to brief + # policy drops or a change in loadbalancing decisions for a connection. + # + # For users upgrading from Cilium 1.2 or earlier, to minimize disruption + # during the upgrade process, comment out these options. 
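# Illustrative aside, not part of the upstream template: the two limits rendered below are fixed
# entry counts per protocol, and a separate pair of maps exists per address family, so the defaults
# track up to 524288 TCP and 262144 non-TCP connections for IPv4 plus the same again for IPv6. To
# change them, edit the values below. Options that this template does not already emit can instead
# be supplied from group_vars through the cilium_config_extra_vars loop rendered further down, for
# example (example key, shown only for illustration):
#
#   cilium_config_extra_vars:
#     enable-local-redirect-policy: "true"
#
# Each key/value pair from that dict is written verbatim into this ConfigMap, so avoid re-declaring
# keys the template already sets.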
+ bpf-ct-global-tcp-max: "524288" + bpf-ct-global-any-max: "262144" + + # Pre-allocation of map entries allows per-packet latency to be reduced, at + # the expense of up-front memory allocation for the entries in the maps. The + # default value below will minimize memory usage in the default installation; + # users who are sensitive to latency may consider setting this to "true". + # + # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore + # this option and behave as though it is set to "true". + # + # If this value is modified, then during the next Cilium startup the restore + # of existing endpoints and tracking of ongoing connections may be disrupted. + # This may lead to policy drops or a change in loadbalancing decisions for a + # connection for some time. Endpoints may need to be recreated to restore + # connectivity. + # + # If this option is set to "false" during an upgrade from 1.3 or earlier to + # 1.4 or later, then it may cause one-time disruptions during the upgrade. + preallocate-bpf-maps: "{{cilium_preallocate_bpf_maps}}" + + # Regular expression matching compatible Istio sidecar istio-proxy + # container image names + sidecar-istio-proxy-image: "cilium/istio_proxy" + + # Encapsulation mode for communication between nodes + # Possible values: + # - disabled + # - vxlan (default) + # - geneve + tunnel: "{{ cilium_tunnel_mode }}" + + # Enable Bandwidth Manager + # Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation. + # Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies. + # In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. + # Bandwidth Manager requires a v5.1.x or more recent Linux kernel. +{% if cilium_enable_bandwidth_manager %} + enable-bandwidth-manager: "true" +{% endif %} + + # Name of the cluster. Only relevant when building a mesh of clusters. + cluster-name: "{{ cilium_cluster_name }}" + + # Unique ID of the cluster. Must be unique across all conneted clusters and + # in the range of 1 and 255. Only relevant when building a mesh of clusters. + #cluster-id: 1 +{% if cilium_cluster_id is defined %} + cluster-id: "{{ cilium_cluster_id }}" +{% endif %} + +# `wait-bpf-mount` is removed after v1.10.4 +# https://github.com/cilium/cilium/commit/d2217045cb3726a7f823174e086913b69b8090da +{% if cilium_version | regex_replace('v') is version('1.10.4', '<') %} + # wait-bpf-mount makes init container wait until bpf filesystem is mounted + wait-bpf-mount: "false" +{% endif %} + + kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}" + +# `native-routing-cidr` is deprecated in 1.10, removed in 1.12. 
+# Replaced by `ipv4-native-routing-cidr` +# https://github.com/cilium/cilium/pull/16695 +{% if cilium_version | regex_replace('v') is version('1.12', '<') %} + native-routing-cidr: "{{ cilium_native_routing_cidr }}" +{% else %} +{% if cilium_native_routing_cidr | length %} + ipv4-native-routing-cidr: "{{ cilium_native_routing_cidr }}" +{% endif %} +{% if cilium_native_routing_cidr_ipv6 | length %} + ipv6-native-routing-cidr: "{{ cilium_native_routing_cidr_ipv6 }}" +{% endif %} +{% endif %} + + auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}" + + operator-api-serve-addr: "{{ cilium_operator_api_serve_addr }}" + + # Hubble settings +{% if cilium_enable_hubble %} + enable-hubble: "true" +{% if cilium_enable_hubble_metrics %} + hubble-metrics-server: ":{{ cilium_hubble_scrape_port }}" + hubble-metrics: +{% for hubble_metrics_cycle in cilium_hubble_metrics %} + {{ hubble_metrics_cycle }} +{% endfor %} +{% endif %} + hubble-listen-address: ":4244" +{% if cilium_enable_hubble and cilium_hubble_install %} + hubble-disable-tls: "{% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}" + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt +{% endif %} +{% endif %} + + # IP Masquerade Agent + enable-ip-masq-agent: "{{ cilium_ip_masq_agent_enable }}" + +{% for key, value in cilium_config_extra_vars.items() %} + {{ key }}: "{{ value }}" +{% endfor %} + + # Enable transparent network encryption +{% if cilium_encryption_enabled %} +{% if cilium_encryption_type == "ipsec" %} + enable-ipsec: "true" + ipsec-key-file: /etc/ipsec/keys + encrypt-node: "{{ cilium_ipsec_node_encryption }}" +{% endif %} + +{% if cilium_encryption_type == "wireguard" %} + enable-wireguard: "true" + enable-wireguard-userspace-fallback: "{{ cilium_wireguard_userspace_fallback }}" +{% endif %} +{% endif %} + + # IPAM settings + ipam: "{{ cilium_ipam_mode }}" + + agent-health-port: "{{ cilium_agent_health_port }}" + +{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_host_root != '' %} + cgroup-root: "{{ cilium_cgroup_host_root }}" +{% endif %} + + bpf-map-dynamic-size-ratio: "{{ cilium_bpf_map_dynamic_size_ratio }}" + + enable-ipv4-masquerade: "{{ cilium_enable_ipv4_masquerade }}" + enable-ipv6-masquerade: "{{ cilium_enable_ipv6_masquerade }}" + + enable-bpf-masquerade: "{{ cilium_enable_bpf_masquerade }}" + + enable-host-legacy-routing: "{{ cilium_enable_host_legacy_routing }}" + + enable-remote-node-identity: "{{ cilium_enable_remote_node_identity }}" + + enable-well-known-identities: "{{ cilium_enable_well_known_identities }}" + + monitor-aggregation-flags: "{{ cilium_monitor_aggregation_flags }}" + + enable-bpf-clock-probe: "{{ cilium_enable_bpf_clock_probe }}" + + disable-cnp-status-updates: "{{ cilium_disable_cnp_status_updates }}" +{% if cilium_ip_masq_agent_enable %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ip-masq-agent + namespace: kube-system +data: + config: | + nonMasqueradeCIDRs: +{% for cidr in cilium_non_masquerade_cidrs %} + - {{ cidr }} +{% endfor %} + masqLinkLocal: {{ cilium_masq_link_local|bool }} + resyncInterval: "{{ cilium_ip_masq_resync_interval }}" +{% endif %} diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 new file mode 100644 index 0000000..a16211c --- /dev/null +++ 
b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 @@ -0,0 +1,122 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +{% if cilium_version | regex_replace('v') is version('1.12', '<') %} +- apiGroups: + - "" + resources: + - pods + - pods/finalizers + verbs: + - get + - list + - watch + - update + - delete +- apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - update +{% endif %} +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + # Deprecated for removal in v1.10 + - create + - list + - watch + - update + + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. + - get +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints + - ciliumendpoints/status + - ciliumnodes + - ciliumnodes/status + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumegressnatpolicies +{% if cilium_version | regex_replace('v') is version('1.11', '>=') %} + - ciliumendpointslices +{% endif %} +{% if cilium_version | regex_replace('v') is version('1.12', '>=') %} + - ciliumbgploadbalancerippools + - ciliumbgppeeringpolicies +{% endif %} +{% if cilium_version | regex_replace('v') is version('1.11.5', '<') %} + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints/finalizers + - ciliumnodes/finalizers + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies/finalizers +{% endif %} + verbs: + - '*' +{% if cilium_version | regex_replace('v') is version('1.12', '>=') %} +- apiGroups: + - cilium.io + resources: + - ciliumclusterwideenvoyconfigs + - ciliumenvoyconfigs + - ciliumegressgatewaypolicies + verbs: + - list + - watch +{% endif %} diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 new file mode 100644 index 0000000..d23897f --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 new file mode 100644 index 0000000..08385b4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 @@ -0,0 +1,424 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cilium + namespace: kube-system + labels: + k8s-app: cilium 
+spec: + selector: + matchLabels: + k8s-app: cilium + updateStrategy: + rollingUpdate: + # Specifies the maximum number of Pods that can be unavailable during the update process. + maxUnavailable: 2 + type: RollingUpdate + template: + metadata: + annotations: +{% if cilium_enable_prometheus %} + prometheus.io/port: "{{ cilium_agent_scrape_port }}" + prometheus.io/scrape: "true" +{% endif %} + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]' + labels: + k8s-app: cilium + spec: + containers: + - name: cilium-agent + image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - cilium-agent + args: + - --config-dir=/tmp/cilium/config-map +{% if cilium_mtu != "" %} + - --mtu={{ cilium_mtu }} +{% endif %} +{% if cilium_agent_custom_args is string %} + - {{ cilium_agent_custom_args }} +{% else %} +{% for flag in cilium_agent_custom_args %} + - {{ flag }} +{% endfor %} +{% endif %} + startupProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 105 + periodSeconds: 2 + successThreshold: 1 + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ +{% if cilium_kube_proxy_replacement == 'strict' %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} +{% for env_var in cilium_agent_extra_env_vars %} + - {{ env_var | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} + lifecycle: + postStart: + exec: + command: + - "/cni-install.sh" + - "--cni-exclusive={{ cilium_cni_exclusive | string | lower }}" +{% if cilium_version | regex_replace('v') is version('1.12', '>=') %} + - "--enable-debug={{ cilium_debug | string | lower }}" + - "--log-file={{ cilium_cni_log_file }}" +{% endif %} + preStop: + exec: + command: + - /cni-uninstall.sh + resources: + limits: + cpu: {{ cilium_cpu_limit }} + memory: {{ cilium_memory_limit }} + requests: + cpu: {{ cilium_cpu_requests }} + memory: {{ cilium_memory_requests }} +{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %} + ports: +{% endif %} +{% if cilium_enable_prometheus %} + - name: prometheus + containerPort: {{ cilium_agent_scrape_port }} + hostPort: {{ cilium_agent_scrape_port }} + protocol: TCP +{% endif %} +{% if cilium_enable_hubble_metrics %} + - name: hubble-metrics + containerPort: {{ cilium_hubble_scrape_port }} + hostPort: {{ cilium_hubble_scrape_port }} + protocol: TCP +{% endif %} + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf + 
mountPropagation: Bidirectional + - name: cilium-run + mountPath: /var/run/cilium + - name: cni-path + mountPath: /host/opt/cni/bin + - name: etc-cni-netd + mountPath: /host/etc/cni/net.d +{% if cilium_identity_allocation_mode == "kvstore" %} + - name: etcd-config-path + mountPath: /var/lib/etcd-config + readOnly: true + - name: etcd-secrets + mountPath: "{{cilium_cert_dir}}" + readOnly: true +{% endif %} + - name: clustermesh-secrets + mountPath: /var/lib/cilium/clustermesh + readOnly: true + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true +{% if cilium_ip_masq_agent_enable %} + - name: ip-masq-agent + mountPath: /etc/config + readOnly: true +{% endif %} + # Needed to be able to load kernel modules + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock +{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %} + - name: cilium-ipsec-secrets + mountPath: /etc/ipsec + readOnly: true +{% endif %} +{% if cilium_hubble_install %} + - name: hubble-tls + mountPath: /var/lib/cilium/tls/hubble + readOnly: true +{% endif %} +{% for volume_mount in cilium_agent_extra_volume_mounts %} + - {{ volume_mount | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} +# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service +{% if cilium_identity_allocation_mode == "kvstore" %} + dnsPolicy: ClusterFirstWithHostNet +{% endif %} + hostNetwork: true + initContainers: +{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_auto_mount %} + - name: mount-cgroup + image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: CGROUP_ROOT + value: {{ cilium_cgroup_host_root }} + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh and mount that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + securityContext: + privileged: true +{% endif %} +{% if cilium_version | regex_replace('v') is version('1.11.7', '>=') %} + - name: apply-sysctl-overwrites + image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. 
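# Illustrative aside (rough expansion, not part of the upstream template): with the environment set
# above (BIN_PATH=/opt/cni/bin) and the host's /proc mounted at /hostproc, the script below amounts
# to roughly the following as seen from the host:
#
#   cp /usr/bin/cilium-sysctlfix /opt/cni/bin/cilium-sysctlfix   # written through the /hostbin mount
#   nsenter --mount=/hostproc/1/ns/mnt /opt/cni/bin/cilium-sysctlfix
#   rm /opt/cni/bin/cilium-sysctlfix
#
# i.e. the statically linked helper is copied onto the host, executed inside PID 1's mount
# namespace so the sysctl overrides apply to the host rather than the container, and then removed.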
+ - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + securityContext: + privileged: true +{% endif %} + - name: clean-cilium-state + image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-state + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-bpf-state + optional: true +# Removed in 1.11 and up. +# https://github.com/cilium/cilium/commit/f7a3f59fd74983c600bfce9cac364b76d20849d9 +{% if cilium_version | regex_replace('v') is version('1.11', '<') %} + - name: CILIUM_WAIT_BPF_MOUNT + valueFrom: + configMapKeyRef: + key: wait-bpf-mount + name: cilium-config + optional: true +{% endif %} +{% if cilium_kube_proxy_replacement == 'strict' %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf +{% if cilium_version | regex_replace('v') is version('1.11', '>=') %} + # Required to mount cgroup filesystem from the host to cilium agent pod + - name: cilium-cgroup + mountPath: {{ cilium_cgroup_host_root }} + mountPropagation: HostToContainer +{% endif %} + - name: cilium-run + mountPath: /var/run/cilium + resources: + requests: + cpu: 100m + memory: 100Mi + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + hostNetwork: true +# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service +{% if cilium_identity_allocation_mode == "kvstore" %} + dnsPolicy: ClusterFirstWithHostNet +{% endif %} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + tolerations: + - operator: Exists + volumes: + # To keep state between restarts / upgrades + - name: cilium-run + hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + # To keep state between restarts / upgrades for bpf maps + - name: bpf-maps + hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate +{% if cilium_version | regex_replace('v') is version('1.11', '>=') %} + # To mount cgroup2 filesystem on the host + - name: hostproc + hostPath: + path: /proc + type: Directory + # To keep state between restarts / upgrades for cgroup2 filesystem + - name: cilium-cgroup + hostPath: + path: {{ cilium_cgroup_host_root }} + type: DirectoryOrCreate +{% endif %} + # To install cilium cni plugin in the host + - name: cni-path + hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + # To install cilium cni configuration in the host + - name: etc-cni-netd + hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + # To be able to load kernel modules + - name: lib-modules + hostPath: + path: /lib/modules + # To access iptables concurrently with other processes (e.g. 
kube-proxy) + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +{% if cilium_identity_allocation_mode == "kvstore" %} + # To read the etcd config stored in config maps + - name: etcd-config-path + configMap: + name: cilium-config + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + items: + - key: etcd-config + path: etcd.config + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: "{{cilium_cert_dir}}" +{% endif %} + # To read the clustermesh configuration + - name: clustermesh-secrets + secret: + secretName: cilium-clustermesh + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + optional: true + # To read the configuration from the config map + - name: cilium-config-path + configMap: + name: cilium-config +{% if cilium_ip_masq_agent_enable %} + - name: ip-masq-agent + configMap: + name: ip-masq-agent + optional: true + items: + - key: config + path: ip-masq-agent +{% endif %} +{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %} + - name: cilium-ipsec-secrets + secret: + secretName: cilium-ipsec-keys +{% endif %} +{% if cilium_hubble_install %} + - name: hubble-tls + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + sources: + - secret: + name: hubble-server-certs + optional: true + items: + - key: ca.crt + path: client-ca.crt + - key: tls.crt + path: server.crt + - key: tls.key + path: server.key +{% endif %} diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 new file mode 100644 index 0000000..c03ac59 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 new file mode 100644 index 0000000..a5fcc56 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +data: + keys: {{ cilium_ipsec_key }} +kind: Secret +metadata: + name: cilium-ipsec-keys + namespace: kube-system +type: Opaque \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/config.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/config.yml.j2 new file mode 100644 index 0000000..4f42abe --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/config.yml.j2 @@ -0,0 +1,87 @@ +--- +# Source: cilium/templates/hubble-relay-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-relay-config + namespace: kube-system +data: + config.yaml: | + peer-service: unix:///var/run/cilium/hubble.sock + listen-address: :4245 + dial-timeout: + retry-timeout: + sort-buffer-len-max: + sort-buffer-drain-timeout: + tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt + tls-client-key-file: /var/lib/hubble-relay/tls/client.key + tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt + disable-server-tls: {% if cilium_hubble_tls_generate %}false{% 
else %}true{% endif %} + disable-client-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %} +--- +# Source: cilium/templates/hubble-ui-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-ui-envoy + namespace: kube-system +data: + envoy.yaml: | + static_resources: + listeners: + - name: listener_hubble_ui + address: + socket_address: + address: 0.0.0.0 + port_value: 8081 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + config: + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ['*'] + routes: + - match: + prefix: '/api/' + route: + cluster: backend + max_grpc_timeout: 0s + prefix_rewrite: '/' + - match: + prefix: '/' + route: + cluster: frontend + cors: + allow_origin_string_match: + - prefix: '*' + allow_methods: GET, PUT, DELETE, POST, OPTIONS + allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout + max_age: '1728000' + expose_headers: grpc-status,grpc-message + http_filters: + - name: envoy.filters.http.grpc_web + - name: envoy.filters.http.cors + - name: envoy.filters.http.router + clusters: + - name: frontend + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + hosts: + - socket_address: + address: 127.0.0.1 + port_value: 8080 + - name: backend + connect_timeout: 0.25s + type: logical_dns + lb_policy: round_robin + http2_protocol_options: {} + hosts: + - socket_address: + address: 127.0.0.1 + port_value: 8090 diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 new file mode 100644 index 0000000..4a95565 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 @@ -0,0 +1,106 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: hubble-generate-certs +rules: + - apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - hubble-server-certs + - hubble-relay-client-certs + - hubble-relay-server-certs + verbs: + - update + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - hubble-ca-cert + verbs: + - update + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - hubble-ca-secret + verbs: + - get +{% endif %} +--- +# Source: cilium/templates/hubble-relay-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +rules: + - apiGroups: + - "" + resources: + - componentstatuses + - endpoints + - namespaces + - nodes + - pods + - services + verbs: + - get + - list + - watch +--- +# Source: cilium/templates/hubble-ui-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-ui +rules: + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - componentstatuses + - endpoints + - namespaces + - nodes + - pods + - services + verbs: + - get + - list + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - apiGroups: 
+ - cilium.io + resources: + - "*" + verbs: + - get + - list + - watch diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 new file mode 100644 index 0000000..f033429 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 @@ -0,0 +1,44 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: hubble-generate-certs +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-generate-certs +subjects: +- kind: ServiceAccount + name: hubble-generate-certs + namespace: kube-system +{% endif %} +--- +# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-relay +subjects: +- kind: ServiceAccount + namespace: kube-system + name: hubble-relay +--- +# Source: cilium/templates/hubble-ui-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-ui +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-ui +subjects: +- kind: ServiceAccount + namespace: kube-system + name: hubble-ui diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 new file mode 100644 index 0000000..dd97bbf --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 @@ -0,0 +1,49 @@ +--- +# Source: cilium/templates/hubble-generate-certs-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: hubble-generate-certs + namespace: kube-system + labels: + k8s-app: hubble-generate-certs +spec: + schedule: "0 0 1 */4 *" + concurrencyPolicy: Forbid + jobTemplate: + spec: + template: + metadata: + labels: + k8s-app: hubble-generate-certs + spec: + serviceAccount: hubble-generate-certs + serviceAccountName: hubble-generate-certs + containers: + - name: certgen + image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - "/usr/bin/cilium-certgen" + # Because this is executed as a job, we pass the values as command + # line args instead of via config map. This allows users to inspect + # the values used in past runs by inspecting the completed pod. 
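# For orientation (worked numbers, not part of the upstream template): the schedule above,
# "0 0 1 */4 *", re-runs certificate generation at 00:00 on the first day of every fourth month,
# while the validity durations passed below are 94608000 s (94608000 / 86400 = 1095 days, roughly
# three years), so the Hubble certificates are refreshed long before they expire.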
+ args: + - "--cilium-namespace=kube-system" + - "--hubble-ca-reuse-secret=true" + - "--hubble-ca-secret-name=hubble-ca-secret" + - "--hubble-ca-generate=true" + - "--hubble-ca-validity-duration=94608000s" + - "--hubble-ca-config-map-create=true" + - "--hubble-ca-config-map-name=hubble-ca-cert" + - "--hubble-server-cert-generate=true" + - "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io" + - "--hubble-server-cert-validity-duration=94608000s" + - "--hubble-server-cert-secret-name=hubble-server-certs" + - "--hubble-relay-client-cert-generate=true" + - "--hubble-relay-client-cert-validity-duration=94608000s" + - "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs" + - "--hubble-relay-server-cert-generate=false" + hostNetwork: true + restartPolicy: OnFailure + ttlSecondsAfterFinished: 1800 diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 new file mode 100644 index 0000000..43dd02b --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 @@ -0,0 +1,161 @@ +--- +# Source: cilium/templates/hubble-relay-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hubble-relay + labels: + k8s-app: hubble-relay + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-relay + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + labels: + k8s-app: hubble-relay + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "k8s-app" + operator: In + values: + - cilium + topologyKey: "kubernetes.io/hostname" + containers: + - name: hubble-relay + image: "{{ cilium_hubble_relay_image_repo }}:{{ cilium_hubble_relay_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - hubble-relay + args: + - serve + ports: + - name: grpc + containerPort: 4245 + readinessProbe: + tcpSocket: + port: grpc + livenessProbe: + tcpSocket: + port: grpc + volumeMounts: + - mountPath: /var/run/cilium + name: hubble-sock-dir + readOnly: true + - mountPath: /etc/hubble-relay + name: config + readOnly: true + - mountPath: /var/lib/hubble-relay/tls + name: tls + readOnly: true + restartPolicy: Always + serviceAccount: hubble-relay + serviceAccountName: hubble-relay + terminationGracePeriodSeconds: 0 + volumes: + - configMap: + name: hubble-relay-config + items: + - key: config.yaml + path: config.yaml + name: config + - hostPath: + path: /var/run/cilium + type: Directory + name: hubble-sock-dir + - projected: + sources: + - secret: + name: hubble-relay-client-certs + items: + - key: tls.crt + path: client.crt + - key: tls.key + path: client.key + - configMap: + name: hubble-ca-cert + items: + - key: ca.crt + path: hubble-server-ca.crt + name: tls +--- +# Source: cilium/templates/hubble-ui-deployment.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + namespace: kube-system + labels: + k8s-app: hubble-ui + name: hubble-ui +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-ui + template: + metadata: + annotations: + labels: + k8s-app: hubble-ui + spec: + securityContext: + runAsUser: 1001 + serviceAccount: hubble-ui + serviceAccountName: hubble-ui + containers: + - name: frontend + image: "{{ cilium_hubble_ui_image_repo }}:{{ cilium_hubble_ui_image_tag }}" + imagePullPolicy: {{ 
k8s_image_pull_policy }} + ports: + - containerPort: 8080 + name: http + resources: + {} + - name: backend + image: "{{ cilium_hubble_ui_backend_image_repo }}:{{ cilium_hubble_ui_backend_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: EVENTS_SERVER_PORT + value: "8090" + - name: FLOWS_API_ADDR + value: "hubble-relay:80" + ports: + - containerPort: 8090 + name: grpc + resources: + {} + - name: proxy + image: "{{ cilium_hubble_envoy_image_repo }}:{{ cilium_hubble_envoy_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + ports: + - containerPort: 8081 + name: http + resources: + {} + command: ["envoy"] + args: + [ + "-c", + "/etc/envoy.yaml", + "-l", + "info" + ] + volumeMounts: + - name: hubble-ui-envoy-yaml + mountPath: /etc/envoy.yaml + subPath: envoy.yaml + volumes: + - name: hubble-ui-envoy-yaml + configMap: + name: hubble-ui-envoy diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/job.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/job.yml.j2 new file mode 100644 index 0000000..38a42bf --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/job.yml.j2 @@ -0,0 +1,45 @@ +--- +# Source: cilium/templates/hubble-generate-certs-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: hubble-generate-certs + namespace: kube-system + labels: + k8s-app: hubble-generate-certs +spec: + template: + metadata: + labels: + k8s-app: hubble-generate-certs + spec: + serviceAccount: hubble-generate-certs + serviceAccountName: hubble-generate-certs + containers: + - name: certgen + image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - "/usr/bin/cilium-certgen" + # Because this is executed as a job, we pass the values as command + # line args instead of via config map. This allows users to inspect + # the values used in past runs by inspecting the completed pod. 
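# For orientation (not part of the upstream template): this Job is the one-shot counterpart of the
# CronJob template above and is rendered alongside the other Hubble manifests when
# cilium_hubble_tls_generate is enabled. Because ttlSecondsAfterFinished is set to 1800 further
# down, the completed certgen pod (and with it the window for inspecting the values used in past
# runs, mentioned above) is garbage-collected roughly 30 minutes after the job finishes.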
+ args: + - "--cilium-namespace=kube-system" + - "--hubble-ca-reuse-secret=true" + - "--hubble-ca-secret-name=hubble-ca-secret" + - "--hubble-ca-generate=true" + - "--hubble-ca-validity-duration=94608000s" + - "--hubble-ca-config-map-create=true" + - "--hubble-ca-config-map-name=hubble-ca-cert" + - "--hubble-server-cert-generate=true" + - "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io" + - "--hubble-server-cert-validity-duration=94608000s" + - "--hubble-server-cert-secret-name=hubble-server-certs" + - "--hubble-relay-client-cert-generate=true" + - "--hubble-relay-client-cert-validity-duration=94608000s" + - "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs" + - "--hubble-relay-server-cert-generate=false" + hostNetwork: true + restartPolicy: OnFailure + ttlSecondsAfterFinished: 1800 diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 new file mode 100644 index 0000000..9b3203d --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 @@ -0,0 +1,23 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-generate-certs + namespace: kube-system +{% endif %} +--- +# Source: cilium/templates/hubble-relay-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-relay + namespace: kube-system +--- +# Source: cilium/templates/hubble-ui-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-ui + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/service.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/service.yml.j2 new file mode 100644 index 0000000..56dba76 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cilium/templates/hubble/service.yml.j2 @@ -0,0 +1,58 @@ +{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %} +--- +# Source: cilium/templates/cilium-agent-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-metrics + namespace: kube-system + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: "9091" + labels: + k8s-app: hubble +spec: + clusterIP: None + type: ClusterIP + ports: + - name: hubble-metrics + port: 9091 + protocol: TCP + targetPort: hubble-metrics + selector: + k8s-app: cilium +{% endif %} +--- +# Source: cilium/templates/hubble-relay-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-relay + namespace: kube-system + labels: + k8s-app: hubble-relay +spec: + type: ClusterIP + selector: + k8s-app: hubble-relay + ports: + - protocol: TCP + port: 80 + targetPort: 4245 +--- +# Source: cilium/templates/hubble-ui-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-ui + labels: + k8s-app: hubble-ui + namespace: kube-system +spec: + selector: + k8s-app: hubble-ui + ports: + - name: http + port: 80 + targetPort: 8081 + type: ClusterIP diff --git a/kubespray/extra_playbooks/roles/network_plugin/cni/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/cni/tasks/main.yml new file mode 100644 index 0000000..b8bcec3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/cni/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: CNI | make sure /opt/cni/bin exists + file: + path: /opt/cni/bin + state: 
directory + mode: 0755 + owner: "{{ kube_owner }}" + recurse: true + +- name: CNI | Copy cni plugins + unarchive: + src: "{{ local_release_dir }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + dest: "/opt/cni/bin" + mode: 0755 + remote_src: yes diff --git a/kubespray/extra_playbooks/roles/network_plugin/flannel/defaults/main.yml b/kubespray/extra_playbooks/roles/network_plugin/flannel/defaults/main.yml new file mode 100644 index 0000000..cd1dcf1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/flannel/defaults/main.yml @@ -0,0 +1,28 @@ +--- +# Flannel public IP +# The address that flannel should advertise as how to access the system +# Disabled until https://github.com/coreos/flannel/issues/712 is fixed +# flannel_public_ip: "{{ access_ip|default(ip|default(fallback_ips[inventory_hostname])) }}" + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +flannel_backend_type: "vxlan" +flannel_vxlan_vni: 1 +flannel_vxlan_port: 8472 +flannel_vxlan_direct_routing: false + +# Limits for apps +flannel_memory_limit: 500M +flannel_cpu_limit: 300m +flannel_memory_requests: 64M +flannel_cpu_requests: 150m diff --git a/kubespray/extra_playbooks/roles/network_plugin/flannel/meta/main.yml b/kubespray/extra_playbooks/roles/network_plugin/flannel/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/flannel/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/extra_playbooks/roles/network_plugin/flannel/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/flannel/tasks/main.yml new file mode 100644 index 0000000..2fd82e9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/flannel/tasks/main.yml @@ -0,0 +1,21 @@ +--- + +- name: Flannel | Stop if kernel version is too low for Flannel Wireguard encryption + assert: + that: ansible_kernel.split('-')[0] is version('5.6.0', '>=') + when: + - kube_network_plugin == 'flannel' + - flannel_backend_type == 'wireguard' + - not ignore_assert_errors + +- name: Flannel | Create Flannel manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: flannel, file: cni-flannel-rbac.yml, type: sa} + - {name: kube-flannel, file: cni-flannel.yml, type: ds} + register: flannel_node_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/extra_playbooks/roles/network_plugin/flannel/tasks/reset.yml b/kubespray/extra_playbooks/roles/network_plugin/flannel/tasks/reset.yml new file mode 100644 index 0000000..2fd86e2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/flannel/tasks/reset.yml @@ -0,0 +1,24 @@ +--- +- name: reset | check cni network device + stat: + path: /sys/class/net/cni0 + get_attributes: no + get_checksum: no + get_mime: no + register: cni + +- name: reset | remove the network device created by the flannel + command: ip link del cni0 + when: 
cni.stat.exists + +- name: reset | check flannel network device + stat: + path: /sys/class/net/flannel.1 + get_attributes: no + get_checksum: no + get_mime: no + register: flannel + +- name: reset | remove the network device created by the flannel + command: ip link del flannel.1 + when: flannel.stat.exists diff --git a/kubespray/extra_playbooks/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 new file mode 100644 index 0000000..7c73b09 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 new file mode 100644 index 0000000..607d225 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -0,0 +1,170 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "{{ kube_pods_subnet }}", + "EnableIPv4": true, +{% if enable_dual_stack_networks %} + "EnableIPv6": true, + "IPv6Network": "{{ kube_pods_subnet_ipv6 }}", +{% endif %} + "Backend": { + "Type": "{{ flannel_backend_type }}"{% if flannel_backend_type == "vxlan" %}, + "VNI": {{ flannel_vxlan_vni }}, + "Port": {{ flannel_vxlan_port }}, + "DirectRouting": {{ flannel_vxlan_direct_routing | to_json }} +{% endif %} + } + } +{% for arch in ['amd64', 'arm64', 'arm', 'ppc64le', 's390x'] %} +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: +{% if arch == 'amd64' %} + name: kube-flannel +{% else %} + name: kube-flannel-ds-{{ arch }} +{% endif %} + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + priorityClassName: system-node-critical + serviceAccountName: flannel + containers: + - name: kube-flannel + image: {{ flannel_image_repo }}:{{ flannel_image_tag | regex_replace(image_arch,'') }}{{ arch }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ flannel_cpu_limit }} + memory: {{ flannel_memory_limit }} + requests: + cpu: {{ flannel_cpu_requests }} + memory: {{ flannel_memory_requests }} + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if 
flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ] + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: EVENT_QUEUE_DEPTH + value: "5000" + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: xtables-lock + mountPath: /run/xtables.lock + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: kubernetes.io/arch + operator: In + values: + - {{ arch }} + initContainers: + - name: install-cni-plugin + image: {{ flannel_init_image_repo }}:{{ flannel_init_image_tag | regex_replace(image_arch,'') }}{{ arch }} + command: + - cp + args: + - -f + - /flannel + - /opt/cni/bin/flannel + volumeMounts: + - name: cni-plugin + mountPath: /opt/cni/bin + - name: install-cni + image: {{ flannel_image_repo }}:{{ flannel_image_tag | regex_replace(image_arch,'') }}{{ arch }} + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - operator: Exists + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: cni-plugin + hostPath: + path: /opt/cni/bin + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/OWNERS b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/OWNERS new file mode 100644 index 0000000..84256aa --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +emeritus_approvers: +- oilbeater diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/defaults/main.yml b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/defaults/main.yml new file mode 100644 index 0000000..430f698 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/defaults/main.yml @@ -0,0 +1,98 @@ +--- +kube_ovn_db_cpu_request: 500m +kube_ovn_db_memory_request: 200Mi +kube_ovn_db_cpu_limit: 3000m +kube_ovn_db_memory_limit: 3000Mi +kube_ovn_node_cpu_request: 200m +kube_ovn_node_memory_request: 200Mi +kube_ovn_node_cpu_limit: 1000m +kube_ovn_node_memory_limit: 800Mi +kube_ovn_cni_server_cpu_request: 200m +kube_ovn_cni_server_memory_request: 200Mi +kube_ovn_cni_server_cpu_limit: 1000m +kube_ovn_cni_server_memory_limit: 1Gi +kube_ovn_controller_cpu_request: 200m +kube_ovn_controller_memory_request: 200Mi +kube_ovn_controller_cpu_limit: 1000m +kube_ovn_controller_memory_limit: 1Gi +kube_ovn_pinger_cpu_request: 100m +kube_ovn_pinger_memory_request: 200Mi +kube_ovn_pinger_cpu_limit: 200m +kube_ovn_pinger_memory_limit: 400Mi +kube_ovn_monitor_memory_request: 200Mi +kube_ovn_monitor_cpu_request: 200m +kube_ovn_monitor_memory_limit: 200Mi +kube_ovn_monitor_cpu_limit: 200m +kube_ovn_dpdk_node_cpu_request: 
1000m +kube_ovn_dpdk_node_memory_request: 2Gi +kube_ovn_dpdk_node_cpu_limit: 1000m +kube_ovn_dpdk_node_memory_limit: 2Gi + +kube_ovn_central_replics: 1 +kube_ovn_controller_replics: 1 + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt requires a custom-compiled OVS kernel module +kube_ovn_tunnel_type: geneve + +## The NIC used for the container network can be a NIC name or a comma-separated group of regexes, e.g. 'enp6s0f0,eth.*'; if empty, the NIC that the default route uses is selected. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port. +kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false +kube_ovn_dpdk_tunnel_iface: br-phy + +## eip snat +kube_ovn_eip_snat_enabled: true + +## keep vm ip +kube_ovn_keep_vm_ip: true + +## cni config priority, default: 01 +kube_ovn_cni_config_priority: 01 diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/tasks/main.yml new file mode 100644 index 0000000..f720c51 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Kube-OVN | Label ovn-db node + command: >- + {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kube-OVN | Create Kube-OVN manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: kube-ovn-crd, file: cni-kube-ovn-crd.yml} + - {name: ovn, file: cni-ovn.yml} + - {name: kube-ovn, file: cni-kube-ovn.yml} + register: kube_ovn_node_manifests diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 new file mode 100644 index 0000000..5878d2c --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 @@ -0,0 +1,1160 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name:
vpc-nat-gateways.kubeovn.io +spec: + group: kubeovn.io + names: + plural: vpc-nat-gateways + singular: vpc-nat-gateway + shortNames: + - vpc-nat-gw + kind: VpcNatGateway + listKind: VpcNatGatewayList + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.vpc + name: Vpc + type: string + - jsonPath: .spec.subnet + name: Subnet + type: string + - jsonPath: .spec.lanIp + name: LanIP + type: string + name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + lanIp: + type: string + subnet: + type: string + vpc: + type: string + selector: + type: array + items: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-eips.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-eips + singular: iptables-eip + shortNames: + - eip + kind: IptablesEIP + listKind: IptablesEIPList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.ip + name: IP + type: string + - jsonPath: .spec.macAddress + name: Mac + type: string + - jsonPath: .status.nat + name: Nat + type: string + - jsonPath: .spec.natGwDp + name: NatGwDp + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + ip: + type: string + nat: + type: string + redo: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + v4ip: + type: string + v6ip: + type: string + macAddress: + type: string + natGwDp: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-fip-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-fip-rules + singular: iptables-fip-rule + shortNames: + - fip + kind: IptablesFIPRule + listKind: IptablesFIPRuleList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.eip + name: Eip + type: string + - jsonPath: .status.v4ip + name: V4ip + type: string + - jsonPath: .spec.internalIp + name: InternalIp + type: string + - jsonPath: .status.v6ip + name: V6ip + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + - jsonPath: .status.natGwDp + name: NatGwDp + type: string + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + natGwDp: + type: string + redo: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + eip: + type: string + internalIp: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-dnat-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-dnat-rules + singular: iptables-dnat-rule + shortNames: + - dnat + kind: IptablesDnatRule + listKind: IptablesDnatRuleList + scope: Cluster + 
versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.eip + name: Eip + type: string + - jsonPath: .spec.protocol + name: Protocol + type: string + - jsonPath: .status.v4ip + name: V4ip + type: string + - jsonPath: .status.v6ip + name: V6ip + type: string + - jsonPath: .spec.internalIp + name: InternalIp + type: string + - jsonPath: .spec.externalPort + name: ExternalPort + type: string + - jsonPath: .spec.internalPort + name: InternalPort + type: string + - jsonPath: .status.natGwDp + name: NatGwDp + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + natGwDp: + type: string + redo: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + eip: + type: string + externalPort: + type: string + protocol: + type: string + internalIp: + type: string + internalPort: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-snat-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-snat-rules + singular: iptables-snat-rule + shortNames: + - snat + kind: IptablesSnatRule + listKind: IptablesSnatRuleList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.eip + name: EIP + type: string + - jsonPath: .status.v4ip + name: V4ip + type: string + - jsonPath: .status.v6ip + name: V6ip + type: string + - jsonPath: .spec.internalCIDR + name: InternalCIDR + type: string + - jsonPath: .status.natGwDp + name: NatGwDp + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + natGwDp: + type: string + redo: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + eip: + type: string + internalCIDR: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vpcs.kubeovn.io +spec: + group: kubeovn.io + versions: + - additionalPrinterColumns: + - jsonPath: .status.standby + name: Standby + type: boolean + - jsonPath: .status.subnets + name: Subnets + type: string + - jsonPath: .spec.namespaces + name: Namespaces + type: string + name: v1 + schema: + openAPIV3Schema: + properties: + spec: + properties: + namespaces: + items: + type: string + type: array + staticRoutes: + items: + properties: + policy: + type: string + cidr: + type: string + nextHopIP: + type: string + type: object + type: array + policyRoutes: + items: + properties: + priority: + type: integer + action: + type: string + match: + type: string + nextHopIP: + type: string + type: object + type: array + vpcPeerings: + items: + properties: + remoteVpc: + type: string + localConnectIP: + type: string + type: object + type: array + type: 
object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + type: string + lastUpdateTime: + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + default: + type: boolean + defaultLogicalSwitch: + type: string + router: + type: string + standby: + type: boolean + subnets: + items: + type: string + type: array + vpcPeerings: + items: + type: string + type: array + tcpLoadBalancer: + type: string + tcpSessionLoadBalancer: + type: string + udpLoadBalancer: + type: string + udpSessionLoadBalancer: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + names: + kind: Vpc + listKind: VpcList + plural: vpcs + shortNames: + - vpc + singular: vpc + scope: Cluster +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ips.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: V4IP + type: string + jsonPath: .spec.v4IpAddress + - name: V6IP + type: string + jsonPath: .spec.v6IpAddress + - name: Mac + type: string + jsonPath: .spec.macAddress + - name: Node + type: string + jsonPath: .spec.nodeName + - name: Subnet + type: string + jsonPath: .spec.subnet + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + podName: + type: string + namespace: + type: string + subnet: + type: string + attachSubnets: + type: array + items: + type: string + nodeName: + type: string + ipAddress: + type: string + v4IpAddress: + type: string + v6IpAddress: + type: string + attachIps: + type: array + items: + type: string + macAddress: + type: string + attachMacs: + type: array + items: + type: string + containerID: + type: string + podType: + type: string + scope: Cluster + names: + plural: ips + singular: ip + kind: IP + shortNames: + - ip +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vips.kubeovn.io +spec: + group: kubeovn.io + names: + plural: vips + singular: vip + shortNames: + - vip + kind: Vip + listKind: VipList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: V4IP + type: string + jsonPath: .spec.v4ip + - name: PV4IP + type: string + jsonPath: .spec.parentV4ip + - name: Mac + type: string + jsonPath: .spec.macAddress + - name: PMac + type: string + jsonPath: .spec.ParentMac + - name: V6IP + type: string + jsonPath: .spec.v6ip + - name: PV6IP + type: string + jsonPath: .spec.parentV6ip + - name: Subnet + type: string + jsonPath: .spec.subnet + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + mac: + type: string + pv4ip: + type: string + pv6ip: + type: string + pmac: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + namespace: + type: string + subnet: + type: string + attachSubnets: + type: array + items: + type: string + v4ip: + type: string + macAddress: + type: string + v6ip: + type: string + parentV4ip: + type: string + parentMac: + type: string + parentV6ip: + type: 
string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: subnets.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Provider + type: string + jsonPath: .spec.provider + - name: Vpc + type: string + jsonPath: .spec.vpc + - name: Protocol + type: string + jsonPath: .spec.protocol + - name: CIDR + type: string + jsonPath: .spec.cidrBlock + - name: Private + type: boolean + jsonPath: .spec.private + - name: NAT + type: boolean + jsonPath: .spec.natOutgoing + - name: Default + type: boolean + jsonPath: .spec.default + - name: GatewayType + type: string + jsonPath: .spec.gatewayType + - name: V4Used + type: number + jsonPath: .status.v4usingIPs + - name: V4Available + type: number + jsonPath: .status.v4availableIPs + - name: V6Used + type: number + jsonPath: .status.v6usingIPs + - name: V6Available + type: number + jsonPath: .status.v6availableIPs + - name: ExcludeIPs + type: string + jsonPath: .spec.excludeIps + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + v4availableIPs: + type: number + v4usingIPs: + type: number + v6availableIPs: + type: number + v6usingIPs: + type: number + activateGateway: + type: string + dhcpV4OptionsUUID: + type: string + dhcpV6OptionsUUID: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + vpc: + type: string + default: + type: boolean + protocol: + type: string + enum: + - IPv4 + - IPv6 + - Dual + cidrBlock: + type: string + namespaces: + type: array + items: + type: string + gateway: + type: string + provider: + type: string + excludeIps: + type: array + items: + type: string + vips: + type: array + items: + type: string + gatewayType: + type: string + allowSubnets: + type: array + items: + type: string + gatewayNode: + type: string + natOutgoing: + type: boolean + externalEgressGateway: + type: string + policyRoutingPriority: + type: integer + minimum: 1 + maximum: 32765 + policyRoutingTableID: + type: integer + minimum: 1 + maximum: 2147483647 + not: + enum: + - 252 # compat + - 253 # default + - 254 # main + - 255 # local + private: + type: boolean + vlan: + type: string + logicalGateway: + type: boolean + disableGatewayCheck: + type: boolean + disableInterConnection: + type: boolean + htbqos: + type: string + enableDHCP: + type: boolean + dhcpV4Options: + type: string + dhcpV6Options: + type: string + enableIPv6RA: + type: boolean + ipv6RAConfigs: + type: string + acls: + type: array + items: + type: object + properties: + direction: + type: string + enum: + - from-lport + - to-lport + priority: + type: integer + minimum: 0 + maximum: 32767 + match: + type: string + action: + type: string + enum: + - allow-related + - allow-stateless + - allow + - drop + - reject + scope: Cluster + names: + plural: subnets + singular: subnet + kind: Subnet + shortNames: + - subnet +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vlans.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + id: + type: integer + minimum: 0 + maximum: 4095 + 
provider: + type: string + vlanId: + type: integer + description: Deprecated in favor of id + providerInterfaceName: + type: string + description: Deprecated in favor of provider + required: + - provider + status: + type: object + properties: + subnets: + type: array + items: + type: string + additionalPrinterColumns: + - name: ID + type: string + jsonPath: .spec.id + - name: Provider + type: string + jsonPath: .spec.provider + scope: Cluster + names: + plural: vlans + singular: vlan + kind: Vlan + shortNames: + - vlan +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: provider-networks.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 12 + not: + enum: + - int + - external + spec: + type: object + properties: + defaultInterface: + type: string + maxLength: 15 + pattern: '^[^/\s]+$' + customInterfaces: + type: array + items: + type: object + properties: + interface: + type: string + maxLength: 15 + pattern: '^[^/\s]+$' + nodes: + type: array + items: + type: string + exchangeLinkName: + type: boolean + excludeNodes: + type: array + items: + type: string + required: + - defaultInterface + status: + type: object + properties: + ready: + type: boolean + readyNodes: + type: array + items: + type: string + vlans: + type: array + items: + type: string + conditions: + type: array + items: + type: object + properties: + node: + type: string + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + additionalPrinterColumns: + - name: DefaultInterface + type: string + jsonPath: .spec.defaultInterface + - name: Ready + type: boolean + jsonPath: .status.ready + scope: Cluster + names: + plural: provider-networks + singular: provider-network + kind: ProviderNetwork + listKind: ProviderNetworkList +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: security-groups.kubeovn.io +spec: + group: kubeovn.io + names: + plural: security-groups + singular: security-group + shortNames: + - sg + kind: SecurityGroup + listKind: SecurityGroupList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + ingressRules: + type: array + items: + type: object + properties: + ipVersion: + type: string + protocol: + type: string + priority: + type: integer + remoteType: + type: string + remoteAddress: + type: string + remoteSecurityGroup: + type: string + portRangeMin: + type: integer + portRangeMax: + type: integer + policy: + type: string + egressRules: + type: array + items: + type: object + properties: + ipVersion: + type: string + protocol: + type: string + priority: + type: integer + remoteType: + type: string + remoteAddress: + type: string + remoteSecurityGroup: + type: string + portRangeMin: + type: integer + portRangeMax: + type: integer + policy: + type: string + allowSameGroupTraffic: + type: boolean + status: + type: object + properties: + portGroup: + type: string + allowSameGroupTraffic: + type: boolean + ingressMd5: + type: string + egressMd5: + type: string + ingressLastSyncSuccess: + type: boolean + egressLastSyncSuccess: + type: boolean + subresources: + status: {} + conversion: + strategy: None 
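+# Editorial sketch, not part of the generated CRDs: a minimal SecurityGroup
+# object that the schema above would admit. All field values are hypothetical
+# examples, not defaults.
+#
+# apiVersion: kubeovn.io/v1
+# kind: SecurityGroup
+# metadata:
+#   name: sg-example
+# spec:
+#   allowSameGroupTraffic: true
+#   ingressRules:
+#     - ipVersion: ipv4
+#       protocol: tcp
+#       priority: 1
+#       remoteType: address
+#       remoteAddress: 10.16.0.0/16
+#       portRangeMin: 80
+#       portRangeMax: 80
+#       policy: allow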
+--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: htbqoses.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: PRIORITY + type: string + jsonPath: .spec.priority + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + priority: + type: string # Value in range 0 to 4,294,967,295. + scope: Cluster + names: + plural: htbqoses + singular: htbqos + kind: HtbQos + shortNames: + - htbqos diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 new file mode 100644 index 0000000..c6eacc1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 @@ -0,0 +1,610 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: kube-ovn-controller + namespace: kube-system + annotations: + kubernetes.io/description: | + kube-ovn controller +spec: + replicas: {{ kube_ovn_controller_replics }} + selector: + matchLabels: + app: kube-ovn-controller + strategy: + rollingUpdate: + maxSurge: 0% + maxUnavailable: 100% + type: RollingUpdate + template: + metadata: + labels: + app: kube-ovn-controller + component: network + type: infra + spec: + tolerations: + - operator: Exists + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: kube-ovn-controller + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + containers: + - name: kube-ovn-controller + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /kube-ovn/start-controller.sh + args: + - --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{''}} + - --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{''}} + - --default-gateway-check={{ kube_ovn_default_gateway_check|string }} + - --default-logical-gateway={{ kube_ovn_default_logical_gateway|string }} + - --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{''}} + - --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{''}} + - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}} + - --network-type={{ kube_ovn_network_type }} + - --default-interface-name={{ kube_ovn_default_interface_name|default('') }} + - --default-vlan-id={{ kube_ovn_default_vlan_id }} + - --pod-nic-type={{ kube_ovn_pod_nic_type }} + - --enable-lb={{ kube_ovn_enable_lb|string }} + - --enable-np={{ kube_ovn_enable_np|string }} + - --enable-eip-snat={{ kube_ovn_eip_snat_enabled }} + - --enable-external-vpc={{ kube_ovn_enable_external_vpc|string }} + - --logtostderr=false + - --alsologtostderr=true + - --log_file=/var/log/kube-ovn/kube-ovn-controller.log + - --log_file_max_size=0 + - --keep-vm-ip={{ kube_ovn_keep_vm_ip }} + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KUBE_NAMESPACE + valueFrom: + 
fieldRef: + fieldPath: metadata.namespace + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/log/kube-ovn + name: kube-ovn-log + - mountPath: /var/run/tls + name: kube-ovn-tls + readinessProbe: + exec: + command: + - /kube-ovn/kube-ovn-controller-healthcheck + periodSeconds: 3 + timeoutSeconds: 45 + livenessProbe: + exec: + command: + - /kube-ovn/kube-ovn-controller-healthcheck + initialDelaySeconds: 300 + periodSeconds: 7 + failureThreshold: 5 + timeoutSeconds: 45 + resources: + requests: + cpu: {{ kube_ovn_controller_cpu_request }} + memory: {{ kube_ovn_controller_memory_request }} + limits: + cpu: {{ kube_ovn_controller_cpu_limit }} + memory: {{ kube_ovn_controller_memory_limit }} + nodeSelector: + kubernetes.io/os: "linux" + volumes: + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-log + hostPath: + path: /var/log/kube-ovn + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls + +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: kube-ovn-cni + namespace: kube-system + annotations: + kubernetes.io/description: | + This daemon set launches the kube-ovn cni daemon. +spec: + selector: + matchLabels: + app: kube-ovn-cni + template: + metadata: + labels: + app: kube-ovn-cni + component: network + type: infra + spec: + tolerations: + - operator: Exists + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + hostPID: true + initContainers: + - name: install-cni + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/kube-ovn/install-cni.sh"] + securityContext: + runAsUser: 0 + privileged: true + volumeMounts: + - mountPath: /opt/cni/bin + name: cni-bin + containers: + - name: cni-server + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - bash + - /kube-ovn/start-cniserver.sh + args: + - --enable-mirror={{ kube_ovn_traffic_mirror | lower }} + - --encap-checksum={{ kube_ovn_encap_checksum | lower }} + - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}} + - --iface={{ kube_ovn_iface|default('') }} + - --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }} + - --network-type={{ kube_ovn_network_type }} + - --default-interface-name={{ kube_ovn_default_interface_name|default('') }} +{% if kube_ovn_mtu is defined %} + - --mtu={{ kube_ovn_mtu }} +{% endif %} + - --cni-conf-name={{ kube_ovn_cni_config_priority }}-kube-ovn.conflist + - --logtostderr=false + - --alsologtostderr=true + - --log_file=/var/log/kube-ovn/kube-ovn-cni.log + - --log_file_max_size=0 + securityContext: + runAsUser: 0 + privileged: true + env: + - name: kube_ovn_enable_ssl + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MODULES + value: kube_ovn_fastpath.ko + - name: RPMS + value: openvswitch-kmod + volumeMounts: + - name: host-modules + mountPath: /lib/modules + readOnly: true + - name: shared-dir + mountPath: /var/lib/kubelet/pods + - mountPath: /etc/openvswitch + name: systemid + - mountPath: /etc/cni/net.d + name: cni-conf + - mountPath: /run/openvswitch + name: host-run-ovs + mountPropagation: Bidirectional + - 
mountPath: /run/ovn + name: host-run-ovn + - mountPath: /var/run/netns + name: host-ns + mountPropagation: HostToContainer + - mountPath: /var/log/kube-ovn + name: kube-ovn-log + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + - mountPath: /etc/localtime + name: localtime + - mountPath: /tmp + name: tmp + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 7 + successThreshold: 1 + tcpSocket: + port: 10665 + timeoutSeconds: 3 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 7 + successThreshold: 1 + tcpSocket: + port: 10665 + timeoutSeconds: 3 + resources: + requests: + cpu: {{ kube_ovn_cni_server_cpu_request }} + memory: {{ kube_ovn_cni_server_memory_request }} + limits: + cpu: {{ kube_ovn_cni_server_cpu_limit }} + memory: {{ kube_ovn_cni_server_memory_limit }} + nodeSelector: + kubernetes.io/os: "linux" + volumes: + - name: host-modules + hostPath: + path: /lib/modules + - name: shared-dir + hostPath: + path: /var/lib/kubelet/pods + - name: systemid + hostPath: + path: /etc/origin/openvswitch + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: cni-conf + hostPath: + path: /etc/cni/net.d + - name: cni-bin + hostPath: + path: /opt/cni/bin + - name: host-ns + hostPath: + path: /var/run/netns + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: kube-ovn-log + hostPath: + path: /var/log/kube-ovn + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: tmp + hostPath: + path: /tmp +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: kube-ovn-pinger + namespace: kube-system + annotations: + kubernetes.io/description: | + This daemon set launches the openvswitch daemon. 
+spec: + selector: + matchLabels: + app: kube-ovn-pinger + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: kube-ovn-pinger + component: network + type: infra + spec: + serviceAccountName: ovn + hostPID: true + containers: + - name: pinger + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /kube-ovn/kube-ovn-pinger + args: + - --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{''}} + - --external-dns={{ kube_ovn_external_dns }} + - --logtostderr=false + - --alsologtostderr=true + - --log_file=/var/log/kube-ovn/kube-ovn-pinger.log + - --log_file_max_size=0 + securityContext: + runAsUser: 0 + privileged: false + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /lib/modules + name: host-modules + readOnly: true + - mountPath: /run/openvswitch + name: host-run-ovs + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /sys + name: host-sys + readOnly: true + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + - mountPath: /var/log/kube-ovn + name: kube-ovn-log + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/run/tls + name: kube-ovn-tls + resources: + requests: + cpu: {{ kube_ovn_pinger_cpu_request }} + memory: {{ kube_ovn_pinger_memory_request }} + limits: + cpu: {{ kube_ovn_pinger_cpu_limit }} + memory: {{ kube_ovn_pinger_memory_limit }} + nodeSelector: + kubernetes.io/os: "linux" + volumes: + - name: host-modules + hostPath: + path: /lib/modules + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-sys + hostPath: + path: /sys + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: kube-ovn-log + hostPath: + path: /var/log/kube-ovn + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: kube-ovn-monitor + namespace: kube-system + annotations: + kubernetes.io/description: | + Metrics for OVN components: northd, nb and sb. 
+spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: kube-ovn-monitor + template: + metadata: + labels: + app: kube-ovn-monitor + component: network + type: infra + spec: + tolerations: + - operator: Exists + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: kube-ovn-monitor + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + containers: + - name: kube-ovn-monitor + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/kube-ovn/start-ovn-monitor.sh"] + securityContext: + runAsUser: 0 + privileged: false + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + requests: + cpu: {{ kube_ovn_monitor_cpu_request }} + memory: {{ kube_ovn_monitor_memory_request }} + limits: + cpu: {{ kube_ovn_monitor_cpu_limit }} + memory: {{ kube_ovn_monitor_memory_limit }} + volumeMounts: + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /etc/ovn + name: host-config-ovn + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/run/tls + name: kube-ovn-tls + readinessProbe: + exec: + command: + - cat + - /var/run/ovn/ovn-controller.pid + periodSeconds: 10 + timeoutSeconds: 45 + livenessProbe: + exec: + command: + - cat + - /var/run/ovn/ovn-controller.pid + initialDelaySeconds: 30 + periodSeconds: 10 + failureThreshold: 5 + timeoutSeconds: 45 + nodeSelector: + kubernetes.io/os: "linux" + kube-ovn/role: "master" + volumes: + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-config-ovn + hostPath: + path: /etc/origin/ovn + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-monitor + namespace: kube-system + labels: + app: kube-ovn-monitor +spec: + ports: + - name: metrics + port: 10661 + type: ClusterIP +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-monitor + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-pinger + namespace: kube-system + labels: + app: kube-ovn-pinger +spec: +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-pinger + ports: + - port: 8080 + name: metrics +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-controller + namespace: kube-system + labels: + app: kube-ovn-controller +spec: +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-controller + ports: + - port: 10660 + name: metrics +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-cni + namespace: kube-system + labels: + 
app: kube-ovn-cni +spec: +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-cni + ports: + - port: 10665 + name: metrics diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 new file mode 100644 index 0000000..2d8a5c3 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 @@ -0,0 +1,513 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ovn + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.k8s.io/system-only: "true" + name: system:ovn +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - kube-ovn + - apiGroups: + - "kubeovn.io" + resources: + - vpcs + - vpcs/status + - vpc-nat-gateways + - subnets + - subnets/status + - ips + - vips + - vips/status + - vlans + - vlans/status + - provider-networks + - provider-networks/status + - security-groups + - security-groups/status + - htbqoses + - iptables-eips + - iptables-fip-rules + - iptables-dnat-rules + - iptables-snat-rules + - iptables-eips/status + - iptables-fip-rules/status + - iptables-dnat-rules/status + - iptables-snat-rules/status + verbs: + - "*" + - apiGroups: + - "" + resources: + - pods + - pods/exec + - namespaces + - nodes + - configmaps + verbs: + - create + - get + - list + - watch + - patch + - update + - apiGroups: + - "k8s.cni.cncf.io" + resources: + - network-attachment-definitions + verbs: + - create + - delete + - get + - list + - update + - apiGroups: + - "" + - networking.k8s.io + - apps + - extensions + resources: + - networkpolicies + - services + - endpoints + - statefulsets + - daemonsets + - deployments + - deployments/scale + verbs: + - create + - delete + - update + - patch + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - "*" + - apiGroups: + - "k8s.cni.cncf.io" + resources: + - network-attachment-definitions + verbs: + - create + - delete + - get + - list + - update + - apiGroups: + - "kubevirt.io" + resources: + - virtualmachines + - virtualmachineinstances + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ovn +roleRef: + name: system:ovn + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: ovn + namespace: kube-system +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-nb + namespace: kube-system +spec: + ports: + - name: ovn-nb + protocol: TCP + port: 6641 + targetPort: 6641 + type: ClusterIP +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: ovn-central + ovn-nb-leader: "true" + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-sb + namespace: kube-system +spec: + ports: + - name: ovn-sb + protocol: TCP + port: 6642 + targetPort: 6642 + type: ClusterIP +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: ovn-central + ovn-sb-leader: "true" + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-northd + namespace: kube-system +spec: + ports: + - name: ovn-northd + protocol: TCP + port: 6643 + targetPort: 6643 
+ type: ClusterIP +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: ovn-central + ovn-northd-leader: "true" + sessionAffinity: None +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: ovn-central + namespace: kube-system + annotations: + kubernetes.io/description: | + OVN components: northd, nb and sb. +spec: + replicas: {{ kube_ovn_central_replics }} + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: ovn-central + template: + metadata: + labels: + app: ovn-central + component: network + type: infra + spec: + tolerations: + - operator: Exists + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: ovn-central + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + containers: + - name: ovn-central + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/kube-ovn/start-db.sh"] + securityContext: + capabilities: + add: ["SYS_NICE"] + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + requests: + cpu: {{ kube_ovn_db_cpu_request }} + memory: {{ kube_ovn_db_memory_request }} + limits: + cpu: {{ kube_ovn_db_cpu_limit }} + memory: {{ kube_ovn_db_memory_limit }} + volumeMounts: + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /sys + name: host-sys + readOnly: true + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /etc/ovn + name: host-config-ovn + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/run/tls + name: kube-ovn-tls + readinessProbe: + exec: + command: + - bash + - /kube-ovn/ovn-healthcheck.sh + periodSeconds: 15 + timeoutSeconds: 45 + livenessProbe: + exec: + command: + - bash + - /kube-ovn/ovn-healthcheck.sh + initialDelaySeconds: 30 + periodSeconds: 15 + failureThreshold: 5 + timeoutSeconds: 45 + nodeSelector: + kubernetes.io/os: "linux" + kube-ovn/role: "master" + volumes: + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-sys + hostPath: + path: /sys + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-config-ovn + hostPath: + path: /etc/origin/ovn + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ovs-ovn + namespace: kube-system + annotations: + kubernetes.io/description: | + This daemon set launches the openvswitch daemon. 
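+# Editorial sketch, not part of the upstream template: the openvswitch container
+# in the spec below is rendered from the DPDK image and start script when
+# kube_ovn_dpdk_enabled is true. Assuming the defaults shown earlier, the DPDK
+# variant could be selected from group_vars with, for example:
+#   kube_ovn_dpdk_enabled: true
+#   kube_ovn_dpdk_tunnel_iface: br-phy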
+spec: + selector: + matchLabels: + app: ovs + updateStrategy: + type: OnDelete + template: + metadata: + labels: + app: ovs + component: network + type: infra + spec: + tolerations: + - operator: Exists + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + hostPID: true + containers: + - name: openvswitch + image: {% if kube_ovn_dpdk_enabled %}{{ kube_ovn_dpdk_container_image_repo }}:{{ kube_ovn_dpdk_container_image_tag }}{% else %}{{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}{% endif %} + + imagePullPolicy: {{ k8s_image_pull_policy }} + command: [{% if kube_ovn_dpdk_enabled %}"/kube-ovn/start-ovs-dpdk.sh"{% else %}"/kube-ovn/start-ovs.sh"{% endif %}] + securityContext: + runAsUser: 0 + privileged: true + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP +{% if not kube_ovn_dpdk_enabled %} + - name: HW_OFFLOAD + value: "{{ kube_ovn_hw_offload | string | lower }}" + - name: TUNNEL_TYPE + value: "{{ kube_ovn_tunnel_type }}" +{% endif %} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /var/run/netns + name: host-ns + mountPropagation: HostToContainer + - mountPath: /lib/modules + name: host-modules + readOnly: true + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /sys + name: host-sys + readOnly: true + - mountPath: /etc/cni/net.d + name: cni-conf + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /etc/ovn + name: host-config-ovn + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn +{% if kube_ovn_dpdk_enabled %} + - mountPath: /opt/ovs-config + name: host-config-ovs + - mountPath: /dev/hugepages + name: hugepage +{% endif %} + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/run/tls + name: kube-ovn-tls + readinessProbe: + exec: + command: + - bash +{% if kube_ovn_dpdk_enabled %} + - /kube-ovn/ovs-dpdk-healthcheck.sh +{% else %} + - /kube-ovn/ovs-healthcheck.sh +{% endif %} + periodSeconds: 5 + timeoutSeconds: 45 + livenessProbe: + exec: + command: + - bash +{% if kube_ovn_dpdk_enabled %} + - /kube-ovn/ovs-dpdk-healthcheck.sh +{% else %} + - /kube-ovn/ovs-healthcheck.sh +{% endif %} + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 5 + timeoutSeconds: 45 + resources: +{% if kube_ovn_dpdk_enabled %} + requests: + cpu: {{ kube_ovn_dpdk_node_cpu_request }} + memory: {{ kube_ovn_dpdk_node_memory_request }} + limits: + cpu: {{ kube_ovn_dpdk_node_cpu_limit }} + memory: {{ kube_ovn_dpdk_node_memory_limit }} + hugepages-1Gi: 1Gi +{% else %} + requests: + cpu: {{ kube_ovn_node_cpu_request }} + memory: {{ kube_ovn_node_memory_request }} + limits: + cpu: {{ kube_ovn_node_cpu_limit }} + memory: {{ kube_ovn_node_memory_limit }} +{% endif %} + nodeSelector: + kubernetes.io/os: "linux" + ovn.kubernetes.io/ovs_dp_type: "kernel" + volumes: + - name: host-modules + hostPath: + path: /lib/modules + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-sys + hostPath: + path: /sys + - name: host-ns + hostPath: + path: /var/run/netns + - name: cni-conf + hostPath: + path: /etc/cni/net.d + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-config-ovn + hostPath: + path: /etc/origin/ovn + - name: host-log-ovs + 
hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn +{% if kube_ovn_dpdk_enabled %} + - name: host-config-ovs + hostPath: + path: /opt/ovs-config + type: DirectoryOrCreate + - name: hugepage + emptyDir: + medium: HugePages +{% endif %} + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-router/OWNERS b/kubespray/extra_playbooks/roles/network_plugin/kube-router/OWNERS new file mode 100644 index 0000000..c95aad2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-router/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - bozzo +reviewers: + - bozzo \ No newline at end of file diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-router/defaults/main.yml b/kubespray/extra_playbooks/roles/network_plugin/kube-router/defaults/main.yml new file mode 100644 index 0000000..5d4dccc --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-router/defaults/main.yml @@ -0,0 +1,66 @@ +--- +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertised to the BGP peers. +kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with the changes needed for DSR +kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peers to which cluster nodes will advertise the cluster IP and each node's pod CIDR. +kube_router_peer_router_asns: ~ + +# The IP address of the external router to which all nodes will peer and advertise the cluster IP and pod CIDRs. +kube_router_peer_router_ips: ~ + +# The remote port of the external BGP peer to which all nodes will peer. If not set, the default BGP port (179) will be used. +kube_router_peer_router_ports: ~ + +# Sets up node CNI to allow hairpin mode; requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. +kube_router_dns_policy: ClusterFirstWithHostNet + +# Adds annotations to kubernetes nodes for advanced configuration of BGP Peers.
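+# As an illustration only (hypothetical peer values), each entry in the lists
+# below is passed verbatim to `kubectl annotate --overwrite node <node> <entry>`
+# by the annotate tasks, e.g.:
+# kube_router_annotations_master:
+#   - "kube-router.io/peer.ips=192.0.2.1"
+#   - "kube-router.io/peer.asns=64512"
+# The supported annotation keys are described in the kube-router BGP docs: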
+# https://github.com/cloudnativelabs/kube-router/blob/master/docs/bgp.md + +# Array of annotations for master +kube_router_annotations_master: [] + +# Array of annotations for every node +kube_router_annotations_node: [] + +# Array of common annotations for every node +kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +kube_router_metrics_port: 9255 diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-router/handlers/main.yml b/kubespray/extra_playbooks/roles/network_plugin/kube-router/handlers/main.yml new file mode 100644 index 0000000..7bdfc5d --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-router/handlers/main.yml @@ -0,0 +1,20 @@ +--- +- name: reset_kube_router + command: /bin/true + notify: + - Kube-router | delete kube-router docker containers + - Kube-router | delete kube-router crio/containerd containers + +- name: Kube-router | delete kube-router docker containers + shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f" + register: docker_kube_router_remove + until: docker_kube_router_remove is succeeded + retries: 5 + when: container_manager in ["docker"] + +- name: Kube-router | delete kube-router crio/containerd containers + shell: '{{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' + register: crictl_kube_router_remove + until: crictl_kube_router_remove is succeeded + retries: 5 + when: container_manager in ["crio", "containerd"] diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-router/meta/main.yml b/kubespray/extra_playbooks/roles/network_plugin/kube-router/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-router/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-router/tasks/annotate.yml b/kubespray/extra_playbooks/roles/network_plugin/kube-router/tasks/annotate.yml new file mode 100644 index 0000000..e91249f --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-router/tasks/annotate.yml @@ -0,0 +1,21 @@ +--- +- name: kube-router | Add annotations on kube_control_plane + command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" + with_items: + - "{{ kube_router_annotations_master }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane'] + +- name: kube-router | Add annotations on kube_node + command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" + with_items: + - "{{ kube_router_annotations_node }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node'] + +- name: kube-router | Add common annotations on all servers + command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" + with_items: + - "{{ kube_router_annotations_all }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s_cluster'] diff --git 
a/kubespray/extra_playbooks/roles/network_plugin/kube-router/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/kube-router/tasks/main.yml new file mode 100644 index 0000000..4cc078a --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-router/tasks/main.yml @@ -0,0 +1,62 @@ +--- +- name: kube-router | Create annotations + include: annotate.yml + tags: annotate + +- name: kube-router | Create config directory + file: + path: /var/lib/kube-router + state: directory + owner: "{{ kube_owner }}" + recurse: true + mode: 0755 + +- name: kube-router | Create kubeconfig + template: + src: kubeconfig.yml.j2 + dest: /var/lib/kube-router/kubeconfig + mode: 0644 + owner: "{{ kube_owner }}" + notify: + - reset_kube_router + +- name: kube-router | Slurp cni config + slurp: + src: /etc/cni/net.d/10-kuberouter.conflist + register: cni_config_slurp + ignore_errors: true # noqa ignore-errors + +- name: kube-router | Set cni_config variable + set_fact: + cni_config: "{{ cni_config_slurp.content | b64decode | from_json }}" + when: + - not cni_config_slurp.failed + +- name: kube-router | Set host_subnet variable + set_fact: + host_subnet: "{{ cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | first }}" + when: + - cni_config is defined + - cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | length > 0 + +- name: kube-router | Create cni config + template: + src: cni-conf.json.j2 + dest: /etc/cni/net.d/10-kuberouter.conflist + mode: 0644 + owner: "{{ kube_owner }}" + notify: + - reset_kube_router + +- name: kube-router | Delete old configuration + file: + path: /etc/cni/net.d/10-kuberouter.conf + state: absent + +- name: kube-router | Create manifest + template: + src: kube-router.yml.j2 + dest: "{{ kube_config_dir }}/kube-router.yml" + mode: 0644 + delegate_to: "{{ groups['kube_control_plane'] | first }}" + run_once: true diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-router/tasks/reset.yml b/kubespray/extra_playbooks/roles/network_plugin/kube-router/tasks/reset.yml new file mode 100644 index 0000000..7b8ad2c --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-router/tasks/reset.yml @@ -0,0 +1,28 @@ +--- +- name: reset | check kube-dummy-if network device + stat: + path: /sys/class/net/kube-dummy-if + get_attributes: no + get_checksum: no + get_mime: no + register: kube_dummy_if + +- name: reset | remove the network device created by kube-router + command: ip link del kube-dummy-if + when: kube_dummy_if.stat.exists + +- name: reset | check kube-bridge exists + stat: + path: /sys/class/net/kube-bridge + get_attributes: no + get_checksum: no + get_mime: no + register: kube_bridge_if + +- name: reset | down the network bridge created by kube-router + command: ip link set kube-bridge down + when: kube_bridge_if.stat.exists + +- name: reset | remove the network bridge created by kube-router + command: ip link del kube-bridge + when: kube_bridge_if.stat.exists diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-router/templates/cni-conf.json.j2 b/kubespray/extra_playbooks/roles/network_plugin/kube-router/templates/cni-conf.json.j2 new file mode 100644 index 0000000..91fafac --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-router/templates/cni-conf.json.j2 @@ -0,0 +1,27 @@ +{ + "cniVersion":"0.3.0", + "name":"kubernetes", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, +{% if kube_router_support_hairpin_mode %} +
"hairpinMode":true, +{% endif %} + "ipam":{ +{% if host_subnet is defined %} + "subnet": "{{ host_subnet }}", +{% endif %} + "type":"host-local" + } + }, + { + "type":"portmap", + "capabilities":{ + "portMappings":true + } + } + ] +} diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-router/templates/kube-router.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/kube-router/templates/kube-router.yml.j2 new file mode 100644 index 0000000..ab677ab --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-router/templates/kube-router.yml.j2 @@ -0,0 +1,220 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + minReadySeconds: 3 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: +{% if kube_router_enable_metrics %} + prometheus.io/path: {{ kube_router_metrics_path }} + prometheus.io/port: "{{ kube_router_metrics_port }}" + prometheus.io/scrape: "true" +{% endif %} + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + containers: + - name: kube-router + image: {{ kube_router_image_repo }}:{{ kube_router_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --run-router={{ kube_router_run_router | bool }} + - --run-firewall={{ kube_router_run_firewall | bool }} + - --run-service-proxy={{ kube_router_run_service_proxy | bool }} + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --bgp-graceful-restart=true +{% if kube_router_advertise_cluster_ip %} + - --advertise-cluster-ip +{% endif %} +{% if kube_router_advertise_external_ip %} + - --advertise-external-ip +{% endif %} +{% if kube_router_advertise_loadbalancer_ip %} + - --advertise-loadbalancer-ip +{% endif %} +{% if kube_router_cluster_asn %} + - --cluster-asn={{ kube_router_cluster_asn }} +{% endif %} +{% if kube_router_peer_router_asns %} + - --peer-router-asns={{ kube_router_peer_router_asns }} +{% endif %} +{% if kube_router_peer_router_ips %} + - --peer-router-ips={{ kube_router_peer_router_ips }} +{% endif %} +{% if kube_router_peer_router_ports %} + - --peer-router-ports={{ kube_router_peer_router_ports }} +{% endif %} +{% if kube_router_enable_metrics %} + - --metrics-path={{ kube_router_metrics_path }} + - --metrics-port={{ kube_router_metrics_port }} +{% endif %} +{% if kube_router_enable_dsr %} +{% if container_manager == "docker" %} + - --runtime-endpoint=unix:///var/run/docker.sock +{% endif %} +{% if container_manager == "containerd" %} + - --runtime-endpoint=unix:///run/containerd/containerd.sock +{% endif %} +{% endif %} +{% for arg in kube_router_extra_args %} + - "{{ arg }}" +{% endfor %} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: +{% if kube_router_enable_dsr %} +{% if container_manager == "docker" %} + - name: docker-socket + mountPath: /var/run/docker.sock + readOnly: true +{% endif %} +{% if container_manager == "containerd" %} + - name: containerd-socket + mountPath: /run/containerd/containerd.sock + readOnly: true +{% endif %} +{% endif %} + -
name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false +{% if kube_router_enable_metrics %} + ports: + - containerPort: {{ kube_router_metrics_port }} + hostPort: {{ kube_router_metrics_port }} + name: metrics + protocol: TCP +{% endif %} + hostNetwork: true + dnsPolicy: {{ kube_router_dns_policy }} +{% if kube_router_enable_dsr %} + hostIPC: true + hostPID: true +{% endif %} + tolerations: + - operator: Exists + volumes: +{% if kube_router_enable_dsr %} +{% if container_manager == "docker" %} + - name: docker-socket + hostPath: + path: /var/run/docker.sock + type: Socket +{% endif %} +{% if container_manager == "containerd" %} + - name: containerd-socket + hostPath: + path: /run/containerd/containerd.sock + type: Socket +{% endif %} +{% endif %} + - name: lib-modules + hostPath: + path: /lib/modules + - name: cni-conf-dir + hostPath: + path: /etc/cni/net.d + - name: kubeconfig + hostPath: + path: /var/lib/kube-router + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 new file mode 100644 index 0000000..42fd317 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusterCIDR: {{ kube_pods_subnet }} +clusters: +- name: cluster + cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-router + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +contexts: +- context: + cluster: cluster + user: kube-router + name: kube-router-context +current-context: kube-router-context diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/OWNERS b/kubespray/extra_playbooks/roles/network_plugin/macvlan/OWNERS new file mode 100644 index 0000000..c5dfbc7 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - simon +reviewers: + - simon diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/defaults/main.yml b/kubespray/extra_playbooks/roles/network_plugin/macvlan/defaults/main.yml new file mode 100644 index 0000000..70a8dd0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/defaults/main.yml @@ -0,0 
+1,6 @@ +--- +macvlan_interface: eth0 +enable_nat_default_gateway: true + +# sysctl_file_path to add sysctl conf to +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifdown-local b/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifdown-local new file mode 100644 index 0000000..003b8a1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifdown-local @@ -0,0 +1,6 @@ +#!/bin/bash + +POSTDOWNNAME="/etc/sysconfig/network-scripts/post-down-$1" +if [ -x $POSTDOWNNAME ]; then + exec $POSTDOWNNAME +fi diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifdown-macvlan b/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifdown-macvlan new file mode 100755 index 0000000..4d26db5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifdown-macvlan @@ -0,0 +1,41 @@ +#!/bin/bash +# +# initscripts-macvlan +# Copyright (C) 2014 Lars Kellogg-Stedman +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. /etc/init.d/functions + +cd /etc/sysconfig/network-scripts +. ./network-functions + +[ -f ../network ] && . ../network + +CONFIG=${1} + +need_config ${CONFIG} + +source_config + +OTHERSCRIPT="/etc/sysconfig/network-scripts/ifdown-${REAL_DEVICETYPE}" + +if [ ! -x ${OTHERSCRIPT} ]; then + OTHERSCRIPT="/etc/sysconfig/network-scripts/ifdown-eth" +fi + +${OTHERSCRIPT} ${CONFIG} + +ip link del ${DEVICE} type ${TYPE:-macvlan} + diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifup-local b/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifup-local new file mode 100755 index 0000000..3b6891e --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifup-local @@ -0,0 +1,6 @@ +#!/bin/bash + +POSTUPNAME="/etc/sysconfig/network-scripts/post-up-$1" +if [ -x $POSTUPNAME ]; then + exec $POSTUPNAME +fi diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifup-macvlan b/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifup-macvlan new file mode 100755 index 0000000..8dc61aa --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/files/ifup-macvlan @@ -0,0 +1,44 @@ +#!/bin/bash +# +# initscripts-macvlan +# Copyright (C) 2014 Lars Kellogg-Stedman +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. 
/etc/init.d/functions + +cd /etc/sysconfig/network-scripts +. ./network-functions + +[ -f ../network ] && . ../network + +CONFIG=${1} + +need_config ${CONFIG} + +source_config + +OTHERSCRIPT="/etc/sysconfig/network-scripts/ifup-${REAL_DEVICETYPE}" + +if [ ! -x ${OTHERSCRIPT} ]; then + OTHERSCRIPT="/etc/sysconfig/network-scripts/ifup-eth" +fi + +ip link add \ + link ${MACVLAN_PARENT} \ + name ${DEVICE} \ + type ${TYPE:-macvlan} mode ${MACVLAN_MODE:-private} + +${OTHERSCRIPT} ${CONFIG} + diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/handlers/main.yml b/kubespray/extra_playbooks/roles/network_plugin/macvlan/handlers/main.yml new file mode 100644 index 0000000..abb018c --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/handlers/main.yml @@ -0,0 +1,19 @@ +--- +- name: Macvlan | restart network + command: /bin/true + notify: + - Macvlan | reload network + when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: Macvlan | reload network + service: + name: >- + {% if ansible_os_family == "RedHat" -%} + network + {%- elif ansible_distribution == "Ubuntu" and ansible_distribution_release == "bionic" -%} + systemd-networkd + {%- elif ansible_os_family == "Debian" -%} + networking + {%- endif %} + state: restarted + when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and kube_network_plugin not in ['canal', 'calico'] diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/meta/main.yml b/kubespray/extra_playbooks/roles/network_plugin/macvlan/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/macvlan/tasks/main.yml new file mode 100644 index 0000000..bdc2dbc --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/tasks/main.yml @@ -0,0 +1,110 @@ +--- +- name: Macvlan | Retrieve Pod Cidr + command: "{{ kubectl }} get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'" + changed_when: false + register: node_pod_cidr_cmd + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Macvlan | set node_pod_cidr + set_fact: + node_pod_cidr={{ node_pod_cidr_cmd.stdout }} + +- name: Macvlan | Retrieve default gateway network interface + become: false + raw: ip -4 route list 0/0 | sed 's/.*dev \([[:alnum:]]*\).*/\1/' + changed_when: false + register: node_default_gateway_interface_cmd + +- name: Macvlan | set node_default_gateway_interface + set_fact: + node_default_gateway_interface={{ node_default_gateway_interface_cmd.stdout | trim }} + +- name: Macvlan | Install network gateway interface on debian + template: + src: debian-network-macvlan.cfg.j2 + dest: /etc/network/interfaces.d/60-mac0.cfg + mode: 0644 + notify: Macvlan | restart network + when: ansible_os_family in ["Debian"] + +- block: + - name: Macvlan | Install macvlan script on centos + copy: + src: "{{ item }}" + dest: /etc/sysconfig/network-scripts/ + owner: root + group: root + mode: "0755" + with_fileglob: + - files/* + + - name: Macvlan | Install post-up script on centos + copy: + src: "files/ifup-local" + dest: /sbin/ + owner: root + group: root + mode: "0755" + when: enable_nat_default_gateway + + - name: Macvlan | Install network gateway interface on centos + template: 
+ src: "{{ item.src }}.j2" + dest: "/etc/sysconfig/network-scripts/{{ item.dst }}" + mode: 0644 + with_items: + - {src: centos-network-macvlan.cfg, dst: ifcfg-mac0 } + - {src: centos-routes-macvlan.cfg, dst: route-mac0 } + - {src: centos-postup-macvlan.cfg, dst: post-up-mac0 } + notify: Macvlan | restart network + + when: ansible_os_family == "RedHat" + +- block: + - name: Macvlan | Install service nat via gateway on Flatcar Container Linux + template: + src: coreos-service-nat_ouside.j2 + dest: /etc/systemd/system/enable_nat_ouside.service + mode: 0644 + when: enable_nat_default_gateway + + - name: Macvlan | Enable service nat via gateway on Flatcar Container Linux + command: "{{ item }}" + with_items: + - systemctl daemon-reload + - systemctl enable enable_nat_ouside.service + when: enable_nat_default_gateway + + - name: Macvlan | Install network gateway interface on Flatcar Container Linux + template: + src: "{{ item.src }}.j2" + dest: "/etc/systemd/network/{{ item.dst }}" + mode: 0644 + with_items: + - {src: coreos-device-macvlan.cfg, dst: macvlan.netdev } + - {src: coreos-interface-macvlan.cfg, dst: output.network } + - {src: coreos-network-macvlan.cfg, dst: macvlan.network } + notify: Macvlan | restart network + + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: Macvlan | Install cni definition for Macvlan + template: + src: 10-macvlan.conf.j2 + dest: /etc/cni/net.d/10-macvlan.conf + mode: 0644 + +- name: Macvlan | Install loopback definition for Macvlan + template: + src: 99-loopback.conf.j2 + dest: /etc/cni/net.d/99-loopback.conf + mode: 0644 + +- name: Enable net.ipv4.conf.all.arp_notify in sysctl + sysctl: + name: net.ipv4.conf.all.arp_notify + value: 1 + sysctl_set: yes + sysctl_file: "{{ sysctl_file_path }}" + state: present + reload: yes diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 new file mode 100644 index 0000000..10598a2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 @@ -0,0 +1,15 @@ +{ + "cniVersion": "0.4.0", + "name": "mynet", + "type": "macvlan", + "master": "{{ macvlan_interface }}", + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "{{ node_pod_cidr }}", + "routes": [ + { "dst": "0.0.0.0/0" } + ], + "gateway": "{{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" + } +} diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 new file mode 100644 index 0000000..b41ab65 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 @@ -0,0 +1,5 @@ +{ + "cniVersion": "0.2.0", + "name": "lo", + "type": "loopback" +} diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 new file mode 100644 index 0000000..e7bad78 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 @@ -0,0 +1,14 @@ +DEVICE=mac0 +DEVICETYPE=macvlan +TYPE=macvlan +BOOTPROTO=none +ONBOOT=yes +NM_CONTROLLED=no + +MACVLAN_PARENT={{ macvlan_interface }} +MACVLAN_MODE=bridge + +IPADDR={{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }} +NETMASK={{ 
node_pod_cidr|ipaddr('netmask') }} +NETWORK={{ node_pod_cidr|ipaddr('network') }} + diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 new file mode 100644 index 0000000..f3edd99 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 @@ -0,0 +1,4 @@ +{% if enable_nat_default_gateway %} +iptables -t nat -D POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE +{% endif %} + diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 new file mode 100644 index 0000000..35cd5b5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 @@ -0,0 +1,4 @@ +{% if enable_nat_default_gateway %} +iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE +{% endif %} + diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 new file mode 100644 index 0000000..60400dd --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 @@ -0,0 +1,7 @@ +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} +{{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} +{% endif %} +{% endif %} +{% endfor %} diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 new file mode 100644 index 0000000..2418dac --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 @@ -0,0 +1,6 @@ +[NetDev] +Name=mac0 +Kind=macvlan + +[MACVLAN] +Mode=bridge diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 new file mode 100644 index 0000000..342f680 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 @@ -0,0 +1,6 @@ +[Match] +Name={{ macvlan_interface }} + +[Network] +MACVLAN=mac0 +DHCP=yes diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 new file mode 100644 index 0000000..696eba5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 @@ -0,0 +1,18 @@ +[Match] +Name=mac0 + +[Network] +Address={{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }}/{{ node_pod_cidr|ipaddr('prefix') }} + +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} +[Route] +Gateway={{ hostvars[host]['access_ip'] }} +Destination={{ hostvars[host]['node_pod_cidr'] }} +GatewayOnlink=yes + +{% endif %} +{% endif 
%} +{% endfor %} + diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 new file mode 100644 index 0000000..5f00b00 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 @@ -0,0 +1,6 @@ +[Service] +Type=oneshot +ExecStart=/bin/bash -c "iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE" + +[Install] +WantedBy=sys-subsystem-net-devices-mac0.device diff --git a/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 new file mode 100644 index 0000000..9edd6d1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 @@ -0,0 +1,27 @@ +auto mac0 +iface mac0 inet static + address {{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }} + network {{ node_pod_cidr|ipaddr('network') }} + netmask {{ node_pod_cidr|ipaddr('netmask') }} + broadcast {{ node_pod_cidr|ipaddr('broadcast') }} + pre-up ip link add link {{ macvlan_interface }} mac0 type macvlan mode bridge +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} + post-up ip route add {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} +{% endif %} +{% endif %} +{% endfor %} +{% if enable_nat_default_gateway %} + post-up iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE +{% endif %} +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} + post-down ip route del {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} +{% endif %} +{% endif %} +{% endfor %} + post-down iptables -t nat -D POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE + post-down ip link delete mac0 + diff --git a/kubespray/extra_playbooks/roles/network_plugin/meta/main.yml b/kubespray/extra_playbooks/roles/network_plugin/meta/main.yml new file mode 100644 index 0000000..cb013fc --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/meta/main.yml @@ -0,0 +1,48 @@ +--- +dependencies: + - role: network_plugin/cni + + - role: network_plugin/cilium + when: kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool + tags: + - cilium + + - role: network_plugin/calico + when: kube_network_plugin == 'calico' + tags: + - calico + + - role: network_plugin/flannel + when: kube_network_plugin == 'flannel' + tags: + - flannel + + - role: network_plugin/weave + when: kube_network_plugin == 'weave' + tags: + - weave + + - role: network_plugin/canal + when: kube_network_plugin == 'canal' + tags: + - canal + + - role: network_plugin/macvlan + when: kube_network_plugin == 'macvlan' + tags: + - macvlan + + - role: network_plugin/kube-ovn + when: kube_network_plugin == 'kube-ovn' + tags: + - kube-ovn + + - role: network_plugin/kube-router + when: kube_network_plugin == 'kube-router' + tags: + - kube-router + + - role: network_plugin/multus + when: kube_network_plugin_multus + tags: + - multus diff --git 
a/kubespray/extra_playbooks/roles/network_plugin/multus/defaults/main.yml b/kubespray/extra_playbooks/roles/network_plugin/multus/defaults/main.yml new file mode 100644 index 0000000..cbeb4cb --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/multus/defaults/main.yml @@ -0,0 +1,10 @@ +--- +multus_conf_file: "auto" +multus_cni_conf_dir_host: "/etc/cni/net.d" +multus_cni_bin_dir_host: "/opt/cni/bin" +multus_cni_run_dir_host: "/run" +multus_cni_conf_dir: "{{ ('/host', multus_cni_conf_dir_host) | join }}" +multus_cni_bin_dir: "{{ ('/host', multus_cni_bin_dir_host) | join }}" +multus_cni_run_dir: "{{ ('/host', multus_cni_run_dir_host) | join }}" +multus_cni_version: "0.4.0" +multus_kubeconfig_file_host: "{{ (multus_cni_conf_dir_host, '/multus.d/multus.kubeconfig') | join }}" diff --git a/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-clusterrole.yml b/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-clusterrole.yml new file mode 100644 index 0000000..b574069 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-clusterrole.yml @@ -0,0 +1,28 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +rules: + - apiGroups: ["k8s.cni.cncf.io"] + resources: + - '*' + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update diff --git a/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-clusterrolebinding.yml b/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-clusterrolebinding.yml new file mode 100644 index 0000000..2d1e1a4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-clusterrolebinding.yml @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: +- kind: ServiceAccount + name: multus + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-crd.yml b/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-crd.yml new file mode 100644 index 0000000..24b2c58 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-crd.yml @@ -0,0 +1,45 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + scope: Namespaced + names: + plural: network-attachment-definitions + singular: network-attachment-definition + kind: NetworkAttachmentDefinition + shortNames: + - net-attach-def + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing + Working Group to express the intent for attaching pods to one or more logical or physical + networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this represen + tation of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. 
More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' + type: object + properties: + config: + description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' + type: string diff --git a/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-serviceaccount.yml b/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-serviceaccount.yml new file mode 100644 index 0000000..6242308 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/multus/files/multus-serviceaccount.yml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus + namespace: kube-system diff --git a/kubespray/extra_playbooks/roles/network_plugin/multus/meta/main.yml b/kubespray/extra_playbooks/roles/network_plugin/multus/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/multus/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/extra_playbooks/roles/network_plugin/multus/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/multus/tasks/main.yml new file mode 100644 index 0000000..3552b05 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/multus/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: Multus | Copy manifest files + copy: + src: "{{ item.file }}" + dest: "{{ kube_config_dir }}" + mode: 0644 + with_items: + - {name: multus-crd, file: multus-crd.yml, type: customresourcedefinition} + - {name: multus-serviceaccount, file: multus-serviceaccount.yml, type: serviceaccount} + - {name: multus-clusterrole, file: multus-clusterrole.yml, type: clusterrole} + - {name: multus-clusterrolebinding, file: multus-clusterrolebinding.yml, type: clusterrolebinding} + register: multus_manifest_1 + +- name: Multus | Copy manifest templates + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: multus-daemonset, file: multus-daemonset.yml, type: daemonset} + register: multus_manifest_2 diff --git a/kubespray/extra_playbooks/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 new file mode 100644 index 0000000..494dee2 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 @@ -0,0 +1,71 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: kube-multus-ds-{{ image_arch }} + namespace: kube-system + labels: + tier: node + app: multus +spec: + selector: + matchLabels: + tier: node + app: multus + template: + metadata: + labels: + tier: node + app: multus + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + kubernetes.io/arch: {{ image_arch }} + tolerations: + - operator: Exists + serviceAccountName: multus + containers: + - name: kube-multus + image: {{ multus_image_repo }}:{{ multus_image_tag }} + command: ["/entrypoint.sh"] 
+ args: + - "--cni-conf-dir={{ multus_cni_conf_dir }}" + - "--cni-bin-dir={{ multus_cni_bin_dir }}" + - "--multus-conf-file={{ multus_conf_file }}" + - "--multus-kubeconfig-file-host={{ multus_kubeconfig_file_host }}" + - "--cni-version={{ multus_cni_version }}" + resources: + requests: + cpu: "100m" + memory: "90Mi" + limits: + cpu: "100m" + memory: "90Mi" + securityContext: + privileged: true +{% if container_manager == 'crio' %} + capabilities: + add: ["SYS_ADMIN"] +{% endif %} + volumeMounts: +{% if container_manager == 'crio' %} + - name: run + mountPath: {{ multus_cni_run_dir }} +{% endif %} + - name: cni + mountPath: {{ multus_cni_conf_dir }} + - name: cnibin + mountPath: {{ multus_cni_bin_dir }} + volumes: +{% if container_manager == 'crio' %} + - name: run + hostPath: + path: {{ multus_cni_run_dir_host }} +{% endif %} + - name: cni + hostPath: + path: {{ multus_cni_conf_dir_host }} + - name: cnibin + hostPath: + path: {{ multus_cni_bin_dir_host }} diff --git a/kubespray/extra_playbooks/roles/network_plugin/ovn4nfv/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/ovn4nfv/tasks/main.yml new file mode 100644 index 0000000..da21266 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/ovn4nfv/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: ovn4nfv | Label control-plane node + command: >- + {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: ovn4nfv | Create ovn4nfv-k8s manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: ovn-daemonset, file: ovn-daemonset.yml} + - {name: ovn4nfv-k8s-plugin, file: ovn4nfv-k8s-plugin.yml} + register: ovn4nfv_node_manifests diff --git a/kubespray/extra_playbooks/roles/network_plugin/weave/defaults/main.yml b/kubespray/extra_playbooks/roles/network_plugin/weave/defaults/main.yml new file mode 100644 index 0000000..47469ae --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/weave/defaults/main.yml @@ -0,0 +1,64 @@ +--- + +# Weave's network password for encryption, if null then no network encryption. +weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. 
+weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works +# only with Weave IPAM (default). +weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +weave_iptables_backend: ~ + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +weave_npc_extra_args: ~ diff --git a/kubespray/extra_playbooks/roles/network_plugin/weave/meta/main.yml b/kubespray/extra_playbooks/roles/network_plugin/weave/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/weave/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/extra_playbooks/roles/network_plugin/weave/tasks/main.yml b/kubespray/extra_playbooks/roles/network_plugin/weave/tasks/main.yml new file mode 100644 index 0000000..ae4a5a4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/weave/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Weave | Create manifest + template: + src: weave-net.yml.j2 + dest: "{{ kube_config_dir }}/weave-net.yml" + mode: 0644 + +- name: Weave | Fix nodePort for Weave + template: + src: 10-weave.conflist.j2 + dest: /etc/cni/net.d/10-weave.conflist + mode: 0644 diff --git a/kubespray/extra_playbooks/roles/network_plugin/weave/templates/10-weave.conflist.j2 b/kubespray/extra_playbooks/roles/network_plugin/weave/templates/10-weave.conflist.j2 new file mode 100644 index 0000000..9aab7e9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/weave/templates/10-weave.conflist.j2 @@ -0,0 +1,16 @@ +{ + "cniVersion": "0.3.0", + "name": "weave", + "plugins": [ + { + "name": "weave", + "type": "weave-net", + "hairpinMode": {{ weave_hairpin_mode | bool | lower }} + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + } + ] +} diff --git a/kubespray/extra_playbooks/roles/network_plugin/weave/templates/weave-net.yml.j2 b/kubespray/extra_playbooks/roles/network_plugin/weave/templates/weave-net.yml.j2 new file mode 100644 index 0000000..84c4fa0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -0,0 +1,297 @@ +--- +apiVersion: v1 +kind: List +items: + - apiVersion: v1 + kind: ServiceAccount + metadata: + name: weave-net + labels: + name: weave-net + namespace: kube-system + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: weave-net + labels: + name: weave-net + rules: + - apiGroups: + - '' + resources: + - pods + - namespaces + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - 'networking.k8s.io' + 
resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - nodes/status + verbs: + - patch + - update + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: weave-net + labels: + name: weave-net + roleRef: + kind: ClusterRole + name: weave-net + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: kube-system + - apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: weave-net + namespace: kube-system + labels: + name: weave-net + rules: + - apiGroups: + - '' + resources: + - configmaps + resourceNames: + - weave-net + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: weave-net + namespace: kube-system + labels: + name: weave-net + roleRef: + kind: Role + name: weave-net + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: kube-system + - apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: weave-net + labels: + name: weave-net + namespace: kube-system + spec: + # Wait 5 seconds to let pod connect before rolling next pod + selector: + matchLabels: + name: weave-net + minReadySeconds: 5 + template: + metadata: + labels: + name: weave-net + spec: + initContainers: + - name: weave-init + image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /home/weave/init.sh + env: + securityContext: + privileged: true + volumeMounts: + - name: cni-bin + mountPath: /host/opt + - name: cni-bin2 + mountPath: /host/home + - name: cni-conf + mountPath: /host/etc + - name: lib-modules + mountPath: /lib/modules + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + containers: + - name: weave + command: + - /home/weave/launch.sh + env: + - name: INIT_CONTAINER + value: "true" + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: WEAVE_PASSWORD + valueFrom: + secretKeyRef: + name: weave-net + key: WEAVE_PASSWORD + - name: CHECKPOINT_DISABLE + value: "{{ weave_checkpoint_disable | bool | int }}" + - name: CONN_LIMIT + value: "{{ weave_conn_limit | int }}" + - name: HAIRPIN_MODE + value: "{{ weave_hairpin_mode | bool | lower }}" + - name: IPALLOC_RANGE + value: "{{ weave_ipalloc_range }}" + - name: EXPECT_NPC + value: "{{ weave_expect_npc | bool | int }}" +{% if weave_kube_peers %} + - name: KUBE_PEERS + value: "{{ weave_kube_peers }}" +{% endif %} +{% if weave_ipalloc_init %} + - name: IPALLOC_INIT + value: "{{ weave_ipalloc_init }}" +{% endif %} +{% if weave_expose_ip %} + - name: WEAVE_EXPOSE_IP + value: "{{ weave_expose_ip }}" +{% endif %} +{% if weave_metrics_addr %} + - name: WEAVE_METRICS_ADDR + value: "{{ weave_metrics_addr }}" +{% endif %} +{% if weave_status_addr %} + - name: WEAVE_STATUS_ADDR + value: "{{ weave_status_addr }}" +{% endif %} +{% if weave_iptables_backend %} + - name: IPTABLES_BACKEND + value: "{{ weave_iptables_backend }}" +{% endif %} + - name: WEAVE_MTU + value: "{{ weave_mtu | int }}" + - name: NO_MASQ_LOCAL + value: "{{ weave_no_masq_local | bool | int }}" +{% if weave_extra_args %} + - name: EXTRA_ARGS + value: "{{ weave_extra_args }}" +{% endif %} + image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /status 
+ port: 6784 + resources: + requests: + cpu: 50m + securityContext: + privileged: true + volumeMounts: + - name: weavedb + mountPath: /weavedb + - name: dbus + mountPath: /host/var/lib/dbus + readOnly: true + - mountPath: /host/etc/machine-id + name: cni-machine-id + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + - name: weave-npc + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName +{% if weave_npc_extra_args %} + - name: EXTRA_ARGS + value: "{{ weave_npc_extra_args }}" +{% endif %} + image: {{ weave_npc_image_repo }}:{{ weave_npc_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: 50m + securityContext: + privileged: true + volumeMounts: + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + hostPID: false + restartPolicy: Always + securityContext: + seLinuxOptions: {} + serviceAccountName: weave-net + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: weavedb + hostPath: + path: /var/lib/weave + - name: cni-bin + hostPath: + path: /opt + - name: cni-bin2 + hostPath: + path: /home + - name: cni-conf + hostPath: + path: /etc + - name: cni-machine-id + hostPath: + path: /etc/machine-id + - name: dbus + hostPath: + path: /var/lib/dbus + - name: lib-modules + hostPath: + path: /lib/modules + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + priorityClassName: system-node-critical + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate + - apiVersion: v1 + kind: Secret + metadata: + name: weave-net + namespace: kube-system + data: + WEAVE_PASSWORD: "{{ weave_password | default("") | b64encode }}" diff --git a/kubespray/extra_playbooks/roles/recover_control_plane/OWNERS b/kubespray/extra_playbooks/roles/recover_control_plane/OWNERS new file mode 100644 index 0000000..cb814a1 --- /dev/null +++ b/kubespray/extra_playbooks/roles/recover_control_plane/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - qvicksilver + - yujunz +reviewers: + - qvicksilver + - yujunz diff --git a/kubespray/extra_playbooks/roles/recover_control_plane/control-plane/defaults/main.yml b/kubespray/extra_playbooks/roles/recover_control_plane/control-plane/defaults/main.yml new file mode 100644 index 0000000..229514b --- /dev/null +++ b/kubespray/extra_playbooks/roles/recover_control_plane/control-plane/defaults/main.yml @@ -0,0 +1,2 @@ +--- +bin_dir: /usr/local/bin diff --git a/kubespray/extra_playbooks/roles/recover_control_plane/control-plane/tasks/main.yml b/kubespray/extra_playbooks/roles/recover_control_plane/control-plane/tasks/main.yml new file mode 100644 index 0000000..4a4e3eb --- /dev/null +++ b/kubespray/extra_playbooks/roles/recover_control_plane/control-plane/tasks/main.yml @@ -0,0 +1,29 @@ +--- +- name: Wait for apiserver + command: "{{ kubectl }} get nodes" + environment: + - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config" + register: apiserver_is_ready + until: apiserver_is_ready.rc == 0 + retries: 6 + delay: 10 + changed_when: false + when: groups['broken_kube_control_plane'] + +- name: Delete broken kube_control_plane nodes from cluster + command: "{{ kubectl }} delete node {{ item }}" + environment: + - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config" + with_items: "{{ 
groups['broken_kube_control_plane'] }}" + register: delete_broken_kube_masters + failed_when: false + when: groups['broken_kube_control_plane'] + +- name: Fail if unable to delete broken kube_control_plane nodes from cluster + fail: + msg: "Unable to delete broken kube_control_plane node: {{ item.item }}" + loop: "{{ delete_broken_kube_masters.results }}" + changed_when: false + when: + - groups['broken_kube_control_plane'] + - "item.rc != 0 and not 'NotFound' in item.stderr" diff --git a/kubespray/extra_playbooks/roles/recover_control_plane/etcd/tasks/main.yml b/kubespray/extra_playbooks/roles/recover_control_plane/etcd/tasks/main.yml new file mode 100644 index 0000000..45e2c65 --- /dev/null +++ b/kubespray/extra_playbooks/roles/recover_control_plane/etcd/tasks/main.yml @@ -0,0 +1,93 @@ +--- +- name: Get etcd endpoint health + command: "{{ bin_dir }}/etcdctl endpoint health" + register: etcd_endpoint_health + ignore_errors: true # noqa ignore-errors + changed_when: false + check_mode: no + environment: + ETCDCTL_API: 3 + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + when: + - groups['broken_etcd'] + +- name: Set healthy fact + set_fact: + healthy: "{{ etcd_endpoint_health.stderr is match('Error: unhealthy cluster') }}" + when: + - groups['broken_etcd'] + +- name: Set has_quorum fact + set_fact: + has_quorum: "{{ etcd_endpoint_health.stdout_lines | select('match', '.*is healthy.*') | list | length >= etcd_endpoint_health.stderr_lines | select('match', '.*is unhealthy.*') | list | length }}" + when: + - groups['broken_etcd'] + +- include_tasks: recover_lost_quorum.yml + when: + - groups['broken_etcd'] + - not has_quorum + +- name: Remove etcd data dir + file: + path: "{{ etcd_data_dir }}" + state: absent + delegate_to: "{{ item }}" + with_items: "{{ groups['broken_etcd'] }}" + ignore_errors: true # noqa ignore-errors + when: + - groups['broken_etcd'] + - has_quorum + +- name: Delete old certificates + # noqa 302 ignore-error - rm is ok here for now + shell: "rm {{ etcd_cert_dir }}/*{{ item }}*" + with_items: "{{ groups['broken_etcd'] }}" + register: delete_old_cerificates + ignore_errors: true + when: groups['broken_etcd'] + +- name: Fail if unable to delete old certificates + fail: + msg: "Unable to delete old certificates for: {{ item.item }}" + loop: "{{ delete_old_cerificates.results }}" + changed_when: false + when: + - groups['broken_etcd'] + - "item.rc != 0 and not 'No such file or directory' in item.stderr" + +- name: Get etcd cluster members + command: "{{ bin_dir }}/etcdctl member list" + register: member_list + changed_when: false + check_mode: no + environment: + ETCDCTL_API: 3 + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + when: + - groups['broken_etcd'] + - not healthy + - has_quorum + +- name: Remove broken cluster members + command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}" + environment: + ETCDCTL_API: 3 + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir 
}}/ca.pem" + with_nested: + - "{{ groups['broken_etcd'] }}" + - "{{ member_list.stdout_lines }}" + when: + - groups['broken_etcd'] + - not healthy + - has_quorum + - hostvars[item[0]]['etcd_member_name'] == item[1].replace(' ','').split(',')[2] diff --git a/kubespray/extra_playbooks/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/kubespray/extra_playbooks/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml new file mode 100644 index 0000000..1ecc90f --- /dev/null +++ b/kubespray/extra_playbooks/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml @@ -0,0 +1,59 @@ +--- +- name: Save etcd snapshot + command: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db" + environment: + - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + - ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + - ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}" + - ETCDCTL_API: 3 + when: etcd_snapshot is not defined + +- name: Transfer etcd snapshot to host + copy: + src: "{{ etcd_snapshot }}" + dest: /tmp/snapshot.db + mode: 0640 + when: etcd_snapshot is defined + +- name: Stop etcd + systemd: + name: etcd + state: stopped + +- name: Remove etcd data-dir + file: + path: "{{ etcd_data_dir }}" + state: absent + +- name: Restore etcd snapshot # noqa 301 305 + shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}" + environment: + - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + - ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + - ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + - ETCDCTL_API: 3 + +- name: Remove etcd snapshot + file: + path: /tmp/snapshot.db + state: absent + +- name: Change etcd data-dir owner + file: + path: "{{ etcd_data_dir }}" + owner: etcd + group: etcd + recurse: true + +- name: Reconfigure etcd + replace: + path: /etc/etcd.env + regexp: "^(ETCD_INITIAL_CLUSTER=).*" + replace: '\1{{ etcd_member_name }}={{ etcd_peer_url }}' + +- name: Start etcd + systemd: + name: etcd + state: started diff --git a/kubespray/extra_playbooks/roles/recover_control_plane/post-recover/tasks/main.yml b/kubespray/extra_playbooks/roles/recover_control_plane/post-recover/tasks/main.yml new file mode 100644 index 0000000..b1cd5e5 --- /dev/null +++ b/kubespray/extra_playbooks/roles/recover_control_plane/post-recover/tasks/main.yml @@ -0,0 +1,19 @@ +--- +# TODO: Figure out why kubeadm does not fix this +- name: Set etcd-servers fact + set_fact: + etcd_servers: >- + {% for host in groups['etcd'] -%} + {% if not loop.last -%} + https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2379, + {%- endif -%} + {%- if loop.last -%} + https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2379 + {%- endif -%} + {%- endfor -%} + +- name: Update apiserver etcd-servers list + replace: + path: /etc/kubernetes/manifests/kube-apiserver.yaml + regexp: "(etcd-servers=).*" + replace: "\\1{{ etcd_servers }}" diff --git a/kubespray/extra_playbooks/roles/remove-node/post-remove/defaults/main.yml 
b/kubespray/extra_playbooks/roles/remove-node/post-remove/defaults/main.yml new file mode 100644 index 0000000..11298b9 --- /dev/null +++ b/kubespray/extra_playbooks/roles/remove-node/post-remove/defaults/main.yml @@ -0,0 +1,3 @@ +--- +delete_node_retries: 10 +delete_node_delay_seconds: 3 diff --git a/kubespray/extra_playbooks/roles/remove-node/post-remove/tasks/main.yml b/kubespray/extra_playbooks/roles/remove-node/post-remove/tasks/main.yml new file mode 100644 index 0000000..36b1e9f --- /dev/null +++ b/kubespray/extra_playbooks/roles/remove-node/post-remove/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: remove-node | Delete node + command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}" + delegate_to: "{{ groups['kube_control_plane']|first }}" + # ignore servers that are not nodes + when: inventory_hostname in groups['k8s_cluster'] and kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines + retries: "{{ delete_node_retries }}" + # Sometimes the api-server can have a short window of unavailability when we delete a master node + delay: "{{ delete_node_delay_seconds }}" + register: result + until: result is not failed diff --git a/kubespray/extra_playbooks/roles/remove-node/pre-remove/defaults/main.yml b/kubespray/extra_playbooks/roles/remove-node/pre-remove/defaults/main.yml new file mode 100644 index 0000000..deaa8af --- /dev/null +++ b/kubespray/extra_playbooks/roles/remove-node/pre-remove/defaults/main.yml @@ -0,0 +1,6 @@ +--- +allow_ungraceful_removal: false +drain_grace_period: 300 +drain_timeout: 360s +drain_retries: 3 +drain_retry_delay_seconds: 10 diff --git a/kubespray/extra_playbooks/roles/remove-node/pre-remove/tasks/main.yml b/kubespray/extra_playbooks/roles/remove-node/pre-remove/tasks/main.yml new file mode 100644 index 0000000..add5120 --- /dev/null +++ b/kubespray/extra_playbooks/roles/remove-node/pre-remove/tasks/main.yml @@ -0,0 +1,38 @@ +--- +- name: remove-node | List nodes + command: >- + {{ kubectl }} get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %} + register: nodes + delegate_to: "{{ groups['kube_control_plane']|first }}" + changed_when: false + run_once: true + +- name: remove-node | Drain node except daemonsets resource # noqa 301 + command: >- + {{ kubectl }} drain + --force + --ignore-daemonsets + --grace-period {{ drain_grace_period }} + --timeout {{ drain_timeout }} + --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }} + # ignore servers that are not nodes + when: kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines + register: result + failed_when: result.rc != 0 and not allow_ungraceful_removal + delegate_to: "{{ groups['kube_control_plane']|first }}" + until: result.rc == 0 or allow_ungraceful_removal + retries: "{{ drain_retries }}" + delay: "{{ drain_retry_delay_seconds }}" + +- name: remove-node | Wait until volumes are detached from the node + command: >- + {{ kubectl }} get volumeattachments -o go-template={% raw %}'{{ range .items }}{{ .spec.nodeName }}{{ "\n" }}{{ end }}'{% endraw %} + register: nodes_with_volumes + delegate_to: "{{ groups['kube_control_plane']|first }}" + changed_when: false + until: not (kube_override_hostname|default(inventory_hostname) in nodes_with_volumes.stdout_lines) + retries: 3 + delay: "{{ drain_grace_period }}" + when: + - not allow_ungraceful_removal + - kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines diff --git
a/kubespray/extra_playbooks/roles/remove-node/remove-etcd-node/tasks/main.yml b/kubespray/extra_playbooks/roles/remove-node/remove-etcd-node/tasks/main.yml new file mode 100644 index 0000000..7500d6d --- /dev/null +++ b/kubespray/extra_playbooks/roles/remove-node/remove-etcd-node/tasks/main.yml @@ -0,0 +1,55 @@ +--- +- name: Lookup node IP in kubernetes + command: > + {{ kubectl }} get nodes {{ node }} + -o jsonpath='{range .status.addresses[?(@.type=="InternalIP")]}{@.address}{"\n"}{end}' + register: remove_node_ip + when: + - inventory_hostname in groups['etcd'] + - ip is not defined + - access_ip is not defined + delegate_to: "{{ groups['etcd']|first }}" + failed_when: false + +- name: Set node IP + set_fact: + node_ip: "{{ ip | default(access_ip | default(remove_node_ip.stdout)) | trim }}" + when: + - inventory_hostname in groups['etcd'] + +- name: Make sure node_ip is set + assert: + that: node_ip is defined and node_ip | length > 0 + msg: "Etcd node ip is not set !" + when: + - inventory_hostname in groups['etcd'] + +- name: Lookup etcd member id + shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1" + register: etcd_member_id + ignore_errors: true # noqa ignore-errors + changed_when: false + check_mode: no + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}" + ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}" + ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}" + ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379" + delegate_to: "{{ groups['etcd']|first }}" + when: inventory_hostname in groups['etcd'] + +- name: Remove etcd member from cluster + command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}" + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}" + ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}" + ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}" + ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379" + delegate_to: "{{ groups['etcd']|first }}" + when: + - inventory_hostname in groups['etcd'] + - etcd_member_id.stdout | length > 0 diff --git a/kubespray/extra_playbooks/roles/reset/defaults/main.yml b/kubespray/extra_playbooks/roles/reset/defaults/main.yml new file mode 100644 index 0000000..e45cee4 --- /dev/null +++ b/kubespray/extra_playbooks/roles/reset/defaults/main.yml @@ -0,0 +1,3 @@ +--- +flush_iptables: true +reset_restart_network: true diff --git a/kubespray/extra_playbooks/roles/reset/tasks/main.yml b/kubespray/extra_playbooks/roles/reset/tasks/main.yml new file mode 100644 index 0000000..24be4cd --- /dev/null +++ b/kubespray/extra_playbooks/roles/reset/tasks/main.yml @@ -0,0 +1,431 @@ +--- +- name: reset | stop services + service: + name: "{{ item }}" + state: stopped + with_items: + - kubelet.service + - cri-dockerd.service + - cri-dockerd.socket + failed_when: false + tags: + - services + +- name: reset | remove services + file: + path: "/etc/systemd/system/{{ item }}" + state: absent + 
with_items: + - kubelet.service + - cri-dockerd.service + - cri-dockerd.socket + - calico-node.service + - containerd.service.d/http-proxy.conf + - crio.service.d/http-proxy.conf + - k8s-certs-renew.service + - k8s-certs-renew.timer + register: services_removed + tags: + - services + - containerd + - crio + +- name: reset | Remove Docker + include_role: + name: container-engine/docker + tasks_from: reset + when: container_manager == 'docker' + tags: + - docker + +- name: reset | systemctl daemon-reload # noqa 503 + systemd: + daemon_reload: true + when: services_removed.changed + +- name: reset | check if crictl is present + stat: + path: "{{ bin_dir }}/crictl" + get_attributes: no + get_checksum: no + get_mime: no + register: crictl + +- name: reset | stop all cri containers + shell: "set -o pipefail && {{ bin_dir }}/crictl ps -q | xargs -r {{ bin_dir }}/crictl -t 60s stop" + args: + executable: /bin/bash + register: remove_all_cri_containers + retries: 5 + until: remove_all_cri_containers.rc == 0 + delay: 5 + tags: + - crio + - containerd + when: + - crictl.stat.exists + - container_manager in ["crio", "containerd"] + ignore_errors: true # noqa ignore-errors + +- name: reset | force remove all cri containers + command: "{{ bin_dir }}/crictl rm -a -f" + register: remove_all_cri_containers + retries: 5 + until: remove_all_cri_containers.rc == 0 + delay: 5 + tags: + - crio + - containerd + when: + - crictl.stat.exists + - container_manager in ["crio", "containerd"] + - deploy_container_engine + ignore_errors: true # noqa ignore-errors + +- name: reset | stop and disable crio service + service: + name: crio + state: stopped + enabled: false + failed_when: false + tags: [ crio ] + when: container_manager == "crio" + +- name: reset | forcefully wipe CRI-O's container and image storage + command: "crio wipe -f" + failed_when: false + tags: [ crio ] + when: container_manager == "crio" + +- name: reset | stop all cri pods + shell: "set -o pipefail && {{ bin_dir }}/crictl pods -q | xargs -r {{ bin_dir }}/crictl -t 60s stopp" + args: + executable: /bin/bash + register: remove_all_cri_containers + retries: 5 + until: remove_all_cri_containers.rc == 0 + delay: 5 + tags: [ containerd ] + when: + - crictl.stat.exists + - container_manager == "containerd" + ignore_errors: true # noqa ignore-errors + +- block: + - name: reset | force remove all cri pods + command: "{{ bin_dir }}/crictl rmp -a -f" + register: remove_all_cri_containers + retries: 5 + until: remove_all_cri_containers.rc == 0 + delay: 5 + tags: [ containerd ] + when: + - crictl.stat.exists + - container_manager == "containerd" + + rescue: + - name: reset | force remove all cri pods (rescue) + shell: "ip netns list | cut -d' ' -f 1 | xargs -n1 ip netns delete && {{ bin_dir }}/crictl rmp -a -f" + ignore_errors: true # noqa ignore-errors + changed_when: true + +- name: reset | stop etcd services + service: + name: "{{ item }}" + state: stopped + with_items: + - etcd + - etcd-events + failed_when: false + tags: + - services + +- name: reset | remove etcd services + file: + path: "/etc/systemd/system/{{ item }}.service" + state: absent + with_items: + - etcd + - etcd-events + register: services_removed + tags: + - services + +- name: reset | remove containerd + when: container_manager == 'containerd' + block: + - name: reset | stop containerd service + service: + name: containerd + state: stopped + failed_when: false + tags: + - services + + - name: reset | remove containerd service + file: + path: /etc/systemd/system/containerd.service + state: 
absent + register: services_removed + tags: + - services + +- name: reset | gather mounted kubelet dirs # noqa 301 + shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac + args: + executable: /bin/bash + warn: false + check_mode: no + register: mounted_dirs + failed_when: false + tags: + - mounts + +- name: reset | unmount kubelet dirs # noqa 301 + command: umount -f {{ item }} + with_items: "{{ mounted_dirs.stdout_lines }}" + register: umount_dir + when: mounted_dirs + retries: 4 + until: umount_dir.rc == 0 + delay: 5 + tags: + - mounts + +- name: flush iptables + iptables: + table: "{{ item }}" + flush: yes + with_items: + - filter + - nat + - mangle + - raw + when: flush_iptables|bool + tags: + - iptables + +- name: flush ip6tables + iptables: + table: "{{ item }}" + flush: yes + ip_version: ipv6 + with_items: + - filter + - nat + - mangle + - raw + when: flush_iptables|bool and enable_dual_stack_networks + tags: + - ip6tables + +- name: Clear IPVS virtual server table + command: "ipvsadm -C" + ignore_errors: true # noqa ignore-errors + when: + - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster'] + +- name: reset | check kube-ipvs0 network device + stat: + path: /sys/class/net/kube-ipvs0 + get_attributes: no + get_checksum: no + get_mime: no + register: kube_ipvs0 + +- name: reset | Remove kube-ipvs0 + command: "ip link del kube-ipvs0" + when: + - kube_proxy_mode == 'ipvs' + - kube_ipvs0.stat.exists + +- name: reset | check nodelocaldns network device + stat: + path: /sys/class/net/nodelocaldns + get_attributes: no + get_checksum: no + get_mime: no + register: nodelocaldns_device + +- name: reset | Remove nodelocaldns + command: "ip link del nodelocaldns" + when: + - enable_nodelocaldns|default(false)|bool + - nodelocaldns_device.stat.exists + +- name: reset | find files/dirs with immutable flag in /var/lib/kubelet + command: lsattr -laR /var/lib/kubelet + become: true + register: var_lib_kubelet_files_dirs_w_attrs + changed_when: false + no_log: true + +- name: reset | remove immutable flag from files/dirs in /var/lib/kubelet + file: + path: "{{ filedir_path }}" + state: touch + attributes: "-i" + loop: "{{ var_lib_kubelet_files_dirs_w_attrs.stdout_lines|select('search', 'Immutable')|list }}" + loop_control: + loop_var: file_dir_line + label: "{{ filedir_path }}" + vars: + filedir_path: "{{ file_dir_line.split(' ')[0] }}" + +- name: reset | delete some files and directories + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ kube_config_dir }}" + - /var/lib/kubelet + - "{{ containerd_storage_dir }}" + - "{{ ansible_env.HOME | default('/root') }}/.kube" + - "{{ ansible_env.HOME | default('/root') }}/.helm" + - "{{ ansible_env.HOME | default('/root') }}/.config/helm" + - "{{ ansible_env.HOME | default('/root') }}/.cache/helm" + - "{{ ansible_env.HOME | default('/root') }}/.local/share/helm" + - "{{ etcd_data_dir }}" + - "{{ etcd_events_data_dir }}" + - "{{ etcd_config_dir }}" + - /var/log/calico + - /etc/cni + - /etc/nerdctl + - "{{ nginx_config_dir }}" + - /etc/dnsmasq.d + - /etc/dnsmasq.conf + - /etc/dnsmasq.d-available + - /etc/etcd.env + - /etc/calico + - /etc/NetworkManager/conf.d/calico.conf + - /etc/NetworkManager/conf.d/k8s.conf + - /etc/weave.env + - /opt/cni + - /etc/dhcp/dhclient.d/zdnsupdate.sh + - /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate + - /run/flannel + - /etc/flannel + - /run/kubernetes + - /usr/local/share/ca-certificates/etcd-ca.crt + - /usr/local/share/ca-certificates/kube-ca.crt + - 
/etc/ssl/certs/etcd-ca.pem + - /etc/ssl/certs/kube-ca.pem + - /etc/pki/ca-trust/source/anchors/etcd-ca.crt + - /etc/pki/ca-trust/source/anchors/kube-ca.crt + - /var/log/pods/ + - "{{ bin_dir }}/kubelet" + - "{{ bin_dir }}/cri-dockerd" + - "{{ bin_dir }}/etcd-scripts" + - "{{ bin_dir }}/etcd" + - "{{ bin_dir }}/etcd-events" + - "{{ bin_dir }}/etcdctl" + - "{{ bin_dir }}/etcdctl.sh" + - "{{ bin_dir }}/kubernetes-scripts" + - "{{ bin_dir }}/kubectl" + - "{{ bin_dir }}/kubeadm" + - "{{ bin_dir }}/helm" + - "{{ bin_dir }}/calicoctl" + - "{{ bin_dir }}/calicoctl.sh" + - "{{ bin_dir }}/calico-upgrade" + - "{{ bin_dir }}/weave" + - "{{ bin_dir }}/crictl" + - "{{ bin_dir }}/nerdctl" + - "{{ bin_dir }}/netctl" + - "{{ bin_dir }}/k8s-certs-renew.sh" + - /var/lib/cni + - /etc/openvswitch + - /run/openvswitch + - /var/lib/kube-router + - /var/lib/calico + - /etc/cilium + - /run/calico + - /etc/bash_completion.d/kubectl.sh + - /etc/bash_completion.d/crictl + - /etc/bash_completion.d/nerdctl + - /etc/bash_completion.d/krew + - /etc/bash_completion.d/krew.sh + - "{{ krew_root_dir }}" + - /etc/modules-load.d/kube_proxy-ipvs.conf + - /etc/modules-load.d/kubespray-br_netfilter.conf + - /etc/modules-load.d/kubespray-kata-containers.conf + - /usr/libexec/kubernetes + - /etc/origin/openvswitch + - /etc/origin/ovn + - "{{ sysctl_file_path }}" + - /etc/crictl.yaml + ignore_errors: true # noqa ignore-errors + tags: + - files + +- name: reset | remove containerd binary files + file: + path: "{{ containerd_bin_dir }}/{{ item }}" + state: absent + with_items: + - containerd + - containerd-shim + - containerd-shim-runc-v1 + - containerd-shim-runc-v2 + - containerd-stress + - crictl + - critest + - ctd-decoder + - ctr + - runc + ignore_errors: true # noqa ignore-errors + when: container_manager == 'containerd' + tags: + - files + +- name: reset | remove dns settings from dhclient.conf + blockinfile: + path: "{{ item }}" + state: absent + marker: "# Ansible entries {mark}" + failed_when: false + with_items: + - /etc/dhclient.conf + - /etc/dhcp/dhclient.conf + tags: + - files + - dns + +- name: reset | remove host entries from /etc/hosts + blockinfile: + path: "/etc/hosts" + state: absent + marker: "# Ansible inventory hosts {mark}" + tags: + - files + - dns + +- name: reset | include file with reset tasks specific to the network_plugin if exists + include_role: + name: "network_plugin/{{ kube_network_plugin }}" + tasks_from: reset + when: + - kube_network_plugin in ['flannel', 'cilium', 'kube-router', 'calico'] + tags: + - network + +- name: reset | Restart network + service: + name: >- + {% if ansible_os_family == "RedHat" -%} + {%- if ansible_distribution_major_version|int >= 8 or is_fedora_coreos or ansible_distribution == "Fedora" -%} + NetworkManager + {%- else -%} + network + {%- endif -%} + {%- elif ansible_distribution == "Ubuntu" -%} + systemd-networkd + {%- elif ansible_os_family == "Debian" -%} + networking + {%- endif %} + state: restarted + when: + - ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + - reset_restart_network + tags: + - services + - network diff --git a/kubespray/extra_playbooks/roles/upgrade/post-upgrade/defaults/main.yml b/kubespray/extra_playbooks/roles/upgrade/post-upgrade/defaults/main.yml new file mode 100644 index 0000000..aa72843 --- /dev/null +++ b/kubespray/extra_playbooks/roles/upgrade/post-upgrade/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# how long to wait for cilium after upgrade before uncordoning +upgrade_post_cilium_wait_timeout: 120s 
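The timeout above parametrizes the readiness check that the post-upgrade tasks further below run before uncordoning a node; issued by hand against a placeholder node name it is roughly:

    kubectl -n kube-system wait pod -l k8s-app=cilium \
      --field-selector spec.nodeName==worker-1 \
      --for=condition=Ready --timeout=120s
    kubectl uncordon worker-1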
+upgrade_node_post_upgrade_confirm: false +upgrade_node_post_upgrade_pause_seconds: 0 diff --git a/kubespray/extra_playbooks/roles/upgrade/post-upgrade/tasks/main.yml b/kubespray/extra_playbooks/roles/upgrade/post-upgrade/tasks/main.yml new file mode 100644 index 0000000..d1b1af0 --- /dev/null +++ b/kubespray/extra_playbooks/roles/upgrade/post-upgrade/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: wait for cilium + when: + - needs_cordoning|default(false) + - kube_network_plugin == 'cilium' + command: > + {{ kubectl }} + wait pod -n kube-system -l k8s-app=cilium + --field-selector 'spec.nodeName=={{ kube_override_hostname|default(inventory_hostname) }}' + --for=condition=Ready + --timeout={{ upgrade_post_cilium_wait_timeout }} + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Confirm node uncordon + pause: + echo: yes + prompt: "Ready to uncordon node?" + when: + - upgrade_node_post_upgrade_confirm + +- name: Wait before uncordoning node + pause: + seconds: "{{ upgrade_node_post_upgrade_pause_seconds }}" + when: + - not upgrade_node_post_upgrade_confirm + - upgrade_node_post_upgrade_pause_seconds != 0 + +- name: Uncordon node + command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: + - needs_cordoning|default(false) diff --git a/kubespray/extra_playbooks/roles/upgrade/pre-upgrade/defaults/main.yml b/kubespray/extra_playbooks/roles/upgrade/pre-upgrade/defaults/main.yml new file mode 100644 index 0000000..900b834 --- /dev/null +++ b/kubespray/extra_playbooks/roles/upgrade/pre-upgrade/defaults/main.yml @@ -0,0 +1,20 @@ +--- +drain_grace_period: 300 +drain_timeout: 360s +drain_pod_selector: "" +drain_nodes: true +drain_retries: 3 +drain_retry_delay_seconds: 10 + +drain_fallback_enabled: false +drain_fallback_grace_period: 300 +drain_fallback_timeout: 360s +drain_fallback_retries: 0 +drain_fallback_retry_delay_seconds: 10 + +upgrade_node_always_cordon: false +upgrade_node_uncordon_after_drain_failure: true +upgrade_node_fail_if_drain_fails: true + +upgrade_node_confirm: false +upgrade_node_pause_seconds: 0 diff --git a/kubespray/extra_playbooks/roles/upgrade/pre-upgrade/tasks/main.yml b/kubespray/extra_playbooks/roles/upgrade/pre-upgrade/tasks/main.yml new file mode 100644 index 0000000..210818b --- /dev/null +++ b/kubespray/extra_playbooks/roles/upgrade/pre-upgrade/tasks/main.yml @@ -0,0 +1,130 @@ +--- +# Wait for upgrade +- name: Confirm node upgrade + pause: + echo: yes + prompt: "Ready to upgrade node? 
(Press Enter to continue or Ctrl+C for other options)" + when: + - upgrade_node_confirm + +- name: Wait before upgrade node + pause: + seconds: "{{ upgrade_node_pause_seconds }}" + when: + - not upgrade_node_confirm + - upgrade_node_pause_seconds != 0 + +# Node Ready: type = ready, status = True +# Node NotReady: type = ready, status = Unknown +- name: See if node is in ready state + command: > + {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }} + -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }' + register: kubectl_node_ready + delegate_to: "{{ groups['kube_control_plane'][0] }}" + failed_when: false + changed_when: false + +# SchedulingDisabled: unschedulable = true +# else unschedulable key doesn't exist +- name: See if node is schedulable + command: > + {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }} + -o jsonpath='{ .spec.unschedulable }' + register: kubectl_node_schedulable + delegate_to: "{{ groups['kube_control_plane'][0] }}" + failed_when: false + changed_when: false + +- name: Set if node needs cordoning + set_fact: + needs_cordoning: >- + {% if (kubectl_node_ready.stdout == "True" and not kubectl_node_schedulable.stdout) or upgrade_node_always_cordon -%} + true + {%- else -%} + false + {%- endif %} + +- name: Node draining + block: + - name: Cordon node + command: "{{ kubectl }} cordon {{ kube_override_hostname|default(inventory_hostname) }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + changed_when: true + + - name: Check kubectl version + command: "{{ kubectl }} version --client --short" + register: kubectl_version + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: yes + changed_when: false + when: + - drain_nodes + - drain_pod_selector + + - name: Ensure minimum version for drain label selector if necessary + assert: + that: "kubectl_version.stdout.split(' ')[-1] is version('v1.10.0', '>=')" + when: + - drain_nodes + - drain_pod_selector + + - name: Drain node + command: >- + {{ kubectl }} drain + --force + --ignore-daemonsets + --grace-period {{ hostvars['localhost']['drain_grace_period_after_failure'] | default(drain_grace_period) }} + --timeout {{ hostvars['localhost']['drain_timeout_after_failure'] | default(drain_timeout) }} + --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }} + {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %} + when: drain_nodes + register: result + failed_when: + - result.rc != 0 + - not drain_fallback_enabled + until: result.rc == 0 + retries: "{{ drain_retries }}" + delay: "{{ drain_retry_delay_seconds }}" + + - name: Drain fallback + block: + - name: Set facts after regular drain has failed + set_fact: + drain_grace_period_after_failure: "{{ drain_fallback_grace_period }}" + drain_timeout_after_failure: "{{ drain_fallback_timeout }}" + delegate_to: localhost + delegate_facts: yes + run_once: yes + + - name: Drain node - fallback with disabled eviction + command: >- + {{ kubectl }} drain + --force + --ignore-daemonsets + --grace-period {{ drain_fallback_grace_period }} + --timeout {{ drain_fallback_timeout }} + --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }} + {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %} + --disable-eviction + register: drain_fallback_result + until: drain_fallback_result.rc == 0 + retries: "{{ drain_fallback_retries }}" + delay: "{{ drain_fallback_retry_delay_seconds }}" + changed_when: 
drain_fallback_result.rc == 0 + when: + - drain_nodes + - drain_fallback_enabled + - result.rc != 0 + + rescue: + - name: Set node back to schedulable + command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}" + when: upgrade_node_uncordon_after_drain_failure + - name: Fail after rescue + fail: + msg: "Failed to drain node {{ kube_override_hostname|default(inventory_hostname) }}" + when: upgrade_node_fail_if_drain_fails + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: + - needs_cordoning diff --git a/kubespray/extra_playbooks/roles/win_nodes/kubernetes_patch/defaults/main.yml b/kubespray/extra_playbooks/roles/win_nodes/kubernetes_patch/defaults/main.yml new file mode 100644 index 0000000..954cb51 --- /dev/null +++ b/kubespray/extra_playbooks/roles/win_nodes/kubernetes_patch/defaults/main.yml @@ -0,0 +1,4 @@ +--- + +kubernetes_user_manifests_path: "{{ ansible_env.HOME }}/kube-manifests" +kube_proxy_nodeselector: "kubernetes.io/os" diff --git a/kubespray/extra_playbooks/roles/win_nodes/kubernetes_patch/tasks/main.yml b/kubespray/extra_playbooks/roles/win_nodes/kubernetes_patch/tasks/main.yml new file mode 100644 index 0000000..a6c70ed --- /dev/null +++ b/kubespray/extra_playbooks/roles/win_nodes/kubernetes_patch/tasks/main.yml @@ -0,0 +1,41 @@ +--- + +- name: Ensure that user manifests directory exists + file: + path: "{{ kubernetes_user_manifests_path }}/kubernetes" + state: directory + recurse: yes + tags: [init, cni] + +- name: Apply kube-proxy nodeselector + block: + # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch" + - name: Check current nodeselector for kube-proxy daemonset + command: >- + {{ kubectl }} + get ds kube-proxy --namespace=kube-system + -o jsonpath={.spec.template.spec.nodeSelector.{{ kube_proxy_nodeselector | regex_replace('\.', '\\.') }}} + register: current_kube_proxy_state + retries: 60 + delay: 5 + until: current_kube_proxy_state is succeeded + changed_when: false + + - name: Apply nodeselector patch for kube-proxy daemonset + command: > + {{ kubectl }} + patch ds kube-proxy --namespace=kube-system --type=strategic -p + '{"spec":{"template":{"spec":{"nodeSelector":{"{{ kube_proxy_nodeselector }}":"linux"} }}}}' + register: patch_kube_proxy_state + when: current_kube_proxy_state.stdout | trim | lower != "linux" + + - debug: # noqa unnamed-task + msg: "{{ patch_kube_proxy_state.stdout_lines }}" + when: patch_kube_proxy_state is not skipped + + - debug: # noqa unnamed-task + msg: "{{ patch_kube_proxy_state.stderr_lines }}" + when: patch_kube_proxy_state is not skipped + tags: init + when: + - kube_proxy_deployed diff --git a/kubespray/extra_playbooks/upgrade-only-k8s.yml b/kubespray/extra_playbooks/upgrade-only-k8s.yml new file mode 100644 index 0000000..13ebcc4 --- /dev/null +++ b/kubespray/extra_playbooks/upgrade-only-k8s.yml @@ -0,0 +1,58 @@ +--- +### NOTE: This playbook cannot be used to deploy any new nodes to the cluster. +### Additional information: +### * Will not upgrade etcd +### * Will not upgrade network plugins +### * Will not upgrade Docker +### * Will not pre-download containers or kubeadm +### * Currently does not support Vault deployment. +### +### In most cases, you probably want to use upgrade-cluster.yml playbook and +### not this one. 
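For reference, the kube-proxy nodeSelector check-and-patch performed by the win_nodes role above corresponds to the following kubectl calls (selector key per the role default; the patched value is what the task applies):

    kubectl -n kube-system get ds kube-proxy \
      -o jsonpath='{.spec.template.spec.nodeSelector.kubernetes\.io/os}'
    kubectl -n kube-system patch ds kube-proxy --type=strategic \
      -p '{"spec":{"template":{"spec":{"nodeSelector":{"kubernetes.io/os":"linux"}}}}}'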
+ +- hosts: localhost + gather_facts: False + roles: + - { role: kubespray-defaults} + - { role: bastion-ssh-config, tags: ["localhost", "bastion"]} + +- hosts: k8s_cluster:etcd:calico_rr + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + gather_facts: false + vars: + # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining + # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled. + ansible_ssh_pipelining: false + roles: + - { role: kubespray-defaults} + - { role: bootstrap-os, tags: bootstrap-os} + +- hosts: k8s_cluster:etcd:calico_rr + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + roles: + - { role: kubespray-defaults} + - { role: kubernetes/preinstall, tags: preinstall } + +- name: Handle upgrades to master components first to maintain backwards compat. + hosts: kube_control_plane + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + serial: 1 + roles: + - { role: kubespray-defaults} + - { role: upgrade/pre-upgrade, tags: pre-upgrade } + - { role: kubernetes/node, tags: node } + - { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true } + - { role: kubernetes/client, tags: client } + - { role: kubernetes-apps/cluster_roles, tags: cluster-roles } + - { role: upgrade/post-upgrade, tags: post-upgrade } + +- name: Finally handle worker upgrades, based on given batch size + hosts: kube_node:!kube_control_plane + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + serial: "{{ serial | default('20%') }}" + roles: + - { role: kubespray-defaults} + - { role: upgrade/pre-upgrade, tags: pre-upgrade } + - { role: kubernetes/node, tags: node } + - { role: upgrade/post-upgrade, tags: post-upgrade } + - { role: kubespray-defaults} diff --git a/kubespray/extra_playbooks/wait-for-cloud-init.yml b/kubespray/extra_playbooks/wait-for-cloud-init.yml new file mode 100644 index 0000000..7aa92d4 --- /dev/null +++ b/kubespray/extra_playbooks/wait-for-cloud-init.yml @@ -0,0 +1,5 @@ +--- +- hosts: all + tasks: + - name: Wait for cloud-init to finish + command: cloud-init status --wait diff --git a/kubespray/facts.yml b/kubespray/facts.yml new file mode 100644 index 0000000..d9ce340 --- /dev/null +++ b/kubespray/facts.yml @@ -0,0 +1,27 @@ +--- +- name: Gather facts + hosts: k8s_cluster:etcd:calico_rr + gather_facts: False + tags: always + tasks: + - name: Gather minimal facts + setup: + gather_subset: '!all' + + # filter match the following variables: + # ansible_default_ipv4 + # ansible_default_ipv6 + # ansible_all_ipv4_addresses + # ansible_all_ipv6_addresses + - name: Gather necessary facts (network) + setup: + gather_subset: '!all,!min,network' + filter: "ansible_*_ipv[46]*" + + # filter match the following variables: + # ansible_memtotal_mb + # ansible_swaptotal_mb + - name: Gather necessary facts (hardware) + setup: + gather_subset: '!all,!min,hardware' + filter: "ansible_*total_mb" diff --git a/kubespray/index.html b/kubespray/index.html new file mode 100644 index 0000000..0a3d17d --- /dev/null +++ b/kubespray/index.html @@ -0,0 +1,47 @@ + + + + + Kubespray - Deploy a Production Ready Kubernetes Cluster + + + + + + +
+ + + + + + + diff --git a/kubespray/inventory/local/group_vars/all/all.yml b/kubespray/inventory/local/group_vars/all/all.yml new file mode 100644 index 0000000..b9639a8 --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/all.yml @@ -0,0 +1,140 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful in AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If Cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## And must be set to port 6443 +loadbalancer_apiserver_port: 6443 + +## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it uses the nameservers to make sure the cluster installs safely in the dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set, the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False +
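With the internal load balancer settings earlier in this file (nginx or haproxy listening on port 6443, liveness probe port 8081), a quick per-node sanity check could look like the following; the /healthz path is an assumption about the proxy's healthcheck endpoint rather than something defined here:

    curl -sk https://127.0.0.1:6443/version
    curl -s http://127.0.0.1:8081/healthz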
+## If you need to exclude all cluster nodes from the proxy and other resources, add those resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will install from the source of your choice +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Choose 'none' if you provide your own certificates. +## Options are "script" or "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set to true to download and cache container images +# download_container: true + +## Deploy container engine +# Set to false if you want to deploy the container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot.
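If the ntp_* settings that follow are enabled, time sync on the nodes can be spot-checked afterwards; chrony is assumed here, with ntpq as the fallback for ntpd-based hosts:

    chronyc tracking
    chronyc sources -v
    # on hosts running classic ntpd instead:
    ntpq -p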
+ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/inventory/local/group_vars/all/aws.yml b/kubespray/inventory/local/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/inventory/local/group_vars/all/azure.yml b/kubespray/inventory/local/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/inventory/local/group_vars/all/containerd.yml b/kubespray/inventory/local/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. 
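Once the registry mappings below are rendered into the containerd configuration, a pull through the CRI is the quickest way to confirm they took effect; the registry address and image are placeholders reusing the example values from the comments:

    crictl pull 172.19.16.11:5000/busybox:latest
    curl -s http://172.19.16.11:5000/v2/_catalog   # the plain-HTTP v2 endpoint the mapping points at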
+## Can be an IP address or a domain name. +## For example, define mirror.registry.io or 172.19.16.11:5000 +## Set "name": "url". Insecure URLs must start with http:// +## Port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/inventory/local/group_vars/all/coreos.yml b/kubespray/inventory/local/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Whether CoreOS should auto-upgrade; default is true +# coreos_auto_upgrade: true diff --git a/kubespray/inventory/local/group_vars/all/cri-o.yml b/kubespray/inventory/local/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/inventory/local/group_vars/all/docker.yml b/kubespray/inventory/local/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## A disk path must be defined for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be an IP address or a domain name. +## For example, define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registries, for example a China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value.
+## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/inventory/local/group_vars/all/etcd.yml b/kubespray/inventory/local/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/inventory/local/group_vars/all/gcp.yml b/kubespray/inventory/local/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/inventory/local/group_vars/all/hcloud.yml b/kubespray/inventory/local/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git a/kubespray/inventory/local/group_vars/all/oci.yml b/kubespray/inventory/local/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you wish +# 
oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. +# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/inventory/local/group_vars/all/offline.yml b/kubespray/inventory/local/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] 
Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Flannel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repos must be available; for EL8, baseos and appstream +### By default we enable those repos automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{
debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/inventory/local/group_vars/all/openstack.yml b/kubespray/inventory/local/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff 
--git a/kubespray/inventory/local/group_vars/all/upcloud.yml b/kubespray/inventory/local/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/inventory/local/group_vars/all/vsphere.yml b/kubespray/inventory/local/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ b/kubespray/inventory/local/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/inventory/local/group_vars/etcd.yml b/kubespray/inventory/local/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/inventory/local/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. 
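Most of these etcd tunables can be checked against a running member; a rough spot-check of database size against the backend quota (endpoint and certificate environment as configured for the cluster) is:

    ETCDCTL_API=3 etcdctl endpoint status --write-out=table   # DB SIZE column vs the 2G default quota
    ETCDCTL_API=3 etcdctl alarm list                           # reports NOSPACE once the quota is exhausted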
+## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. +# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/inventory/local/group_vars/k8s_cluster/addons.yml b/kubespray/inventory/local/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/inventory/local/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# 
CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. 
+# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: "layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/inventory/local/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. 
+kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. 
both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). 
+# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. 
+# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. +# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. 
+# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). +## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. 
+# - If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enables BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN are mutually exclusive modes. +# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tuning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. +# If left blank, then the interface is chosen using the node's +# default route.
+# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. +# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all conneted clusters and +# in the range of 1 and 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +# cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). 
+# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. 
+# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. +# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. 
+# cilium_disable_cnp_status_updates: true diff --git a/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# Please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt requires a custom-compiled OVS kernel module +kube_ovn_tunnel_type: geneve + +## The NIC for the container network can be a NIC name or a comma-separated group of regexes, e.g. 'enp6s0f0,eth.*'; if empty, the NIC used by the default route is chosen. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port.
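+## Illustrative sketch only (the NIC name below is an assumption, not a default): offloading to a SmartNIC port
+## that already carries an IP address could look like this:
+# kube_ovn_iface: enp6s0f0
+# kube_ovn_hw_offload: true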
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router//defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust manifest of kube-router daemonset template with DSR needed changes +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Setups node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc. 
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/inventory/local/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works +# only with Weave IPAM (default). 
+# weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/inventory/local/hosts.ini b/kubespray/inventory/local/hosts.ini new file mode 100644 index 0000000..4a6197e --- /dev/null +++ b/kubespray/inventory/local/hosts.ini @@ -0,0 +1,14 @@ +node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases + +[kube_control_plane] +node1 + +[etcd] +node1 + +[kube_node] +node1 + +[k8s_cluster:children] +kube_node +kube_control_plane diff --git a/kubespray/inventory/sample/group_vars/all/all.yml b/kubespray/inventory/sample/group_vars/all/all.yml new file mode 100644 index 0000000..b9639a8 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/all.yml @@ -0,0 +1,140 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## If the cilium is going to be used in strict mode, we can use the +## localhost connection and not use the external LB. If this parameter is +## not specified, the first node to connect to kubeapi will be used. +# use_localhost_as_kubeapi_loadbalancer: true + +## Local loadbalancer should use this port +## And must be set port 6443 +loadbalancer_apiserver_port: 6443 + +## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. 
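+## Illustrative example only (an assumption, not the sample default): an OpenStack cluster that relies on the
+## external cloud controller configured further below would set:
+# cloud_provider: external
+# external_cloud_provider: openstack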
+# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies +# http_proxy: "" +# https_proxy: "" + +## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. 
+ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false diff --git a/kubespray/inventory/sample/group_vars/all/aws.yml b/kubespray/inventory/sample/group_vars/all/aws.yml new file mode 100644 index 0000000..dab674e --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/aws.yml @@ -0,0 +1,9 @@ +## To use AWS EBS CSI Driver to provision volumes, uncomment the first value +## and configure the parameters below +# aws_ebs_csi_enabled: true +# aws_ebs_csi_enable_volume_scheduling: true +# aws_ebs_csi_enable_volume_snapshot: false +# aws_ebs_csi_enable_volume_resizing: false +# aws_ebs_csi_controller_replicas: 1 +# aws_ebs_csi_plugin_image_tag: latest +# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment' diff --git a/kubespray/inventory/sample/group_vars/all/azure.yml b/kubespray/inventory/sample/group_vars/all/azure.yml new file mode 100644 index 0000000..176b0f1 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/azure.yml @@ -0,0 +1,40 @@ +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values + +# azure_cloud: +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_security_group_resource_group: +# azure_vnet_name: +# azure_vnet_resource_group: +# azure_route_table_name: +# azure_route_table_resource_group: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard + +## Azure Disk CSI credentials and parameters +## see docs/azure-csi.md for details on how to get these values + +# azure_csi_tenant_id: +# azure_csi_subscription_id: +# azure_csi_aad_client_id: +# azure_csi_aad_client_secret: +# azure_csi_location: +# azure_csi_resource_group: +# azure_csi_vnet_name: +# azure_csi_vnet_resource_group: +# azure_csi_subnet_name: +# azure_csi_security_group_name: +# azure_csi_use_instance_metadata: +# azure_csi_tags: "Owner=owner,Team=team,Environment=environment' + +## To enable Azure Disk CSI, uncomment below +# azure_csi_enabled: true +# azure_csi_controller_replicas: 1 +# azure_csi_plugin_image_tag: latest diff --git a/kubespray/inventory/sample/group_vars/all/containerd.yml b/kubespray/inventory/sample/group_vars/all/containerd.yml new file mode 100644 index 0000000..78ed663 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/containerd.yml @@ -0,0 +1,50 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +## An obvious use case is allowing insecure-registry access to self hosted registries. 
+## Can be ipaddress and domain_name. +## example define mirror.registry.io or 172.19.16.11:5000 +## set "name": "url". insecure url must be started http:// +## Port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# "localhost": "http://127.0.0.1" +# "172.19.16.11:5000": "http://172.19.16.11:5000" + +# containerd_registries: +# "docker.io": "https://registry-1.docker.io" + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/inventory/sample/group_vars/all/coreos.yml b/kubespray/inventory/sample/group_vars/all/coreos.yml new file mode 100644 index 0000000..22c2166 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/kubespray/inventory/sample/group_vars/all/cri-o.yml b/kubespray/inventory/sample/group_vars/all/cri-o.yml new file mode 100644 index 0000000..3e6e4ee --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/cri-o.yml @@ -0,0 +1,6 @@ +# crio_insecure_registries: +# - 10.0.0.2:5000 +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/kubespray/inventory/sample/group_vars/all/docker.yml b/kubespray/inventory/sample/group_vars/all/docker.yml new file mode 100644 index 0000000..4e968c3 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/docker.yml @@ -0,0 +1,59 @@ +--- +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver) +## Valid options are systemd or cgroupfs, default is systemd +# docker_cgroup_driver: systemd + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +# define docker bin_dir +docker_bin_dir: "/usr/bin" + +# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1' +# kubespray deletes the docker package on each run, so caching the package makes sense +docker_rpm_keepcache: 1 + +## An obvious use case is allowing insecure-registry access to self hosted registries. +## Can be ipaddress and domain_name. +## example define 172.19.16.11 or mirror.registry.io +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 + +## Add other registry,example China registry mirror. +# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com + +## If non-empty will override default system MountFlags value. 
+## This option takes a mount propagation flag: shared, slave +## or private, which control whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +## This string should be exactly as you wish it to appear. +# docker_options: "" diff --git a/kubespray/inventory/sample/group_vars/all/etcd.yml b/kubespray/inventory/sample/group_vars/all/etcd.yml new file mode 100644 index 0000000..7206a06 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. +# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host \ No newline at end of file diff --git a/kubespray/inventory/sample/group_vars/all/gcp.yml b/kubespray/inventory/sample/group_vars/all/gcp.yml new file mode 100644 index 0000000..49eb5c0 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/gcp.yml @@ -0,0 +1,10 @@ +## GCP compute Persistent Disk CSI Driver credentials and parameters +## See docs/gcp-pd-csi.md for information about the implementation + +## Specify the path to the file containing the service account credentials +# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json" + +## To enable GCP Persistent Disk CSI driver, uncomment below +# gcp_pd_csi_enabled: true +# gcp_pd_csi_controller_replicas: 1 +# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/kubespray/inventory/sample/group_vars/all/hcloud.yml b/kubespray/inventory/sample/group_vars/all/hcloud.yml new file mode 100644 index 0000000..c27035c --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/hcloud.yml @@ -0,0 +1,14 @@ +## Values for the external Hcloud Cloud Controller +# external_hcloud_cloud: +# hcloud_api_token: "" +# token_secret_name: hcloud +# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support +# service_account_name: cloud-controller-manager +# +# controller_image_tag: "latest" +# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +# ## Format: +# ## external_hcloud_cloud.controller_extra_args: +# ## arg1: "value1" +# ## arg2: "value2" +# controller_extra_args: {} diff --git a/kubespray/inventory/sample/group_vars/all/oci.yml b/kubespray/inventory/sample/group_vars/all/oci.yml new file mode 100644 index 0000000..541d0e6 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/oci.yml @@ -0,0 +1,28 @@ +## When Oracle Cloud Infrastructure is used, set these variables +# oci_private_key: +# oci_region_id: +# oci_tenancy_id: +# oci_user_id: +# oci_user_fingerprint: +# oci_compartment_id: +# oci_vnc_id: +# oci_subnet1_id: +# oci_subnet2_id: +## Override these default/optional behaviors if you 
wish +# oci_security_list_management: All +## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. +# oci_security_lists: +# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q +## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint +# oci_use_instance_principals: false +# oci_cloud_controller_version: 0.6.0 +## If you would like to control OCI query rate limits for the controller +# oci_rate_limit: +# rate_limit_qps_read: +# rate_limit_qps_write: +# rate_limit_bucket_read: +# rate_limit_bucket_write: +## Other optional variables +# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci) +# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above) diff --git a/kubespray/inventory/sample/group_vars/all/offline.yml b/kubespray/inventory/sample/group_vars/all/offline.yml new file mode 100644 index 0000000..83eb8fa --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/offline.yml @@ -0,0 +1,103 @@ +--- +## Global Offline settings +### Private Container Image Registry +# registry_host: "myprivateregisry.com" +# files_repo: "http://myprivatehttpd" +### If using CentOS, RedHat, AlmaLinux or Fedora +# yum_repo: "http://myinternalyumrepo" +### If using Debian +# debian_repo: "http://myinternaldebianrepo" +### If using Ubuntu +# ubuntu_repo: "http://myinternalubunturepo" + +## Container Registry overrides +# kube_image_repo: "{{ registry_host }}" +# gcr_image_repo: "{{ registry_host }}" +# github_image_repo: "{{ registry_host }}" +# docker_image_repo: "{{ registry_host }}" +# quay_image_repo: "{{ registry_host }}" + +## Kubernetes components +# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" + +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you **DON'T** use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# 
[Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] Flannel: If using Falnnel network plugin +# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# [Optional] runc,containerd: only if you set container_runtime: containerd +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + +## CentOS/Redhat/AlmaLinux +### For EL7, base and extras repo must be available, for EL8, baseos and appstream +### By default we enable those repo automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# 
docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/kubespray/inventory/sample/group_vars/all/openstack.yml b/kubespray/inventory/sample/group_vars/all/openstack.yml new file mode 100644 index 0000000..7835664 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/openstack.yml @@ -0,0 +1,49 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: "ROUND_ROBIN" +# external_openstack_lbaas_provider: "octavia" +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: "1m" +# external_openstack_lbaas_monitor_timeout: "30s" +# external_openstack_lbaas_monitor_max_retries: "3" +# external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# 
cinder_csi_controller_replicas: 1 diff --git a/kubespray/inventory/sample/group_vars/all/upcloud.yml b/kubespray/inventory/sample/group_vars/all/upcloud.yml new file mode 100644 index 0000000..c2d7f5d --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/upcloud.yml @@ -0,0 +1,24 @@ +## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi +## To use UpClouds CSI plugin to provision volumes set this value to true +## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD +# upcloud_csi_enabled: true +# upcloud_csi_controller_replicas: 1 +## Override used image tags +# upcloud_csi_provisioner_image_tag: "v3.1.0" +# upcloud_csi_attacher_image_tag: "v3.4.0" +# upcloud_csi_resizer_image_tag: "v1.4.0" +# upcloud_csi_plugin_image_tag: "v0.3.3" +# upcloud_csi_node_image_tag: "v2.5.0" +# upcloud_tolerations: [] +## Storage class options +# storage_classes: +# - name: standard +# is_default: true +# expand_persistent_volumes: true +# parameters: +# tier: maxiops +# - name: hdd +# is_default: false +# expand_persistent_volumes: true +# parameters: +# tier: hdd \ No newline at end of file diff --git a/kubespray/inventory/sample/group_vars/all/vsphere.yml b/kubespray/inventory/sample/group_vars/all/vsphere.yml new file mode 100644 index 0000000..af3cfbe --- /dev/null +++ b/kubespray/inventory/sample/group_vars/all/vsphere.yml @@ -0,0 +1,32 @@ +## Values for the external vSphere Cloud Provider +# external_vsphere_vcenter_ip: "myvcenter.domain.com" +# external_vsphere_vcenter_port: "443" +# external_vsphere_insecure: "true" +# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable +# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable +# external_vsphere_datacenter: "DATACENTER_name" +# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" + +## Vsphere version where located VMs +# external_vsphere_version: "6.7u3" + +## Tags for the external vSphere Cloud Provider images +## gcr.io/cloud-provider-vsphere/cpi/release/manager +# external_vsphere_cloud_controller_image_tag: "latest" +## gcr.io/cloud-provider-vsphere/csi/release/syncer +# vsphere_syncer_image_tag: "v2.5.1" +## registry.k8s.io/sig-storage/csi-attacher +# vsphere_csi_attacher_image_tag: "v3.4.0" +## gcr.io/cloud-provider-vsphere/csi/release/driver +# vsphere_csi_controller: "v2.5.1" +## registry.k8s.io/sig-storage/livenessprobe +# vsphere_csi_liveness_probe_image_tag: "v2.6.0" +## registry.k8s.io/sig-storage/csi-provisioner +# vsphere_csi_provisioner_image_tag: "v3.1.0" +## registry.k8s.io/sig-storage/csi-resizer +## makes sense only for vSphere version >=7.0 +# vsphere_csi_resizer_tag: "v1.3.0" + +## To use vSphere CSI plugin to provision volumes set this value to true +# vsphere_csi_enabled: true +# vsphere_csi_controller_replicas: 1 diff --git a/kubespray/inventory/sample/group_vars/etcd.yml b/kubespray/inventory/sample/group_vars/etcd.yml new file mode 100644 index 0000000..f07c720 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/etcd.yml @@ -0,0 +1,26 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. 
Set to 0 for unrestricted RAM. +## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +# etcd_memory_limit: "512M" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. +# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true diff --git a/kubespray/inventory/sample/group_vars/k8s_cluster/addons.yml b/kubespray/inventory/sample/group_vars/k8s_cluster/addons.yml new file mode 100644 index 0000000..22eed2a --- /dev/null +++ b/kubespray/inventory/sample/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,228 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. +# dashboard_enabled: false + +# Helm deployment +helm_enabled: false + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 4443 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.22" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# 
snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. 
+# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/master +# effect: NoSchedule +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_ip_range: +# - "10.5.0.50-10.5.0.99" +# metallb_pool_name: "loadbalanced" +# metallb_auto_assign: true +# metallb_avoid_buggy_ips: false +# metallb_speaker_nodeselector: +# kubernetes.io/os: "linux" +# metallb_controller_nodeselector: +# kubernetes.io/os: "linux" +# metallb_speaker_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_controller_tolerations: +# - key: "node-role.kubernetes.io/master" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# metallb_version: v0.12.1 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_additional_address_pools: +# kube_service_pool: +# ip_range: +# - "10.5.1.50-10.5.1.99" +# protocol: "layer2" +# auto_assign: false +# avoid_buggy_ips: false +# metallb_protocol: "bgp" +# metallb_peers: +# - peer_address: 192.0.2.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# - peer_address: 192.0.2.2 +# peer_asn: 64513 +# my_asn: 4200000000 + +argocd_enabled: false +# argocd_version: v2.5.5 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command: +# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" diff --git a/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 0000000..b974005 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,350 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. 
+kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.25.5 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. 
both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: >- +# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} +# {%- else -%} +# {{ inventory_hostname }} +# {%- endif -%} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). 
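+## Dropping the default search domains keeps pod /etc/resolv.conf short, at the cost of requiring
+## fully-qualified names (e.g. myservice.mynamespace.svc.cluster.local) for in-cluster lookups.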
+# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled) +podsecuritypolicy_enabled: false + +# Custom PodSecurityPolicySpec for restricted policy +# podsecuritypolicy_restricted_spec: {} + +# Custom PodSecurityPolicySpec for privileged policy +# podsecuritypolicy_privileged_spec: {} + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Optionally reserve resources for OS system daemons. 
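+## Example (illustrative): on an 8Gi worker, reserving 512Mi for system daemons on top of the
+## kubelet's default eviction-hard threshold of 100Mi leaves roughly 8Gi - 512Mi - 100Mi ≈ 7.4Gi
+## of allocatable memory for pods; size the reservations below with that arithmetic in mind.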
+# system_reserved: true +## Uncomment to override default values +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. +# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. 
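+## Example (illustrative): a common hardening choice is to pair tls_min_version: VersionTLS12 with a
+## short list of AEAD suites such as TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 and
+## TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384; the full set of accepted values is shown commented out below.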
+# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 0000000..cc0499d --- /dev/null +++ b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). +## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +# calico_pool_name: "default-pool" + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. 
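+## The rules below give the usual arithmetic; for example, a 1500-byte underlay carrying VXLAN
+## encapsulation would typically use calico_veth_mtu: 1450.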
+# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enable BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN is mutualy exclusive modes. +# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want use the default route interface when you use multiple interface with dynamique route (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tunning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml new file mode 100644 index 0000000..60b9da7 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml @@ -0,0 +1,10 @@ +# see roles/network_plugin/canal/defaults/main.yml + +# The interface used by canal for host <-> host communication. +# If left blank, then the interface is choosing using the node's +# default route. 
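+## On hosts with several NICs it is common to pin this to the interface that carries pod traffic,
+## e.g. canal_iface: "ens192" (illustrative interface name).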
+# canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +# canal_masquerade: "true" diff --git a/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-cilium.yml b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 0000000..d6e5bfa --- /dev/null +++ b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,245 @@ +--- +# cilium_version: "v1.12.1" + +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Cilium agent health port +# cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. +# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/probe/partial) +# cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all conneted clusters and +# in the range of 1 and 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +# cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). 
+# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +# cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +# cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +# cilium_enable_hubble: false +### Enable Hubble Metrics +# cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +# cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. 
+# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +# cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +# cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cilium operator binds the health check API +# cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. +# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with a retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related settings +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted the +# cgroup2 filesystem at the specified `cilium_cgroup_host_root` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates.
+# cilium_disable_cnp_status_updates: true diff --git a/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 0000000..64d20a8 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-ovn.yml b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-ovn.yml new file mode 100644 index 0000000..d580e15 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-ovn.yml @@ -0,0 +1,57 @@ +--- + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt requires a custom-compiled ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic used for the container network can be a nic name or a comma-separated group of regexes, e.g. 'enp6s0f0,eth.*'; if empty, the nic used by the default route is used. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port.
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, supported: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false diff --git a/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-router.yml b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 0000000..e4dfcc9 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,64 @@ +# See roles/network_plugin/kube-router/defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_loadbalancer_ip: false + +# Adjust the kube-router daemonset manifest template with the changes needed for DSR +# kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +# kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +# kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr. +# kube_router_peer_router_asns: ~ + +# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidrs. +# kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +# kube_router_peer_router_ports: ~ + +# Sets up node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +# kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
+# kube_router_dns_policy: ClusterFirstWithHostNet + +# Array of annotations for master +# kube_router_annotations_master: [] + +# Array of annotations for every node +# kube_router_annotations_node: [] + +# Array of common annotations for every node +# kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +# kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +# kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +# kube_router_metrics_port: 9255 diff --git a/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml new file mode 100644 index 0000000..d2534e7 --- /dev/null +++ b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml @@ -0,0 +1,6 @@ +--- +# private interface, on a l2-network +macvlan_interface: "eth1" + +# Enable nat in default gateway network interface +enable_nat_default_gateway: true diff --git a/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-weave.yml b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-weave.yml new file mode 100644 index 0000000..269a77c --- /dev/null +++ b/kubespray/inventory/sample/group_vars/k8s_cluster/k8s-net-weave.yml @@ -0,0 +1,64 @@ +# see roles/network_plugin/weave/defaults/main.yml + +# Weave's network password for encryption, if null then no network encryption. +# weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +# weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +# weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +# weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +# weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +# weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +# weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +# weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +# weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +# weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +# weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +# weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works +# only with Weave IPAM (default). 
+# weave_no_masq_local: true + +# Set to nft to use the nftables backend for iptables (default is iptables) +# weave_iptables_backend: iptables + +# Extra variables that are passed to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +# weave_extra_args: ~ + +# Extra variables for weave_npc that are passed to launch.sh, useful for changing the log level, e.g. --log-level=error +# weave_npc_extra_args: ~ diff --git a/kubespray/inventory/sample/inventory.ini b/kubespray/inventory/sample/inventory.ini new file mode 100644 index 0000000..99a6309 --- /dev/null +++ b/kubespray/inventory/sample/inventory.ini @@ -0,0 +1,38 @@ +# ## Configure 'ip' variable to bind kubernetes services on a +# ## different ip than the default iface +# ## We should set etcd_member_name for the etcd cluster. Nodes that are not etcd members do not need to set the value, or can set it to an empty string. +[all] +# node1 ansible_host=95.54.0.12 # ip=10.3.0.1 etcd_member_name=etcd1 +# node2 ansible_host=95.54.0.13 # ip=10.3.0.2 etcd_member_name=etcd2 +# node3 ansible_host=95.54.0.14 # ip=10.3.0.3 etcd_member_name=etcd3 +# node4 ansible_host=95.54.0.15 # ip=10.3.0.4 etcd_member_name=etcd4 +# node5 ansible_host=95.54.0.16 # ip=10.3.0.5 etcd_member_name=etcd5 +# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6 + +# ## configure a bastion host if your nodes are not directly reachable +# [bastion] +# bastion ansible_host=x.x.x.x ansible_user=some_user + +[kube_control_plane] +# node1 +# node2 +# node3 + +[etcd] +# node1 +# node2 +# node3 + +[kube_node] +# node2 +# node3 +# node4 +# node5 +# node6 + +[calico_rr] + +[k8s_cluster:children] +kube_control_plane +kube_node +calico_rr diff --git a/kubespray/inventory/sample/patches/kube-controller-manager+merge.yaml b/kubespray/inventory/sample/patches/kube-controller-manager+merge.yaml new file mode 100644 index 0000000..a8aa5a7 --- /dev/null +++ b/kubespray/inventory/sample/patches/kube-controller-manager+merge.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-controller-manager + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '10257' \ No newline at end of file diff --git a/kubespray/inventory/sample/patches/kube-scheduler+merge.yaml b/kubespray/inventory/sample/patches/kube-scheduler+merge.yaml new file mode 100644 index 0000000..0bb3950 --- /dev/null +++ b/kubespray/inventory/sample/patches/kube-scheduler+merge.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-scheduler + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '10259' \ No newline at end of file diff --git a/kubespray/legacy_groups.yml b/kubespray/legacy_groups.yml new file mode 100644 index 0000000..0d01710 --- /dev/null +++ b/kubespray/legacy_groups.yml @@ -0,0 +1,47 @@ +--- +# This is an inventory compatibility playbook to ensure we keep compatibility with old-style group names + +- name: Add kube-master nodes to kube_control_plane + hosts: kube-master + gather_facts: false + tags: always + tasks: + - name: add nodes to kube_control_plane group + group_by: + key: 'kube_control_plane' + +- name: Add kube-node nodes to kube_node + hosts: kube-node + gather_facts: false + tags: always + tasks: + - name: add nodes to kube_node group + group_by: + key: 'kube_node' + +- name: Add k8s-cluster nodes to k8s_cluster + hosts: k8s-cluster + gather_facts: false + tags: always + tasks: + - name: add nodes to k8s_cluster group + group_by: + key: 'k8s_cluster' + +- name: Add calico-rr
nodes to calico_rr + hosts: calico-rr + gather_facts: false + tags: always + tasks: + - name: add nodes to calico_rr group + group_by: + key: 'calico_rr' + +- name: Add no-floating nodes to no_floating + hosts: no-floating + gather_facts: false + tags: always + tasks: + - name: add nodes to no-floating group + group_by: + key: 'no_floating' diff --git a/kubespray/library/kube.py b/kubespray/library/kube.py new file mode 100644 index 0000000..cb9f4f0 --- /dev/null +++ b/kubespray/library/kube.py @@ -0,0 +1,357 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = """ +--- +module: kube +short_description: Manage Kubernetes Cluster +description: + - Create, replace, remove, and stop resources within a Kubernetes Cluster +version_added: "2.0" +options: + name: + required: false + default: null + description: + - The name associated with resource + filename: + required: false + default: null + description: + - The path and filename of the resource(s) definition file(s). + - To operate on several files this can accept a comma separated list of files or a list of files. + aliases: [ 'files', 'file', 'filenames' ] + kubectl: + required: false + default: null + description: + - The path to the kubectl bin + namespace: + required: false + default: null + description: + - The namespace associated with the resource(s) + resource: + required: false + default: null + description: + - The resource to perform an action on. pods (po), replicationControllers (rc), services (svc) + label: + required: false + default: null + description: + - The labels used to filter specific resources. + server: + required: false + default: null + description: + - The url for the API server that commands are executed against. + force: + required: false + default: false + description: + - A flag to indicate to force delete, replace, or stop. + wait: + required: false + default: false + description: + - A flag to indicate to wait for resources to be created before continuing to the next step + all: + required: false + default: false + description: + - A flag to indicate delete all, stop all, or all namespaces when checking exists. + log_level: + required: false + default: 0 + description: + - Indicates the level of verbosity of logging by kubectl. + state: + required: false + choices: ['present', 'absent', 'latest', 'reloaded', 'stopped'] + default: present + description: + - present handles checking existence or creating if definition file provided, + absent handles deleting resource(s) based on other options, + latest handles creating or updating based on existence, + reloaded handles updating resource(s) definition using definition file, + stopped handles stopping resource(s) based on other options. + recursive: + required: false + default: false + description: + - Process the directory used in -f, --filename recursively. + Useful when you want to manage related manifests organized + within the same directory. 
+requirements: + - kubectl +author: "Kenny Jones (@kenjones-cisco)" +""" + +EXAMPLES = """ +- name: test nginx is present + kube: name=nginx resource=rc state=present + +- name: test nginx is stopped + kube: name=nginx resource=rc state=stopped + +- name: test nginx is absent + kube: name=nginx resource=rc state=absent + +- name: test nginx is present + kube: filename=/tmp/nginx.yml + +- name: test nginx and postgresql are present + kube: files=/tmp/nginx.yml,/tmp/postgresql.yml + +- name: test nginx and postgresql are present + kube: + files: + - /tmp/nginx.yml + - /tmp/postgresql.yml +""" + + +class KubeManager(object): + + def __init__(self, module): + + self.module = module + + self.kubectl = module.params.get('kubectl') + if self.kubectl is None: + self.kubectl = module.get_bin_path('kubectl', True) + self.base_cmd = [self.kubectl] + + if module.params.get('server'): + self.base_cmd.append('--server=' + module.params.get('server')) + + if module.params.get('log_level'): + self.base_cmd.append('--v=' + str(module.params.get('log_level'))) + + if module.params.get('namespace'): + self.base_cmd.append('--namespace=' + module.params.get('namespace')) + + + self.all = module.params.get('all') + self.force = module.params.get('force') + self.wait = module.params.get('wait') + self.name = module.params.get('name') + self.filename = [f.strip() for f in module.params.get('filename') or []] + self.resource = module.params.get('resource') + self.label = module.params.get('label') + self.recursive = module.params.get('recursive') + + def _execute(self, cmd): + args = self.base_cmd + cmd + try: + rc, out, err = self.module.run_command(args) + if rc != 0: + self.module.fail_json( + msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err)) + except Exception as exc: + self.module.fail_json( + msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc))) + return out.splitlines() + + def _execute_nofail(self, cmd): + args = self.base_cmd + cmd + rc, out, err = self.module.run_command(args) + if rc != 0: + return None + return out.splitlines() + + def create(self, check=True, force=True): + if check and self.exists(): + return [] + + cmd = ['apply'] + + if force: + cmd.append('--force') + + if self.wait: + cmd.append('--wait') + + if self.recursive: + cmd.append('--recursive={}'.format(self.recursive)) + + if not self.filename: + self.module.fail_json(msg='filename required to create') + + cmd.append('--filename=' + ','.join(self.filename)) + + return self._execute(cmd) + + def replace(self, force=True): + + cmd = ['apply'] + + if force: + cmd.append('--force') + + if self.wait: + cmd.append('--wait') + + if self.recursive: + cmd.append('--recursive={}'.format(self.recursive)) + + if not self.filename: + self.module.fail_json(msg='filename required to reload') + + cmd.append('--filename=' + ','.join(self.filename)) + + return self._execute(cmd) + + def delete(self): + + if not self.force and not self.exists(): + return [] + + cmd = ['delete'] + + if self.filename: + cmd.append('--filename=' + ','.join(self.filename)) + if self.recursive: + cmd.append('--recursive={}'.format(self.recursive)) + else: + if not self.resource: + self.module.fail_json(msg='resource required to delete without filename') + + cmd.append(self.resource) + + if self.name: + cmd.append(self.name) + + if self.label: + cmd.append('--selector=' + self.label) + + if self.all: + cmd.append('--all') + + if self.force: + cmd.append('--ignore-not-found') + + if self.recursive: 
+ cmd.append('--recursive={}'.format(self.recursive)) + + return self._execute(cmd) + + def exists(self): + cmd = ['get'] + + if self.filename: + cmd.append('--filename=' + ','.join(self.filename)) + if self.recursive: + cmd.append('--recursive={}'.format(self.recursive)) + else: + if not self.resource: + self.module.fail_json(msg='resource required without filename') + + cmd.append(self.resource) + + if self.name: + cmd.append(self.name) + + if self.label: + cmd.append('--selector=' + self.label) + + if self.all: + cmd.append('--all-namespaces') + + cmd.append('--no-headers') + + result = self._execute_nofail(cmd) + if not result: + return False + return True + + # TODO: This is currently unused, perhaps convert to 'scale' with a replicas param? + def stop(self): + + if not self.force and not self.exists(): + return [] + + cmd = ['stop'] + + if self.filename: + cmd.append('--filename=' + ','.join(self.filename)) + if self.recursive: + cmd.append('--recursive={}'.format(self.recursive)) + else: + if not self.resource: + self.module.fail_json(msg='resource required to stop without filename') + + cmd.append(self.resource) + + if self.name: + cmd.append(self.name) + + if self.label: + cmd.append('--selector=' + self.label) + + if self.all: + cmd.append('--all') + + if self.force: + cmd.append('--ignore-not-found') + + return self._execute(cmd) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(), + filename=dict(type='list', aliases=['files', 'file', 'filenames']), + namespace=dict(), + resource=dict(), + label=dict(), + server=dict(), + kubectl=dict(), + force=dict(default=False, type='bool'), + wait=dict(default=False, type='bool'), + all=dict(default=False, type='bool'), + log_level=dict(default=0, type='int'), + state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped', 'exists']), + recursive=dict(default=False, type='bool'), + ), + mutually_exclusive=[['filename', 'list']] + ) + + changed = False + + manager = KubeManager(module) + state = module.params.get('state') + if state == 'present': + result = manager.create(check=False) + + elif state == 'absent': + result = manager.delete() + + elif state == 'reloaded': + result = manager.replace() + + elif state == 'stopped': + result = manager.stop() + + elif state == 'latest': + result = manager.replace() + + elif state == 'exists': + result = manager.exists() + module.exit_json(changed=changed, + msg='%s' % result) + + else: + module.fail_json(msg='Unrecognized state %s.' % state) + + module.exit_json(changed=changed, + msg='success: %s' % (' '.join(result)) + ) + + +from ansible.module_utils.basic import * # noqa +if __name__ == '__main__': + main() diff --git a/kubespray/logo/LICENSE b/kubespray/logo/LICENSE new file mode 100644 index 0000000..8f2aa43 --- /dev/null +++ b/kubespray/logo/LICENSE @@ -0,0 +1 @@ +# The Kubespray logo files are licensed under a choice of either Apache-2.0 or CC-BY-4.0 (Creative Commons Attribution 4.0 International). 
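For reference, a minimal playbook task using the kube module defined in kubespray/library/kube.py above might look like the sketch below; the manifest path, namespace, and kubectl location are illustrative placeholders, while the parameter names (filename, namespace, kubectl, state, wait) come from the module's argument_spec.

    - name: apply an addon manifest with the kube module
      kube:
        filename: /etc/kubernetes/addons/example-addon.yml   # illustrative path, not part of the repo
        namespace: kube-system                                # illustrative namespace
        kubectl: /usr/local/bin/kubectl                       # path to the kubectl binary on the node
        state: latest                                         # create the resource or update it if it already exists
        wait: true                                            # pass --wait to kubectl apply

With state: latest the module runs kubectl apply on the given file, so rerunning the task simply reapplies the manifest.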
diff --git a/kubespray/logo/OWNERS b/kubespray/logo/OWNERS new file mode 100644 index 0000000..52acd54 --- /dev/null +++ b/kubespray/logo/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - thomeced diff --git a/kubespray/logo/logo-clear.png b/kubespray/logo/logo-clear.png new file mode 100644 index 0000000..3ce32f6 Binary files /dev/null and b/kubespray/logo/logo-clear.png differ diff --git a/kubespray/logo/logo-clear.svg b/kubespray/logo/logo-clear.svg new file mode 100644 index 0000000..00798d6 --- /dev/null +++ b/kubespray/logo/logo-clear.svg @@ -0,0 +1,80 @@ [SVG logo markup omitted] diff --git a/kubespray/logo/logo-dark.png b/kubespray/logo/logo-dark.png new file mode 100644 index 0000000..5fc3660 Binary files /dev/null and b/kubespray/logo/logo-dark.png differ diff --git a/kubespray/logo/logo-dark.svg b/kubespray/logo/logo-dark.svg new file mode 100644 index 0000000..d9d8cb5 --- /dev/null +++ b/kubespray/logo/logo-dark.svg @@ -0,0 +1,83 @@ [SVG logo markup omitted] diff --git a/kubespray/logo/logo-text-clear.png b/kubespray/logo/logo-text-clear.png new file mode 100644 index 0000000..b841240 Binary files /dev/null and b/kubespray/logo/logo-text-clear.png differ diff --git a/kubespray/logo/logo-text-clear.svg b/kubespray/logo/logo-text-clear.svg new file mode 100644 index 0000000..c24f788 --- /dev/null +++ b/kubespray/logo/logo-text-clear.svg @@ -0,0 +1,107 @@ [SVG logo markup omitted] diff --git a/kubespray/logo/logo-text-dark.png b/kubespray/logo/logo-text-dark.png new file mode 100644 index 0000000..1871c0f Binary files /dev/null and b/kubespray/logo/logo-text-dark.png differ diff --git a/kubespray/logo/logo-text-dark.svg b/kubespray/logo/logo-text-dark.svg new file mode 100644 index 0000000..171df35 --- /dev/null +++ b/kubespray/logo/logo-text-dark.svg @@ -0,0 +1,110 @@ [SVG logo markup omitted] diff --git a/kubespray/logo/logo-text-mixed.png b/kubespray/logo/logo-text-mixed.png new file mode 100644 index 0000000..a4b3b39 Binary files /dev/null and b/kubespray/logo/logo-text-mixed.png differ diff --git a/kubespray/logo/logo-text-mixed.svg b/kubespray/logo/logo-text-mixed.svg new file mode 100644 index 0000000..ed058c0 --- /dev/null +++ b/kubespray/logo/logo-text-mixed.svg @@ -0,0 +1,110 @@ [SVG logo markup omitted] diff --git a/kubespray/logo/logos.pdf b/kubespray/logo/logos.pdf new file mode 100644 index
0000000..ed7a1f5 --- /dev/null +++ b/kubespray/logo/logos.pdf @@ -0,0 +1,1772 @@ [1772 lines of embedded binary PDF data omitted]
Tz 0 Tr 22 0 0 22 724.3335 589.667 Tm +[(l)10(ogotypes)]TJ +ET + +endstream endobj 20 0 obj <> endobj 5 0 obj <> endobj 21 0 obj <> endobj 22 0 obj <>stream +H‰,”{\WÇg3cW•t²š±3©Ún+Rl ¢‚F…"”%€€ˆ‚<£"¤+ø ¶ÅêWDÊ£¸ˆ¢( (E„È[#Ø )àªë[©«ÝÏnœËç†OwÐÞ?ï=÷{îïwÎ=$aoG$Éþu}˜Ÿ——“·Ÿ&h{ÊäÎÄ“h–=šãÀa¼cŽýá9ö9Ô[„éÊÈ4ø`:lš±3ç0' ;’\øÑ¢EnªUI1ñÛ3œ?Ý”¬ +JIIR¥fÆ$mN×ÇÇ©bv¨|BüT>)É!ú”T•_rFüöäM›S’7%©Öl‹ñuñ Ý‘¯rUÅÅ'Ò" Gb.1x—XH8®ÄbÂXF¬&¼ —ð#ˆ@"ˆ&B‰0"Bº£”ÔH—Þ!ÖYÄmâwÒ…ô!÷‘7íí>·;i7 [%;$ûÙ^oÿ­}ERj˜v§èÊ3‘OMpÆ${ª@Žã:›#mšxŸCcpÆ6ưø¿ìÊJøÎH« î)Æu^ žj)³á jÂaK* àví0’è…—Œë(3¡£îÒÝàFAÝÝ(O0r¸‚%r ØÁ ”X ÀBþ<OGdè h¹‘½kŽ W5÷+aªÛ ž!àÊ œ¢Áñ¡8Qý5çþ±€×A ¬£wj=Ä<ækâ<¬Ðз¯Å/Ù¿È'ù2tQa±ÙAô[p?Dv›fBÇ°í¯•Iã3ø0òƒ 8I}@ã6g +9«'t#ÉÀð…m¶é›^`Ÿõ*5JDb|–zsF’Éõ¯´<” ä‹þ…9—OB.A0 ܳ{ Õ +gMäGö‡zð§Ÿ\Ó9‹l¯ø^òcð@ü¤ÀËa„­¸nùE Ó]‡'¨ž$ €š†Åx€ú +q®AÑK¼Vòb-}¯7ÊUdwç[Ðã~òÚ ËP „s½™—ãÏ[k’ŠýŠ–•P¸¹8³bJfÅ®š:åÎöC­†˜&¡µ•êÐV-âñÛ®žxZpETýV±6åbV×î{¹³L{¾Ý{Ì8¥Êpd[¼ÒÝ?r®GhyoNùÞLý‡÷_Þ‚™"û0ß2.ÉGà'IÄ#ÈåPšÚ¶•Á„÷¼ykÐVµ-ÁsyæÃü_ÿs6üuPݸŽó q€í€~ fOJV¶ oMÞš¸/†÷ÔŸèîi¨á-šUå÷!‡>òºU†¿ç"Z¨˜„ä[Ñ&fÔî>gVšŸûG­ÐPÝXq™o©ù[LƒØz‰ê ._ÃãÎîØ{>w‡–¦òN“Èúæ_‡°èšÄ„’AJ”~M-ï­z†ßÄ3W¸`W¼úþrøó½–ê¶6!¢‰Š‹Û’µ–_¨5!BI‡Kè¯÷—WOi®¼PÓη_HÞX)§íÏ>;… Æ밤Ù÷åû¬ð¥U~is8tЛ‘¿ f®ÄF‡ñÚ¨¬´41""Oí¡ô8¢nÒª›v´ñíÍGMâ +Æ=ÖwáÚíeµ‰‚n¥»rá³!¸‘'ÀŠ^Pι丨TÙ£££¥£cc߸¸l¥ùôÁ#ùÔ +û$¯Ò0À€ìÓAl²Á›(À ¯ º™G—›†‡›£]Ü- Ÿ®ódC=–&O¹ÂÇh³«.‹hØËöOfåù‡€%%¯¸ò[“` Ó}IV8µ-ºtiŒÕ‹òæ/[žºù­‡—›‡†Z¢¤ ]Œd÷«úIFHÓ %ÀWÜÈ®›Ú6¡n³®2Ôô›E­v×'j¥ºÈ§-\Пm1\寶TÖÛLE#J/Eè‰ BzzR®ž×o+®J¾¸Ú©ì:xµ¾^¨ª:U\Ç×ÎM¯ë†(Ùõ8Ò[&¿~yUþeJ¹oçÝhªÓ¢ËÂùÐèá1¢ünÏ+cN3á¾ÛÙÁ›šÊª«ÅÖ¶#wnK™²]T˜È}!È_¾(b¬ÔÅI`ßÍ7£™×Èþ›°÷¦ Ã|îÇ Z·¿ +QšÂÔcß)MçNYM͹©uBK U¿QSæÍ¿½2ìÃ¥¦M/öˆw÷R=ŸWeD)ƒ#’—jÂÊã…ÈHj}{ÿg÷yp¸Óùo‘Ååf4µœŒäÿ¤ÒÔ*ÌñÌÉfº~íœ N^Ø) ÔŒ{˜Õƒî ,^€ûÆåÒ÷EyÜr+m"æ‘H-§á›AïDmÌÎÉÖò™þ§éººÃǫŒ£GÊ‹NLaó Q^!l9DcC!#{çw‡7 +¦Zþdj.tpè)q˜†zßDϹÿ 0Œµº× +endstream endobj 12 0 obj <> endobj 19 0 obj <> endobj 6 0 obj <> endobj 23 0 obj [/View/Design] endobj 24 0 obj <>>> endobj 13 0 obj <> endobj 11 0 obj <> endobj 25 0 obj <> endobj 26 0 obj <>stream +%!PS-Adobe-3.0 +%%Creator: Adobe Illustrator(R) 15.0 +%%AI8_CreatorVersion: 15.0.0 +%%For: (Cedric PERRIER) () +%%Title: (kubesprayv1.ai) +%%CreationDate: 3/1/2016 4:55 PM +%%Canvassize: 16383 +%%BoundingBox: 19 -684 1023 -17 +%%HiResBoundingBox: 19.6665 -683.4648 1022.1895 -17.501 +%%DocumentProcessColors: Cyan Magenta Yellow Black +%AI5_FileFormat 11.0 +%AI12_BuildNumber: 399 +%AI3_ColorUsage: Color +%AI7_ImageSettings: 0 +%%CMYKProcessColor: 1 1 1 1 ([Repérage]) +%AI3_Cropmarks: 0 -720 1280 0 +%AI3_TemplateBox: 640.5 -360.5 640.5 -360.5 +%AI3_TileBox: 256 -654 1024 -66 +%AI3_DocumentPreview: None +%AI5_ArtSize: 14400 14400 +%AI5_RulerUnits: 6 +%AI9_ColorModel: 2 +%AI5_ArtFlags: 0 0 0 1 0 0 1 0 0 +%AI5_TargetResolution: 800 +%AI5_NumLayers: 1 +%AI9_OpenToView: -2893 697 0.5 2499 1268 18 0 0 48 119 0 0 0 1 1 0 1 1 0 0 +%AI5_OpenViewLayers: 7 +%%PageOrigin:334 -756 +%AI7_GridSettings: 72 8 72 8 1 0 0.8 0.8 0.8 0.9 0.9 0.9 +%AI9_Flatten: 1 +%AI12_CMSettings: 00.MS +%%EndComments + +endstream endobj 27 0 obj <>stream +%%BoundingBox: 19 -684 1023 -17 +%%HiResBoundingBox: 19.6665 -683.4648 1022.1895 -17.501 +%AI7_Thumbnail: 128 88 8 +%%BeginData: 8612 Hex Bytes +%0000330000660000990000CC0033000033330033660033990033CC0033FF +%0066000066330066660066990066CC0066FF009900009933009966009999 +%0099CC0099FF00CC0000CC3300CC6600CC9900CCCC00CCFF00FF3300FF66 +%00FF9900FFCC3300003300333300663300993300CC3300FF333300333333 +%3333663333993333CC3333FF3366003366333366663366993366CC3366FF +%3399003399333399663399993399CC3399FF33CC0033CC3333CC6633CC99 +%33CCCC33CCFF33FF0033FF3333FF6633FF9933FFCC33FFFF660000660033 
+%6600666600996600CC6600FF6633006633336633666633996633CC6633FF +%6666006666336666666666996666CC6666FF669900669933669966669999 +%6699CC6699FF66CC0066CC3366CC6666CC9966CCCC66CCFF66FF0066FF33 +%66FF6666FF9966FFCC66FFFF9900009900339900669900999900CC9900FF +%9933009933339933669933999933CC9933FF996600996633996666996699 +%9966CC9966FF9999009999339999669999999999CC9999FF99CC0099CC33 +%99CC6699CC9999CCCC99CCFF99FF0099FF3399FF6699FF9999FFCC99FFFF +%CC0000CC0033CC0066CC0099CC00CCCC00FFCC3300CC3333CC3366CC3399 +%CC33CCCC33FFCC6600CC6633CC6666CC6699CC66CCCC66FFCC9900CC9933 +%CC9966CC9999CC99CCCC99FFCCCC00CCCC33CCCC66CCCC99CCCCCCCCCCFF +%CCFF00CCFF33CCFF66CCFF99CCFFCCCCFFFFFF0033FF0066FF0099FF00CC +%FF3300FF3333FF3366FF3399FF33CCFF33FFFF6600FF6633FF6666FF6699 +%FF66CCFF66FFFF9900FF9933FF9966FF9999FF99CCFF99FFFFCC00FFCC33 +%FFCC66FFCC99FFCCCCFFCCFFFFFF33FFFF66FFFF99FFFFCC110000001100 +%000011111111220000002200000022222222440000004400000044444444 +%550000005500000055555555770000007700000077777777880000008800 +%000088888888AA000000AA000000AAAAAAAABB000000BB000000BBBBBBBB +%DD000000DD000000DDDDDDDDEE000000EE000000EEEEEEEE0000000000FF +%00FF0000FFFFFF0000FF00FFFFFF00FFFFFF +%524C45FFA8FFA87DA8A87DFFA8A87DFFA8A8A8FF7DFD05FFA8FFFFFFA8FD +%05FFA8A8A8FD5CFF277D7D7D2753527DFD0452285252A87D527D52277D7D +%525227A884A852527D527D52A87DFD5BFFFD0AA8FFFD04A8FFA9A8A87DFD +%05A87DFFA8A87D7D7DA8A8AFFD7BFF7EA8FDFCFFFDFCFFFDFCFFFDFCFFFD +%F8FFA8A9A8FFA8A8A8FD4BFFA8AFFD05A8A9A8AFA8A8FD23FFA8A8A8FFA8 +%FD4DFFA8FFA8A9FD05A8FFA8FD24FFA8FD51FFA8FFFFFFA8FDFCFFFDFCFF +%FDFCFFFDFCFFFDFCFFFD75FFA853597DFD7AFF8453062E060C2EA8FD43FF +%A8FD33FF592F0C2F2E2F2E2F2E7DA8FD3FFFA82E2F2E7EFD2FFF7EFD052E +%2F2E2E2E2F2E2F7DFD3CFFA8590C2F2E2F0C59A8FD2CFF7D0C2F2E2F2E2F +%0C2F2E2F0C847D0C59FD3AFF592E062F2E2E2E2F0C2E52AFFD29FFA8062F +%0C2E52A8A87D062E0C537DA82E0C7EFD27FFAFFD0FFFA82E2F2E2F2E2F2E +%2F2E2F2E592E84FD28FF7D2F2E5984FD05FF532F59FFA8A80CA8FF84A8FF +%847EA884FFFF7DA9A87D537EA8FF597D5959A8FF59597DFFA87D537EA8FF +%7D7D5984FFFFFF7E84FF7DA9FFFF59FD05FFA8062F2E2E062F2E2E2E2F2E +%2FA859067EFD27FF7E062FA8FFFFFFA8FFFFFF2E8484A8A8597DFF2EA87E +%2E84A806FFFF2F7D7D2E842E59A82E53A87DA82E597E2E7D8406592E2F7E +%2E537E0C7DFFA9062FFF7D2EFF5359FD05FF532F2E2F53A8A8FF592F0C2F +%7DA9A82F2EFD27FF7D2E53A8A8FF7D7EA8FF7E7D535953592E84FF59532F +%7EFF842FFFFF2E845959FF7E2EFF2E84A8FFFF2F53FFA8FF7D53FFFF0C7E +%2EA8FF5959FF59592EA9FF2F592FA8FD04FFA859062F7DFD05FFA8592EA8 +%A8A87D2EA8FD26FF7E0CFFA87DA8FF84FF59A8FF59062E062E7DFF2E2E06 +%A8FFA82EFFFF537D7D06530C7DA82F2E2F2EFFA8530653A88406592E2F7E +%2F2E2F06A9FF2F7D7D53FF7D0C59FD06FF532E53FFFFFFA8FFFFFFA859A8 +%A884A82EFD27FF7D2FFFFFA8FFA884FFA9FFFF592F2E2F0CA8FF597D7E2E +%FFA82FFFFF2E84597EFFA82EA82EA8FFFFFFA8FFFF53597D2F7DA8FFFF2E +%84A82FA8FF0C592E53FFFF2EA8FD06FF592EA87DA8A87D7DA87E84592F2E +%2F2E2FA8FD26FF7E0CFFFFA87DA8A8847DFFFF7D0C2E2E2E7DFF2EA8FF2F +%2EA90C59592E84592E7E522F7E2F537E59A82E597E53597D2EFFFFFFA82E +%A8FF537D592F7E8406A8A82EA8FD06FF2E7EFFAF7DFF84FFA8A8FFFF2E2F +%2E2F2EFD27FF7D2FFFFFFF84A8FF7DAFFFFF532F2E2F0C84FF84A8FFA87D +%A8A853597EFFA87D537EA8FF7D7D597DA8FF595959FFA884FD04FF7DFFFF +%A87EA8A8FFFF84A8FF7DFD07FF2F53FFFFA8A87D7EA8A8FFA82E062F2E2E +%A8FD26FF7E06A8FFFFFFA859FFFFFFA82F062F2E0C7DFFFFFFAFFD32FF2E +%7EFFFF7DA8FFFF59FFFFFF2E2F2E2F2EFD27FFA82F2E84FFFFA9A8FFFFA8 +%592E2F2E2F2EFD37FF2F53FFFFFF7DA9A8A8FFFFA82E0C2F2E2EA8FD27FF +%8453065384A87DA9532F0C2F0C2E2EA8FD37FF532FA8FFFFFF7DA9FFFFFF +%7E2E2F2E2F2EFD2AFF842E2E2E2F2E2F2E2F0C2F7DFD39FFA8062E7DFFFF 
+%A8A8FFA859062F2E2E0684FD2BFFAF532E062F2E2E067DA8FD3BFFA82E2F +%2FA853847D2F0C2F2E2F2EA8FD2EFFA8592E2F53A9FD3FFF7D2F062E062F +%2E2E062F59FD31FFA9A8A8FD42FFA87D2E2F2E2F0C59A8FD79FFA8522F2E +%A8FDADFFA8A884A87EA884A87EA884A87EA884A87EA884A87EA884A87EA8 +%84A87EA884A87EA884A87EA884A87EA884A87EA884A87EA884A87EA884A8 +%7EA884A87EA884A87EA884A8A8FD3BFF53062E062E062E062E062E062E06 +%2E062E062E062E062E062E062E062E062E062E062E062E062E062E062E06 +%2E062E062E062E062E062E062E062E062E062E062E062E0653FD3BFF2E2F +%2E2F2E2F2E2F2E2F2E2F2E2F0C2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F +%2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F +%2E2F2E2F2E2F2EFD3AFFA82F0C2E2E2F2E2E062F2E2E2E2F5359062F2E2E +%2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F +%2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2FFD3BFF2E2F2E2F2E2F2E +%592E2F2E2F2EA8A82F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E +%2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E +%2F2EFD11FF84A8FD27FFA8532E2E06537EFFFFA8532E0659A8A9592E2E2E +%0C2F2E2E0C2F2E2E2E2F2E2E0C2F0C2E2E2F0C2E0C2F2E2E062F2E2E0C2F +%0C2E2E2F0C2E0C2F2E2E0C2F2E2E0C2F2E2E2E2FFD09FFA9A9A8FD05FF59 +%A8FD28FF2E2F2E7EFD06FF84537DA9A8A92E2F2E532E2F2E592E592E2F2E +%532E592E592E2F2E5953592E2F53592E2F2E592E592E2F2E5953530C2F2E +%532E2F532F2E2F2F2F2EFD08FF7E5A2F3559FFFFFF7D5959FD27FFA82F06 +%2FA8FFFFA87EFFFFFF2E7D7D847D59062FA87D06A9A859A85306A87D7EFF +%A9A8842EFFA8FFFF7D53FFA8FF5259FFFFA8A82EA8A8FFA87D062FA8A806 +%A8840C53FF2E2EFD06FFAF3536355A353635A9A97E597E7DFD27FF2E2F84 +%A87EA98484A8A87DA8532F2E2F2E2F2EFF84A8FF5959FF532FA8A8A8FF2E +%84FF7DFF842F592FFFA82F7EA87DFF537DFFA8FFA82EFFFF2F59FFFF5959 +%FF53FFA82F2EFD06FF845A2F7E7E84355A84FF7EA8A8A8FD27FF2F2EFFFF +%7EA8FFA8FF7DFFFF59062F2E2E0C2FA8FFFF7D0659FF590CA87DA8FFA8A8 +%A82EFFA8A8842F84FFA87D0C7DFF5953FF7DFF7E7DA88406A87EA87D2EA8 +%FFA8530C2EFD05FF7D2E7DA8537D527D845959FD2BFF2E2FFFFFA8A97E84 +%A8A8FFFF592F2E2F2E2F2EFFFFFFA82F59FF2E2FA8A87EFF7DA8A87DFFA8 +%7D842E2F59AFFFA859FFFFFF8459FFFFA8FF592FFFA8A8FF0C59FFA80C2F +%2EFD05FF7D0028530C7D2727590659FD2AFFA82F2EFFFFA853FFFF7D7DFF +%FF59062E2E2F2E2EA8842EFF5959FF7D2EFF7DA8A82F53FF7DFF592E2E53 +%A85906A8FF7EFF592E2E2EFF590CA8A859FFA8FFFF590CFF590C2E2FFD05 +%FF7D28277E59A9597D532F59FD2BFF2E2FFFFFFFAF84A8A8FFFFFF532F2E +%2F2E2F2EFF7E2E7DFF53A9FFFFFF597DFFFFFFA859FFFFA9FF7DA8FFFFFF +%7E59FF2E2F2E59FF7E06FD04A82F2EFF7E2FFF7E0C2F2EFD05FF7D052852 +%840D84A82F0659FD2AFFA82F0C7DFFFFFFA87DFFFFFF7D2F2E2E2E2F2E2E +%2E532E2E52532E5953590C535259532F0C59535952532E5959590C2F2E2F +%2E2E2E592E2E2E532E2F0C2F2E532E592E2E2E2FFD05FF7E27052E52A87D +%532E2E59FD2BFF2E2F2E5984FFA8A8FFFF592F2E2F2E2F2E2F2E2F2E2F2E +%2F2E2F2E2F2E2F2E2F0C2F2E2F0C2F2E2F2E2F2E2F0C2F2E2F2E2F2E2F0C +%2F2E2F2E2F2E2F2E2F2E2F0C2F2E2F2EFD06FF592705057D53062E53FD2B +%FFA82F062F0C2E537D537DFD052E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E +%2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E +%2E2E2F2E2E2E2F2E2EFD07FFA87D27845359A8FD2DFF2E2F2E2F2E2F2E2F +%0C2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F +%2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F2E2F +%2EFD3BFF2F0C2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E +%2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E2E2E2F2E +%2E2E2F2E2E2E2F2E2E2E2F2E2EFD3BFF2F2E0C2F0C2F0C2F0C2F0C2F0C2F +%0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F +%0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F0C2F2EFD3BFFA859 +%7D537D597D537D597D537D597D537D597D537D597D537D597D537D597D53 +%7D597D537D597D537D597D537D597D537D597D537D597D537D597D537D59 
+%7D537D597D53A8FDFCFFFDFCFFFDFCFFFDD4FF7E84FD77FF7E84A8FD04FF +%8459FD74FFA88435350D365AFFFFAF595959FD73FF5A0D5A595A353635FF +%7E7E597EA8FD0FFFA8FD09FFA8FD05FFA8FD51FFA87D5A59847D7E5984A8 +%A8A8AFA8FFFFA82EFF7E2F847D59FFA853A8592E532EFF7E2E2E5353FF53 +%2F2E59A87D2E2F2EA87E2E2E2F53FFFFA82EA8A82FA8FF527EFD3FFF272E +%7D7E2F5227A8592FA8FD05FFA82F842F7DFF537DFFA82EA82EA8FF597DA8 +%2EFFA8FF7E2FA8FF53FF2F7DFF7D2F7E2EFFA82F84FF592E59FF537DA82F +%A8FD3EFFA82E05522E7D7D27522F0CA8FD05FFA82E0C2EFFFF5953FFA82F +%7E2F537D2EA87D2E5359A8A80C597DA9FF5359FF53537D2E7E842EFFA853 +%7D2FAFA92E2F59FD40FF272852A85A847D7E0C2FA8FD05FFA82F532FA8FF +%537DFFAF2EA82E7D7D537EA82E7E7DA9FFFF59592EA8532F2E53A8A80C59 +%0C59FFA82EA92EA8FF7D0CFD40FFA82705287D5A35A8532E06A8FD05FFA8 +%0CFF532EA8592EFF7D2E7E2E7DFF5353592FA8FFFFA859FFA82E7D5353FF +%FFFF592FFFA806A8532E2E2E53FF592FFD41FF5228052852A82E2F0C59FD +%06FFA859FFFF2E84A82F2E2F59FF2E592F2FA8A80C592E7DA82F2E5953FF +%53A8FFFFFF842FFFFF597E53A8FF842FFFA82FFD42FF5228052E530C0C59 +%A8FD07FFA8FFFFFFA8FFA8A884FFFFFD04A8FFA8A9A8A8A8FFA8A87DFFFF +%A9A8FFFFFFA8FFFFFFA8FFA8FFFFFFA8FFA8FD44FFA85352A853AFFDBAFF +%FF +%%EndData + +endstream endobj 28 0 obj <>stream +H‰ì—ïnÛ8À¿/ÐwÐ.°€´2)‰úÓ"ØN»MzMŒ¤ÍuÑb1‰YòQRÚôÃ=Ë~Ýר}±R’-º¶7¶£d#¶DÉ™!çǤ›È4µö!õ#íñ£Ÿî¹Ä›¼ãÉã3­Õ=:öYèŸFô?a]¼9ö£œ¦Æüë­Rz7θ\'¸:ÊXrIwÂA&±Ï®)KáI¶Åí)_8&®+€,ƒÌTºrëïÄR¯GãŒ2õjŠAçeg·Ký< Ïò¨P-íû̦\ÃW´’kóWíÙ£Ÿ´6tgBÙ®Ÿ†ƒaSs•[½$ò0ÓŠ¦- Á§]ö*‡i^S.œ +#í(»ÙÍÖžøS{ªµ÷“ìÐ@xDkw“$ÒZoSúüŠÆAP¸›/S¬òfÇb ¤½»æCöàé´}Ö0Ò^kï?"-x‡¼~¾*=Óži`Ik<Ó|-¶¸Ñ8XÉm1ߨV³i2ÅB£`Ìó/tseŠN14¿Š“Ïq©˜Ø‰AÀq¼-ÐÖFtÀ†+:–i÷’á(Éã ~¬.ü0^Ê33&9ÊüÁåR“LoPËLaˆ®è€•C¿¿i…àd×ÄYûÕ÷Wû§;†çº°‹ÖÔ&âohmê­ïkbŒ‰%q”äl@§UGŽlCÂ¥‡í ãÒÅóðhÛ“Ø + + ¤‹ WHC7]²¦&a9ŸÄŽ5•_º³Ó»uÓž-Srv~ =¦%½Ë#??›ÅO¬ø¹¢³äôˆ7›LkK%-“H¹­EÌù +(ô)ô5}†B߃BŸ‰6‰>)ËÖ‰õõîõLE½QOGqœF°»®'±Ð66›zÄ6f.ˆt丶œ¢ÖE  ›HKrU€t<¨F›ÀG [XÎUêøH‘ h[®œ@²@!ÅGÅÇÆð‘(>®ŠÄ!ŽÙ @"×”é:¤mZ ×ùh-ÐGñQñ±1|´WÅG˲MÜ>zŽao–‡¶ê +˜Ø\ € `cè(® +€1œFðÏBÈ‘óC‹l”‡ÄADRÀ5¥„ ï]ÅÇÕðÑІ²µ |4\דðD:DñQññð+>®Œ²|ôlÛÜ(]d¹u ÃR ¢à} ¡¸"6} ò®5pÏ\ ø§ˆ§ˆ×T♊x«!ž©ËAÍHù bJ˜çZ³á´..žò%±¤±…G…ǦâÑRx\QB¨{–iFà#בñhl”Ž[ö<:"LêïŽmZŠŠMå#Q|\›IF“lŒ =Œ$U°aHd´MÛVdTdl*mEF•9n.s¬k%K)4*46 ŽB£BãæÐ©#6ëï–¡ð¨ðØ@<2'$LaÑ¿þek§ä´‹;ArJµC>¯ÖKX ‘¶%3wŠÖnåiÆü,a:ý~h÷£ü<Œa­”Û½oÂì&»ƒ+´´˜„yº È êo]ÓlWü^ѹEyš1?K˜N¿ÐÙ…æx¶²R˜”˜7ôWVMÐ÷3Ð#.J\U")Ô°ôøä…?5Š.„¤Î—'}ÊpaT‹y=‹ ¼|·%­‹É0üJ§š áß«)D¥çÃ^2 +i*×FÃä +êg³}YWt<‚%™ŸÑ“C¸×ü8•WÃþÆå*f¹ö(ŒûIXÚ4=Ñ=gt–Z/'jM9éXrRR£fÐÿ`xʨâýƒäý+±ŠÏ&rÂ9÷ÆKFÚÑ…$Ÿ¿wsÔDu?;9Dìi£„eL‚éͯ2F[à³K Ùƒ” Dƒ]‰œFyIcÝ!Uc2òòm1òCV¸¹¢,“[.öUZí4Šï¾ý~¥ƒˆÝð¾ÿÇÓ¼ïÃm ¹<ÕÒ$TXWXo>ÖuOCº ÿwÂ÷þQ'ðGðv2˜>éÇY´’ê@ä° Q˜©®ë7g|»V=£Þî®Kv 7#ýþ~±O>ì~í…Ûf‹¿>wŽ/³âÝ{òó/úÿþÅ͇+"óeÔŸ'Öy†,0Þgôj‡U ¸]¶òGU¡Ð€„Ùóᬲ6Â4qñã»ÊBlµzly*¶~,¶–ަî)OXZÇ\?Ö>ùyLk óãsº¥U93.‹ïî9ÝíªH„×ÓB$Z›ˆÄ;-–Ä•t÷õÒ{¤ÿyæçÙÇz¹tGѤ\ªU´å-z8©p_-Å\ݪ}€^&i÷kg×;y–‰•SÑDxÓÉ~÷²øüÉ“²½K!û~D³ŒêôO‹ø§õþŽþþ“ùçô#ß»w)7¸Ööè§þ@² +ÜëÇ©‹öý$dãf\¥­Þë½}í0É]“1Rçç[Ù‰§;9k}¨Þ×»öãq–ûàä5m¤¾×`Y $(»u,Qc¦ô¶¡éõ6*ìm{HۇljöBÐ%Ռ۰y\hoÚö·ë‚nY±Ô]1å´ ™-¤—âËËb&ùveh›^YG"l¸ž¥ ·flËDΨ{{Û@¥ÔÔÂ:‘ÄÈ<1G–sÈ9n€.6lìn·p÷Xxâî•È{Âò±+Í)%ÊŸ)=ÙI®X8:uQ¥†%Ëòª“É)Úv„$wý Ûê‚Â2±uÁÿs_­»qÛXx/°ï `ÅhÆ#ÍH3Ó ?|I\Nâ{ƒmPp$zÌ5‡Ô’ÔÄÞ§éÏÍk$/¶‡Ôº+m·ë6AœDúxôÃïÜÜÜ¡Yùqíù+íÏ·_û¨{~úƦòô4Øóí¸6ÑRÈõKµaþ¬6Oscî[·5/mÎýêm¥lm§ +¨_W³ €WM#í½W©Ÿû¿(¥ªx¼¨‚Ù…N³à¦ð…¶½ðs¾­ló'°£ë̪±õslÒÉðšL`ð:‹õE†·©/ +´éŒË²ê‚˲éyÎeesŸ\V5î94¡*‘ ü¼4Ì«øev›vY&©_©&GVæo+ŒË‚Ê2Gg³íK΢Äaºi Ž´6¸4’7¥“»tX&;¦›œaG‰Oÿ‘Šx’Ssÿñ +óŸ5•·žêôeƒ"‚¨s úöãU|jvr‰vº±VΦ¿¿· ½sâ›—œÉ¹ RýW»j“OÓ4/ ƒê€0]­á×¢º®#—\­Z«:2Ð6—-ÈeékdÑ lùuм˙Eéu93¯#Ý.g¼:rÖåŒÛ@.:¾î—zʘˆ„°uµÖ öuY«›£Ç²>¢,ýV\ÞÙ*&#5tL•Ni5ª¬K6U•¶üš»å?ÒcAë©«“rx.Färj¶çé¿üùuœ ßæ]>~_©;ü§_žÿ[=qgéËëÇý–SmêO:VÉÖð>HÊô_œ)®¥s«uÁx¥W€ÉIB©ÁÜD”ÔLCžÄØ|›‹t~˜¬?þ¤ Ú…wXþðÅù‡må³=.d?ãa²‡±ý )Ï<ÊèÕKÿ×Ú,̓ï^]¾æîxýÜ™<ì)À3¤” ÛDé´øÒ€áÎÐojæWúŠ… ïfÊsŽ. 
TÅkýC=Æ8{=ù“?Ïa¿½†/°]{@4)Àú…삤s\ÆFVÿû»㺩G‰òðG£Â”C¿|n Ô+¶sÇ8 š¹Æêµñf„£6ü×Å/ŠÛ”q¡ hKñ¨ÌsÓÿw!|~Qøê0¾,hìÓ¼ö3L¤âû'Pþþ§ýJ¢}L±î¡‡ã•ú[ä͵ÞtžŸ?V*ËÛ÷O@ß¿ƒJ.) ÿ8e<üé:Xjƒíqû“Ýݨ, O¢~ÃJèM}Ï[zøžDên”ƒòIø÷,Xͧ‹Àwý{åÛãñË]Oç WãjÑSñÊ9:á¼ÏŸ-W0j\â[õFØlG¹×<ôd¦ +S.¯y"B|Â=±ºÝ“à±Ç +E0oý +dÖ¿˜Ì_#&{ÌÆUy mÍòT=¡GZ‹o-㳌ݩàñ±Àèï€[´ØÈÉÉEÌ”U¡Ž^=ÿÁHŒr–ÖÎXÔÁ«ãMöhéCu\®æÙ«+N˜2!™lŠE%eržgDÆ=¾Bâ>7ì­fγ¥7³¬l0½á›Ô@jñŠK¢é›×^aô3¹õRq!…Úr$¢3|‹ª^·©"ǘPæq­ÇräщQ•.§Ú#y_ÿ¸Œ¹ªã%y™Ïò€E1™fýìQÈ©(õr|á'Š;$ä߸nöøbíÄ(ÆÂ‘dŸP¤™×MCPæ¨æynß æ«y:Ë”yŠØÉëÊ· ä + c#ÁsA¢7z`Ñ:Ão‰$[B‰z¼VH•çžù³YC> 3X³>à—”sñ1"ï@žæÀs¢—@û9f=Ç&ßo>}œ&:–Ξ?bÆð»fþ_¸®“GÍÝ&‰—à‹Â ®ãJ`‰•cÉ.ðý¹ßy'ŽëìƬyÁð•¼j8÷Œ‡÷uAÂôu[åÁZX·â£‘„8`ç?(çEDJÕ× ÛÖ%b»í°sÅãââ—K®Üu³F·I¾5·ÌéÕb1΄ɚ375Œ,ןcä†ÇcRÜê5,Â/‰Ÿ{懜åª^®¡­AW ÆS=Å”¾xPeÓ¶ouæÜ)ç÷ËXÄP$”8Rƒnö1Åz;n~dYj¬rDÉ?;SPÛÃ0—›WfaÝ›o8¤ gߨ[æ¼­]¶:€ë÷OaBmÌ3­µúÍí-ÔÄ<ç:±f$QXf†¿Z¯Üžj~R¦œxî¢znU­n +)Ý +‡¹· –}hS”ÎA¨Åwᯖ]| ¶BxÕ´ŠJ{°¦s’4´Jv¤ü¾Xƒfhõ-o¦îb:›Î¬áøþøâeBi^ßÞ7=¼ÍbÕÖ¬êsO.•Î%>`:xÆÃñ÷gàçÕöÛ f;: ×⬣Ç72µ®1>¥‰–î,GvQc¥kDµö¸ôö&çuhä¦Ü5µ”zêV)¤.mÂxZ¦ó„¨¸‹ÆÎc/—°†\cú R²—Né™VVV¦4ÐÄâ⬂µßß ˜òô§[£—m‡°ú2»Šòu¡½sÊ¡+npœPYŽ‚Ïõë‹G-.;G¯¹ª¾·+¢çÀö&+n×¥°»)k`¹ˆeúxP…_\]XÉ0u3C.åÂ9‚¢*È6QXý“o§0¸Òrýq;PæSˆÒ˜¼'ñí>Õ…¿aÆYÊwõu­ŽPñ…ÄÚ1’c>“ç‘™4··úÑüˆÍèy1¸ƒYç[¾½`·Ü±k‰dí0^~af®ã’¨¶eÀèàTO§Ùô·©L“ï_áˆ${k§ygõÐÜBߺiœ.KGoÓÉñ¤29Vö·~.PDô‚X”-tõýª²”¥§.µjEzÊì9Sþ¥úîsÎí̘jzÚAHq½QhXí¹¢jÈRøgÇ,=6"h?'·ãXLÃDB+êI Šð-Jh.ên2cX¾'‘º+6©NؽeLÎ>}¸%Œ81‚‰P®¢/µV^õ¬í+aM7ªB¦-L¥ ï?£ZTù5MR1ÝÃñ^r"šr±›ö{ù Î:aR÷óÂÚ ðÐGß b>ö'…LöÆ@Š@bîz@ñn?•ê‘ö…Ô€¶ºõBÁãÞ·LM#Z½VL¶…oó¶É)µ–‘Öä’Ó-Ñ9ÛÇYNÞ!=—õ£BÎg ê궃T³ÚÔ€òA?- +¥ÛÓËòQOmØC<ÝÖëh(„bÚÃ<ùBöEÀ ¢¤è4›OòBêìù#f ¿ëÏn°0Ü0#@\Ž+Žãù´OD6X­Ã½]W&rs~â\¼8 Üu<ó¦n?8 ~Kh}-©`€Áånc][¤w¢Þ¥º.ž3Œ”4­¹ÿö2¨*çÞÎ+À쿜WéBÚ\¾•XÔ ›H* ’…XAm]Y ’˜ÅÖ^Ñ÷^Go웓˜Úhf?3Ï<ƒÏ1ª½§úà–T¸4À2Ì7I·'‘D3$*ÌÞ§A¯u`¿‰°ëcÇô&Ü’»Î)Í'W–­úËÙ¥•A\1=‡õ>Bͼ‰’!âþRµÍã2ÄÍÍ{ý=WYÏ4”\€¯²½¡¨€~a æ¢fˆP_c„Gº=É.¢Ë2pÔx6²,·òŽ%Kk(4Y«&5¸xFº£‰QÝH†‡?RY#Åž¡(¼Æ²žÐ'Xlj| 0t;rA48¥à˜ü!£rëH†nbÞâbI¶ÐD7ˆ­ZhXÌ=ݬ­ag§ŠŒ h܇`ñrùË"®ÅL•Çxܱ +=í&Jð|¶bÏ»!•;Y9 XfWK„{uøL¢ÉQ{Ü•3áš‚÷d^‚ºª›#dZQEž3D˜±0tl =G „ÍŃ"N|A +kågÈœZK‘o ½xápäˆ/D¾’ß.$êX§ þ3jº=n˜2ðÙ(9—òÎ0¸X1-lV²—˜ªË|W1 óZ•ˆ}ºžÍ 7íKœ"„ž™ÑuÕß4 ݱ1lñý E|)¸ùUéR×í“•C×O‰–dMˆ.²€õ)¿]\%ºË½¾æ Äê^ã üËMå—“÷„ïnaíP¬D#_¯ír&ÞÔg´iÿÔÍi?ôÖêuãðo­& âkU›s<ÏGôZK×taj;G½KEó‰@~åÖæôŸš…pÓÔ1u¥G@96hò¹ã¹ß(Õ ½‡¢½vªØþ`år«¸UX5*àÈl•?õhTÈP°&æÂ_h†jª«øÏ¹.iJ–Zã:Œ'¦;ÓGp°ÚºIŒŠ4!¦5¢€{?W/Ú¨æZ)u¶ &ƒžLBüó\—v ®-T%w³]Eß«ËxrWÃëD³È@#:A‰Ãi¯qY‘-õ'Îl¤!Eµþn™u=òû¾aÜK=ðÓ¹Réß6®ï1ò½{ΣP».^ ¬Í÷rcI.ñêCõµqÌ5â»é›Ê¬4u(-rñˆ—’‰uúïÆ_RN² GÐR£E¹ûØt}iô³¤ã’;‰ÂüdP©§ü–6Rp;cI#ÎÁj ‹ÂPfM Vq [úå¿Óqd>3ü¸£ãÅ+4 ÛP égty7ódƒ”{6ÒDdŠÄÃŒû*ê–YîÛÙœ'‹ #.Vת›SŽ‚¨£tAxÎÄ‹ÏùV¼lˆîDÅÜ.nà¡Üþ¼†BµÉ¨Ãz­k²¾qÄÝ 1Ä5À~™höç6ÄÞZ„˜4ÓCxÉ=ÿÄ„£²"Là\!»NÒÑ¿iQÁiÈñ)âÆ†þ·¤ß„àY¼@Ö«#oºÌ?ÿ½ÿMˆ0qѽ†ðoMwÞ$äª8ˆ<ë΢1QzCÊÌÐMÀÉOªŒ0´#̾w`!NÀ€z'a¾a¯N¢®Z(c-‰]ŒXðv]ƒ•iáü 9!°à ¸ÕX?€ÔÆ -˜ŠÚT%b>x§P…y¿1ºmÑ“ÎÀŸÞ¤ SŸz2–eèz½I-GµؤІ# ÁïŠ +8†hè#¢íb!XN3™ùN‰4Ýcëõ¯'œ„“r¿æÏÛ{ßÉòð,QB·ÉÆñ^ûˆ©š³Iq¬m5ø­ä~‚UPÊÚ)Ü|«¶‹Õ›‹J+^lþH´ª¦#P|-×úº›Íç·3‹{áÆÉÌNµô˜:¬–“†Uµ.sipS-5·L_ªa3ão×Íj9/õXå¬"p©TbüÉWS¼‡Çï©û ›{y`ò÷GIz¦7-(Œ=!+…m‡çò;CæEM Á 'g£•Öv¨S™\ÿA÷ÙÔ`½×°\ñ¡ZžòÕ¢•š‘\r×á÷/Dܸõ⟨pòÃbÔªz[”™‰ÍN¨ûìBEž÷8!Û|­–ÏCÏm±ãG~í½ru±¾Å}}Ù¡{GÛšÄ-psú²O +5áäzŸäŸJez÷xd®’Ïd•MÜð¬äVíIIÐÿRȚܜx®³™4¢Le繨<6DFÝ=O™ä‡nöö^qÕRcr n +¥ÁC•Ö„ÄŒ¦M¡ž%§Ål`RàÖ*G%$jxœ‹ +›FðÈÙÖÙþQRbTª3ór¸kîVÙzy{XKžžXømêß ÛŠÕÉò@ü^̶\»mRª·ñ³|/ ×.U…™Ž>t 63Ù‡í—Få=~‹¼7±›þðèZqEÀMfô¥žwÿ +|ùãWyX»ôäÙdíÙ³–»ËÕ¡ƒo3d¥RKæ¸óñÙ‡¡áY¹$¾\=º¯D öÚ̉落FÂÃ<„ìþYKIy÷'[ ÷ä–pâ<_¸/¼tŸ{!99}ùZC(±ÃF7×§ÝÛºÍ2Nî)¯ÕׇâÜ0ù»þ“WÐû‚x_{Ê’&K·yîeøÄ*/…tIžíŽyV>ÌB +ÏÕõ¹Ã¯½×Ù%Ýn^òÜxéUǯµ7¸l#uMžЫ—R¥ðUK}û ÝoØÎçä–Šª„ÿ·æ–oªÔfmpS»wǹçò9—áTÝ>(?—yzä0I2úéãò{-7ü¸þcxís>±·N8›p©Í ú©‘uû¦L¥g~_¾LÑ™rÿ>w°óXö"Y¬HÁéžJüþžqÀN +Ýil¦øy¿ÂÜé.Øôj"nÔ*L×l’Û>`Ç“šE•„›.MÝç†ËïÐù¦Œ¹¨%G§«žåt*]²à†î·DÀ¦rÊ1Í»éªx]ÑÜÅ%ÃðÔr™ÜE¾õ¹}ìÐQKêÂ? 
+FäÓ` Êi(=J5¬2 ‚m O¼3aŒëö•ƒÌ†×]°hiwÿ`¸èžzMž¿Œr²/–‚mX/rYkHÀ<#îØ¿§ Û錕–»îm®ãñÒüyÝqÏ"ñZwà¶æÝxª¢žŒñLجD'NÓóìc„Ÿí‡#Œ°¹¶Ç¢¢|ðn“øÙškÄÛ™žp¸5Ø=éG«E®N³|r.ooïï¢*XTû'i™èÞóT"t­–$¿ÐÅuL*;# ªWÆÁÆœÎ&Ù†k»sO`=—öÆÛ¾0fÀ³äñþ¥=á bp€³üãøß,/¿Ø\Ðõ/——_l.Ö8hj·À³q€f4ºßR'\W˜îœ &k¤ÎeÓ½àΛ†UM¸T«ÿÝâAŸ·¹µq•'¢ÂSa !ºà[¬ã[Á× •‰j‰¤¿Ê½ª è/PJJ¬¼Óorpñ€à™.àeºoÔº\£§€°iŠcÔÑä§CZ ¨˜%Þ tí@Œ³|UuÝK»àäY L߬±§Ä¥tº[0Lü)S÷ò­ý°ÃäÃf±)qƒƒçú¹li¶çcÌu¶[¼vXÄÔŸó+=Þù…ÍÄ—ÏfV¼¦€Á)XøÝK,*M-c'Jî S~—~ö9v̹ÕÞ  «ª® ®Q£x9/MR6Zž¾Æü²t¸ÂôL™}ÇW"p¯fée­Ê·šl3í릪×QkÕ;}YŒ®¬‹Áûƒ ó1Dлë¢IÔN)£ôÙXŸoD[éy#ý¿ ÄØ‘)³çó¦—ɽ ŠHÂQ|šÃP 4F¨¸˜7N|™Ï\y‚Lô>}?¨SŽyR’öE7ó‘Úrx[“ìù®Z–cÑD”®X€«hWmYžE}hë›_Lì å6ñÎtCæ…»«V7j1§à”-På¤X=Çæf¤}ŸTÀ¤v&{¸ý.ÓB-FÚ_fòƒ¨ÕÅ/ÒØ¥ Lc°-5aû¹¥`Ð’**¤á†%³[+‚+ãJ#ZVKi"]ʼÜòmù@3·Òø, ŠHrdÖW‡U :j^8eD®°/Ÿ•úJ\홄‚‘;Q  Q(ûqsJ}…TŠ,÷¨gAísëyhmBUnÞóFªM¦šE€,³Þ˾J³iPêë>öƒŸFì¶±£Ô`*`0؃tiq?Þz'T¿UjÜŽ½™-¼¦Ãмœ:Þ~ŽIéüƒ´–¦Å L,O°öhQi'&\&´@)dñ|`Êð³ùb/$Vö[•ìS\+ÖÔ¹v7.ü –^i~G®Îî7˜ç{±¢Öbpi#P«óïÔÏvCŽ<ó‡F¢ ãl !V#Ñyœ*€Æ =¸‘ÁÜ=iÎæ©ÐVÛŸR>stream +H‰ŒW×–‚H}ßsöÆœ@1* t‹iã˜E1‚оî·oã8†YgwßDʺnÝ*:Ä·´O¶²^,Ø•¯VuƒHî*Aä ü>i5 [nº©¼°K†þüÃà ÌÏþ¿Yµ‚ÌûŒ«þ0áæÃÀFÒ—Ðbßun ³E+W¸9¨úÛ®st< lT“®ë´›IäEÍ*a˜pTißñ*.$Ž$A¯+îL´Ò‘ xm‚k¡¥&ˆ—OáP1ÚåØö\RûãHZ&FÜ  /ýÂ\úXþß\³†‡ÿðÂç·•](Ëä #z\÷Ö­¸¬í ª’XÃY{…ÖB=?8ÏÒų®k¤À¾¯7YF2ïA¨´(7¼‡{`äÄÊ6v!/ûÔß6²œÇßåž'+¢Tϲ2´,™˜"ÈÜž&fd÷ îªÙä?ISsŠ‰Î¦gÃl¸¨_às ,–Õˆ¼P9£»‡Q,XƒqÞƒ%áèžä…y ¿àmZ¬ z@*ð®’¹ø}Œ®ãißV‹Ž<ç‡N Ï!+;Ž÷"`*ôRÉ#׿ w~ ªQ1ÔVn#ðÙ½ëq(Ûëø÷'-òPHÊ"zt¶™€ I–j¶dD_À\¬.¡QL}%„¸µeÎ’†&Ãõò¦QA¸Ý`‚…ÿ\ýRê7~iÉP:[ Å?ªzÿkdŸØíÍotÉ¥GuÂ㬀ŠåP SkLÁY÷˜”eø6&L ô›ò]a Œ™oÇFSŽw&Sì +t*£¸e·¸¶3\ªΆ®¶pJÁ³-u–\ðÇ[®AY'˜›êÉ *J€·ÛýX`­ðî%Óâ.GÓ+v«)œR¼o~ø©@úëØNæ ¾Xº`OsQ n–O2Q=h˜¥5G·9ßÂ~Ú7/œ9éhlGSuåÇ‚aJ½ 'Ÿ›[ì¼^áx^Ó Ôm?ø’R½î¹¼¬ß0|i¸ ób?È…Ïr0}® CU_×pHþ›]¶‚ç±äÆÐ(ü +ÃÜÐÛ¿ELè:B£€cùOoßОåƒôÓãË‚ e­^ú.”¶tÚYeüµ@W¬}ï‘•‹å©"DÓ 45tLöÆYÕþ›Æ´MÙü¥L«öYÁjÇð‘ÂW@ƒpoô ñsá(œ#"ÃN¹èÐ?H†÷“1M£[ÍA+Åä$EZ~jNì:¾Ê¥E¿«Ž^ý̾ÓÐÄ nÐÜâÒÖBTŒ8̰è÷6x /ö ¾M¶Ÿ$yÒ¬@Ç)—ŠqÌ~—¸¥®_Z’¥?zŒ::›JYOÒNž8÷Wtö„ÀP‚Ä£Ù•”7­ºÉÔ·<]5ͺÌjw’Ý”.&Úqò«!¡U¶‚ô¡4ÉeúòÜ/­,>ú xóÌØs3e7y°êºèvsÄ´¶eï Ýo>â5i)h¤\ØJíg«õ]È_Svo>z„ùP‚½ÿÞc¶F¬òžü §*LÞeò ºgІUˆ4©©³°¿K íZÈ‹a˜vœÒå¿aád¼&€‘s| ±³‡¢/ êà·óÑ+T•:ÍvšSœÍ]oGL¸«n)ݦ¾ÿ¡O>¤K³®͵Âåle*\„Ùõ3+†‰X‹88Î +ÑІz¢Éô(+¡4ž¾gIµà,«`WMïÞnÂOwZo1 ŽMIØz;êrv²£ü+‹¿qo~PˆÆÞŠæô­º?­LÃz“'§Ià°_ûfNço2zû8έù)¼6§æ—]à-Ò8W†þÂÅûd7¿º Þ"ãôæÿ˜­ý|vú›ô2íN•Ið÷9gþÃÅÝ€ +ˆûqÁ]ã×Hb¢QúÎüüénPéÜܹ_±š~jëêª÷dQy_ÏÑkk·Ü_V2úÊÇJâWñðrþ”Á]ùt>þ:‚U»Û†ü᰻߱»9ü§ü¾6l§­Ö.ÿ‘ëçÝéý¸“•Óê°TÖêãnוwà{y­‘újNùå ¬åµþL¹Jb.ì× Я 3çå·÷}Oþïé"ü÷¿BP`üéWòWè±[¨Tâ‘"Ø{-£Éld#sO«"ë™ÛœÂðåÒídø*h_ß ÎõN<èŒÄmÙÙÉ,ál“½JY2Ó‰n_ôuDøÓuXcˆ@È; †¥ŠÕÑÍ©aW±¸\s‘¥Ë‹jj×Z\YáÏY—ÅZ…è^¤Åm0¤FʉEOÌ^îäCê[:9mÕÅq1›ÍTމyÿ§§[˜VvEa² — è’ö¢àoëÔ?6Ä{ì}Ôæ£L/A7´†š¯'Å–º„¯¬Rðß‘qÙ£?}šW?u3—Þd°fÒxë-Kà¼QžË5^Ú÷X_bä¢×Ò¼é¦<«oq€ŽŸÉ¬Hx8•Uë—}ãbxr¾‚ÅøvT¼˜.©Š’RÇÊ4ÕjBLˆëúŒÁ@T‘y憉²¥Nßø2–e?B„…Š0:x¥ÌBäÀžÚàýÎþq'ÙQUç,Êã¨ål;¶ÒÚÆ\Î?$S/ͺU9/’¾wšXØQéK[¨£NW<õšÀPGÏt©Ç·mmu”ŽIwó½Þ±¥ŠÎœ1æFÞªÄÕq¡í+Ï/lRý÷qu ÂîÈÉ >㈠+0ÆÐ&]YÚòz-¡åz1q‡¨àT­3uªLŸV 59˜Hýº ÔY ×ÀRc¡×®×žš&üŠš$ÕZc·b³KßQUovÄ`¨‘ßËË¢=•óOÑÈ™ú´7×QZFœ_ÑsÝŽJ—jmC:Ý‘x$w£BŒ)¡ætéÐëÙSEWÆ#É›¾-UlÇŸp.Œ'@7XxÞ–LNvøUožBN~¸£–矑¯‡# ¨±£•*US: +ø`lÌæÆ"ÁàØžÊyZúlÇì©Ç9.^âvTXlš;¹|[¨µàבÂP'$Ý•¾ÎöÔZx&•J9QaB[ÌíUÞ+Xj·í-pÔ"ýDÏvT€QRÀÙ[ÏR¶N~šeT,õÉ'Ï8jr<¢¢L³˜+ ä W +¶Ô1Õ›a©ŽžÄÛQ€Ÿ£ôt¹ íÜè¿o“îeK͆s,õ ç|oˆ +0÷æ>×é… í©¥>ãžÚ;ª¢4Nº +ûoTX¡áù!BX3wé8‰æ +•TÎá ©Ôµ™ñ}Í„aP3Š…J&# +QÑ}³M¡èî““ÀIƒt/_÷ᤈé_Aûi‚VÎDL—ÞW­2ýÔ'2šÔzܸI•~z ä®N»_ ÑWâ÷yƒD…NÚ¢ÇÎØÔ"58mòAO‡ãÌçÏ1zúñJᤠzÖO…qÒ<ÄÌ£ófÁ\a‚µFºô1‰[ÇHWN†÷ÌJx§­ºL­í¨b>90-ûŽ‘®ýÌè£æÁ:-2Û÷RéÓØþsVm^µ¤¬7ë+›¥D°œÏjRkdOð" Ò\õð xèg4ߺGòµ¡•"m‚ã¨,O<È>™GU(›(»>ЛarPrŒ;:yá%]êåb£â¸4êÇB:~¦K>øR(ð¡iñîT'¬ hgØ‹¦ETñà4¹UÁP=¾÷"§e†gxF& V¾¦¯ã¬3ôžYùÁYrj¼l™Ê¯â`½™V@»Vàd¨½Fjdç ƒ©OC`ʆxª£´a©h²Pµ1 +Aã F¡%Žºø†*êÚãqÞ¨h60öia‹“álºPÅ‘švøŒT®ëºQÁÈÈ-T€18ÎjÔ 
gƒg{*7š`©À߬¹O3›‹f,ÎkU¶PƘPöO…Öðјã¨mmÄ5‡Ö™¤tÃÑ›žîÉ®KÙ­CçÆ²”{–~´%çoië´ºÁJÑë¬å£„.wôÚ;)è§`ÙáQs§Õ™‹og?ÏP}†º>&ÈKz§ýz4žª¹×­é\õhƒ™Îs€ +ÂV½›7–)ȯFÜnô€!ÛrмªS„ãaéÒC_˜Õœ^Å~ üëqëåÐÐMÃÚ· OŠŒ0UEƒÁ³‚=Ó¨úAsÖÕ_Šó͇“ÞçC¹è†–9ºb§“Vltµ®«0jQ¥=t—2{}#¤˜Ñëg‹×oN39=t [^/ÆÂñ{ Ñ££Yó]A­¸ob8??\Ô×f«…Rëi|Ãi—¥0¡ñþúf7êrþ6Ú¥½8:þM~Ý’ ZóÊ8Fí}äzlª¢Øü¡÷é?¯—ûæ¾wUhDM„«ý¿­BÆKù\€†ˆ†=®J›ªßƒrÓÞ}#êdQçþx¢RºûDÂcë>áù‰ÀqìñÔF›»t¿15é/ À–ãî¦åVГ=úÓG[M²bëó{»`/þzi˜´Ÿ ´¬ncrÈ ó¶/6bÍciäM¡*†°ŽÇóç¾™ã/nZ>õCzú\Z_0 -®Ôÿs·ûÓÇÑéCÇóæ ûõ@³² Ø&*·  Ó¬Ÿ½  ¬S× +åÃ䯲ÄNÏ|íw!Õ{]}î`ŸöÀBsI4íFñ>]'£b$¾X–è×Bp‚ÉÛfÛ®;û.,Ü×­(j#.TkW¾¯¼?­BÀ®¡j¹¦`MûƒnòdÑZb­×”)Âz׉ òZd–.où¯›*›m‚ø¿2+ßoo-²ü8QÅÞžW~ÒëmN¬N·ðYbccÆ­ëÛnÃùãÂFïrßü Œßû‹–—ÿc½Z»ÒØ¡èod†—¼FmÅ +-ƒ -p¡Ú"^±^­òôÿß$f’“É0|‰]¬éyŸ½÷õ@SD§²ö2éŒBç¦5ž`¤ùdλ+šRí.ÚS1þswEÐ-Xš‡éfÜJ«ÒœwÝ*ƒß"$m¥œÿ4<æw1 ¾4äœÝ@ a54¥÷ ´ž•Ákd'ëÙH•ãµ–zÒa„zd%ÀYû§­Kb½‹@Y —¡T^!7¨@£Œö-e£†²7¾ÁàHô¯I$È †„0eÁ‘ÀH8ÕéSÇâÜ#,A®JKåÇフð’£3Dÿlj©tÒØaÔ˃qMßCÇ4¥åûµ|ßS(’@‹òrá§E¸˜H6pX«èN à‚ô¡Á. àå„t ¿J™Að;ó ¥€Ø^@æ—ãÕU>Yy©SÕv´žáÍ$¹^Oæ2J~{¨ÉÚˆtÝÝOSgëAÚÐk"àû–âÕFí~º§°sÖÎzÚ…$g"Ö+ƒ/¢$NîãŸušÁ€híîµ.Í¿vw±ð“êþŠmÐ×E\äÆtûš_¸ BckùÖeHÄάr¿Û¶š\kAwÎ Z$“† m)üE+î¹G¥úÖ†ÆûfC#œÒí…@zGb ÞÇ ê±ÈcÚ8r)ž=Û©ÿ@r râ+LCýËÈ€ÌZõ/˜"äHö&ÌÜæ#G;¡µq¤½ZC†tt­8°ž BkÈPnËp( +x†ò[ò£¤iÞ2ºnBñc1žÞ ÇbœéÄZCoÉ(’Û´Ú_s“U/›ý/ZÄ´Fþú• ¯V šÞf²ù³RýšÜ[îùæf"Sî’«‰@gÏG Lz£17Åøq˜”hÔоþ9 Àô€È|a,X7Ìd€añ1iŽELU¨ †S¡ß~/uiŠ"4ÈTCÕwuO÷òi÷¹óÇ#@c{ÞÓØ3­%;ÏXB5qoø&oX³÷ ÈšÕ¬ù†Zë˜&d-Ô, EFŒ º#¥è¦ÝyS ´h Xû‡%>þÑ /¦µ¸º••ƒͼÝX»Éeë#éTÓ>Œ&ç‹EéêÙþÛªÑÇZýdÿëïz·Ô©}Í®Ìóz·üiXýYv«?k_2è·\ý¼’ù÷ü¼’m¥¾ù1£Öîcù é>–¢–‰¾k¾e²ËÏhoö÷Ö‹$Ûþünæ²gƒÅâËÞmƺ|úâ–9ŠsM`É‘ü©-?_;ì'ƒÔÙÍñsêôß«66¬×£çdìËÞTîõp8 ^Y¾q;^#õÇ,è5WïV®¯ÇÑØÕí±à•€ u|/K—zµN¯†õ¼.–ÅÔÒõj ><_žqë8¶ÿrEŽÄY¯‡?ö=¯‹û|ôšúrúv#xuF€¦ÜɽoA¯¨ÂoÐk®>ív‘ Ýã¨q9y~„¼N`¯Ÿ¿_¯È ã¸öýÖ‹¼ÿnŽ ¯×}E{Ký;Êø¢úS÷»S­ïï.½ïˆøÓDÇù”ÊÌËc×xacƒëEéùhœ-Zy{ÖŒÑVÀZƒGê¸a!¶cLEqË*r—=â¾Çæm5Ÿ¬¼Ô™!µ‡ÝPŸŸfräïsðJÐÉ9qQXã}³¡ ‰ å^M•^O.X‚½o–7½B­“ž4¬8X*O«Kå cºž’R™{¡KEb²Êýn›ê´¥ÈŸwÂü¼£ žƒ)ZŠq-@©XSÂ}ƒ¬ùͨv†Ã™—!E1IÝ ut}{ªÂ-ԅ׈‰¸Aa*?OšÛ•ЇÌtv†éƒråª7·¿kùÚ¯¥ ^†$¯Ý2(o +iþÕJ)}£JœåŽÂZª¯µÑŠâÞˆÑg<¨b_ 09áìÚ[Ÿäî¢þºÞÀÏ ù¿ØpÁä&ãQÏ7¯„Aò’;j¼·‚´hbËôRªftΨI'c3Šœ³+SS^ÒÚH óTŒÿ\¯§î¡¢°vž¹Ma©îcí0}Ãêå‡ÂTP誹Tg­Ð…¿5&zè-kï^Kñ°a"þ͈9Í °“üöP“ôµ.@!Okâ±çÅö`%¤ã6Ž´ `šÙçiê áa-MFý]Zœ3E£"v‹à4£ÿпl\›šLcƒôÆGc<Ú +ªÃ!z 6ÿ.t˜ÒúþõóÊOc¨šì}ŒÂÿý!0 :PËÖ Ðdn½’„¦\ ­kˆSCf31„æáŸ ëê†JDøhóð_HÚ¡a¿6®{x ÞCrâJëÕ?ØÙDô3¢e- šÂÔZn§EËï.ÍÂN‹f…,%X:ni2ý¥“ÆþËz–ÿ4ÔBÞNõÝ@*âÀÖZûMÒÇnVcKÔ€ +{pÑ ²çyß=ÿÓ~+Þ¤y߯ʾc±ÖÕr X• ÁGÒz6JKWÜúnMH6\Yr“—I?\—ìþÔ—o\¡#Õš…Ê`ä»·›å¤:ªˆîN ‘™B9Õ)§vÒ’À¤½5óµ_KÛÓ’Myì–"´µ_Œï«ÏY™|g1ÍëCS”ïî.Ñí¾7bô¡­Ñ[“Èwöö´äÛí“Üa\™\ ?Xuâ¿7`0¹ÉxÔóÍ+a¼ä㎺ŽÜð[¸UJ'sK=>kÕ™›t2DïMäæn®Q°0Hmf•Ös+ º8 Ä/<ÐIóB:»)¤g-^H3õgŽB!Ý…´æD`S¼ŽùAÞb•{ŸL¯›ìÞ ¥“®‚ÓjFëR#9"ÕÁ‰/^'ÔÉùOi‹p À7AÕiüdùú:%’z‰‹×{2Ð ´ÁÈ êVìhµáI+ Å´%¿µ… ôtž¹ ¾Æ®êtÒ]›Ä€i +ºÿæ3 ó‰÷ÍàÇ– Èö –ëËé¦px8xPK;<ÿ[ãä(0TÙ¨Õ¶F0ä¤ášçÃNÚ9.d´âòž¡ä–á°·'1”ÚI^iÁ +v³•¡pmgÝhÍÝ@øsU?{Ù@kC·ª'$¬™¦Rzë¡y{6 Fë-À10(& ;¶< +½i‘Ùdµi`§½ÈŠã®^<‹4žŽµxµæñ´é-’ÙöÏ:•pg§Én­¸¶–Þ}¨6Û†â'¹IÑÛ…VìUl’ôÞ†¾@YÂóZ㢎HÜž š£šÌ‚U4ÔpP>ó‘W|ÿ®d­ê?>ÅõV\G¹yì/©aÜÀ€×™G×ëJ°Ó0u€ÓÁçx63A¾Ì$¤AAÇÄ-;¡(LAm6aâÖ€©ø7¿6ÀÚÛ:Wukø0­¥f\Ç’¾ËjMb<9"ˆLZú`·'(h[þ’åìÃb^ ÚÂU1BŸøm´€«h¤c"×é†)ÅÞ½\Z5%‡‹h´ê;4Š_ +ô³Iº £)s(_ +ðñê’Í\gªºé4²»wk Ÿ[ÒœGsãÛÉacm%! 
å”M7”Í<ÕÝx´¿>ïeTÍØhÐomHZƇ+ò‘¨€­¶@’Õ.6núéJ´‹+ {ZtáúXõ÷K © ¯›é±lúÎ}ªZüã¢ÆŠoå&…ºO3ÉGÓh{ÈCPK± uiü­hæ²a„›Ö”ž–n±Q¯]뽟ª+ì40ºaäúeãaýA}ÊþîQI|` õñZ‘øt¹îO02…QoITk‚ÁßêA‡`M9˜]M+â¶áÿ*ö5Ohàjf&ÑR¸õW»IUJBªK¦ŠºBÝ—À­ôû“¼—LÒw1’'S…>™lvæºá ËŤ/£ÊÝžþÍíÉNÚWÅÎ+úÛ$W¯c ÍÑëÙxÕϽ8QÒœ‡ë²ñÅø²Ù+¸d¶ý³Î +uš£SJÏ™d +j¸ˆÉÄ5jDü­Ã¹Á O2û^!Å>‹Õ>ÌÕyìG«“SªÊÑ» ðÛ‰±ä ™›S‰Ü`45PW%Æî…ÙR ­±9>¥71“ÙLcäûks6Û:Rœü“ʆWVHgsvú2OšlG eÃÖzUZ‘o¯‚tGw9§-æU(n EÎ)¨»"Á¹ÁÁ¤ ; Ž"S:8Q—¢Ú˜Î(¤,—iTºKAᓬˆÛS&¤›Rw¾ûÓì9¯Òì¹KnœÄP«Û™*7™pÄ5LЙì[÷ 9ɤ6>¢On.ʾ‰Wçy'æC0ò‹ýО+Ù®Wø™ZØZ€SPYI›G×”êV{mÚC$Ú¡µë•÷N9÷0…f#•_0á-rnö5DªY¡Ã7Ä Á=Üsh5´7Cä˰bKu³o3 5RÈ2}â˜2u¦Cc£É(Hq!|OPô?-££‡¹m@ÂG>ûŒÚk½ 9*±ûÌûFh¨=óºD7·'Ø3ž ¡F’ºÖÜ õ rÕ¶¡ÛØhÕŠšdSÍ@]•, ½]·z‚¨þÝÔ©~t< ]ƒQ,èâyt†LUC‡I#ç¼n‰MfQèÎ0²v. 0>çe³£)sˆÈ”Æ–¯TB©lÔ¡"“2»wœÚkcͽñÌ9@Žü8ŽMåÆS“IZ6óT?ÐíÙ_ÆFƒ~Kòö¤·+‚AK,ŒÈw7Å|K±ëøaA‰F‘bÛŠ¼##Nk|ÑȬo®ó±æµ +±ÀC‚¤ó‘—ˆ¥¼lb:mh‚¡)kñoNm€©·u®*Nšý0­¥f\kIßeµæÆírÊ&9“àQÖ^vªi®NXÐ~DÚgP»\4a\èb»Éú’marbO]8“|sZÿfšu¨ËebåSÀ1“W•‹®¢'?áãV ý‰Tý®™0ðß—H²vá³+è§4õ—*$©p躙/øÑT-VùŠDórJMd.œ”B’®ŸbÑëÎz¹¼RýðóÊÔ`ÁV ËõïHo5t•øûÄAMkFkÍÿ½:À8jªû‹‹j¨ÅIœ‹+~><“Fûã•þꎚzöa¨ç¹g õÃï×Tch㑳 “T’o¾/;ÖÒ?õ¸ËE¾ND@ÇŠ±jZó5¿ïÔîF£7ÜXIÿÝæ¢FÁ³1õJ‰8¨hu’IV‹µuiüÏzµ6%ŽDÑß‚"$à<ÄŠî(#ŠPâLI)Oÿÿvw:éGúv:—Ԭ޾ÏsÎíœÅÀWc?²wB_§'ä}w ÉÐf¤ªžßT?•÷øñöú»¯ûËG§÷‹!?ýòNÎ +Êzx.ÙQú¯'ë- ¦õhœ|ñï+ÑÈZàì¤õètP#rM{œ®Fr]Ã÷Xñ鲬2PÄúŒ{5ü``ùAMX.UÙrZúR±:ÑÞ¤ÀR‰jÿÙöË!ZàO—ê4 ^_8Oã-ß@ÁsŸ¾N»¦¥bPôð¢­g÷;É0K2<´deŒ3m¹*&"kFa…7ˆ‰<êæ;)•-Å„÷†#1½`Úu¥‚ÔõèH—†4ø^ÑÂgßÁá7®µ§T‚ xô3½¹Õs5W4gØI7@6þ§í&—>Ÿ8ÊäÊvëÊ49¶7ÒUÑÏî8÷üô&— S•¹qóæ{èàMÇòJ¸y©W5^Zc(%¢7Á–ùýZˆ*JVBšM¡Uµ#HG¨Íh¥?v±³1»jrêÑ@(: >Ó'{p'“Â2lÜ Éf\÷ôÁ=ÀЇ>h¿«[:)îI1 dÃ5èÞ$“ÊÖŽ÷®m³ËÐÉp#PZ±–…ÊŸ“Kuš™ +aÁ ýÐròEC.’öÆ;±f߈Uúß;+ɧD7)8ôû†ÎÒýj+âµ€•}n¨ü£ÎHMž}ÿS9gɤT‹ÂŸÄôõòÅIÓÉžÌ|XV]ˆéß>ÓOôŒßA¯Ùé?¨ãIU•ê¹Q,wï®u^Ó¸‡+ÞÜîMð2”ëÌÑìtåµ=0r*ÐÂn™ãÉæÎŠ&Òî¶EÙmÛ¢>*-5šä¡¶1Ãl#§7›úi1´MÙ6âÕâž]²Z÷›æç©ÀÛÁ&¦Ÿ -¡i;”¦ÿ6¥ª3Ÿ&y"ü¡m<ž«'bS†ùPò ëÜ”¦ÀÙÚ¿ ™‡Û •%ì +ºDЦêt£;7”¥×,Tz#Îx“½"„DL¦T‘—’õÇü<œSPJÇ$%ÌifWë!Ü¥—«ã!dW:$â¡¥N#_ŵªlÕV²©zXÿÝD0 Çsh Ë–3hœk +ÈõõÂDÎËpòH¢gÌlÞë…†µCÉC$T*ãÝ×—Jò,*N3-•w¿Ì;Ð9¢!Û„z £˜\sÛ_DO8&ˆÈ3ÑJ5 +¹CT1)¼+ÔY˜Æ4ÝøÂZ»ÕvÁ³ÖÓ Ž¤Í‡h_~¨E<À´¦i}»ÍbQ½AV‚É„§7l=Šñ’ç«ïW.ʈ|‰÷°E~‚{C^ð÷‘2ÄSâ€~¨ÐB«øvYŒŸ¶®ôɹÍçLîØÒ&w€? üiƒ‘àÞLÇ£Ç|ê¼R ’—zePãÅ5–Sb{£OédYTM÷3½É%TV?CºuÖ.·®¬í~aÐ3|m°ÆÓ KK°@âò-8ÆCÄ ½ï¢%ºùà €›§"͆½%úOLDéö@j¤¿7ì—2äCsnÄʱ(w¾%ûI>m×Âô×BÍh]m›îÍ6ƒJË,éh0/ìÓÂ5~ºžÝo“¡ÎÒm,:é[uð, &óڈ©îù¾Ê“½ ôRÍ Â +­±›«‡‡Ù2IŒ±8ÐèÏ)ýD˜)_ï·´}ÂE`<£x´ü· >p2î‡ù4ûF,-0€¥• B3P@ý¤á‚Û;˜´*.¤õÉ]Æ# ¥·%‚rJ†@™HáxJ”5â´e¼·i@P93ê„ÐDšJ…°wGn9OÔÎ9³[¡ùÖOEÌ­±õñ›+f,¨7ω‘I“ñ‡9 ÈVà ç—ÿ=ƒþVšë´pÆ|[4Ò‰ÁEÛj÷׳¬‰­Àên/0Ì$êÞ$“ J‚õ¬°5  ‚ÿÞ“ +Γ¡‡íu <†„Bí ¨«Ãpú éöü,‘çJ $îãáuÁú™Ÿn:GÔ§êØ¸o§y¡ûµëþÑ¿ùáOwy§î#régi¶Œþ3ΰºØÅ>ö*ËGÖ3Ê>¾$—yƒ}”–Q¸oD4£ ‚öQ„*îÀªã‚÷··ê½nô}”PÜÛ“m¿]¶ +´(@Fû(¡ÈVÁûbüƒP“µD6‘ÐŽ3mnp†ø'maƒÐßž“c‡gh…Ó¾žsüá guå÷ÚëpÚ=ñ?[›:i÷zÿÅü (»u%A¹>-Òi3\ËÝ»ëHÂ3P<9Ø{Y?èO{Œb§+¯uý)ØX…B9M4Ž'›[煞Ȕ©XtŒ8©)-Á¦å_ª¹OÀO—enÒ~ÝÚ™Ž!³}­‚ûˆþæï£”:™´ûˆòl }¬“5$æ ¬•,Øf +ÖÀÉ„Vs´ +ôFA;šý¢>*-}ÑüE;žƒhÅøië*lú¤)Ú×ÝÜž¨›Ýl”Û ±žVyݱðð Ç"à=N“Lc¡Òëg¥ ýíQ¯-°cäÕ󖈪†¦xRÖ˜¯fgÓ$Yûg/0<)àÏ ¤š•ÞL¾›¤4ñ‰+¢¡û5mdªÆ$°ôùÄáÏ(msŽfàlú‡åÒí5\4_GGéP&™¬óÓñ¨£x|xh€\TsGLµck;>stream +H‰¤W×vâÊ}¿kÝYHB­€MF 90&LNüÿénE|ÆŸñ‹V'UuíÚµUïÝ Á—œ˜iL% ÎÚ2€kÜÿÿç§rL8Î%E‹{ç»IÏC|0]cÑ‘¶¨ ¾•Y_éL¦Î®5)Z;ráì\Á»Q"Gž±))®ú¶p”ßA7@6sT¦7gKÕ‚ø¨ékJÑK 1ÕJð”4`B¢.Òm´&s3Ï1‡m8ÖÌ×&Ëœ¦IíÍ{M•³\ìWºáƒ’'OEX=ªRÏÍ™FøGT€Â+Á±º&"ém–JJ—8•J¶Ôófwвe²e PiåAá´©‚b+΀j©>ÕÃû +4öË5èÔ„8Šæ¹´é®¾~=p݃Þ8}¿˜^ é. ^çžûï`Ò®`*ôƒ`Z µ4MSXM’Ú;ö´Û$Òƒ °bûªiü5ÈljC­Çߟ ›ÞëpE„þ~ÓKîe³S,Ö;¿Çgë!ïþÍ_ãÆ|ÛϧÂ0>¢Ý¦Ä>‹õ)N áÁKd ÏÏ~à[.àZós=ÈÛ©j@7ðfnª:¼‡®ôz[%y ¶¿{4ޤÛk*¹OÈŸà•!· ˜¦Æ¤¦ÕʉºùW¸"Ù§RBð äŠ +ïoePlÔºwü0Õ8a:”. 
+•ŽÕ_[˜ðµA (½Ç‰¦¯Ê^À6–´M EƒÈ=tÚ6Ýwt  Ž¢5WpòI¯ÿ‰k€ 7{‰`Ž­A½1-‚†Pn_>Ö (iWF$Ád£$Á°Á:ÁÄè AOÏhÚC»y"|ñ¨g—˜IòÂmóÀSìz™÷qá—Ìmf,‡"œ<%«„†Sݾƒ“Ι£W+ Žz°lF€Ýy"øuÁ?\EX<ËÙ$d޶Ë0|C%))}Ú÷à…BSÊœ&B š2HŒ•ªhÚ€âpUÞv—8`£%=œªz¶òë×+Pç¥~aB¾T8§ÖU+a*Oûo…Z*qtÎ?·7hgºáè.·°÷"î—ViË¡Tu92½³lbSL0}^,B¡„µöDهǜ2N¤ k¯À8{Pɺ0Å* ÊQ¿Ž–L”‚ð] 8íòÈxjß5n€Š…ŽÐðñx„ÓciÏ-Œ@3Ô¬Fð)衯Ãið¯úœƒšö »Bæùhä‹'}ÒÌá D£]ÊMA#‚ÝrLˆA7Éå˜|Ï­ + yóÆ¡y“FÍÀ˺a Çå<К¡ÕÆ=mÖ±ûÁ(Ò´¿¶ÛŽÝ‡n:‘ËvÉinýX9gŸ³Ç_¢ÎWã¼ü#B#òðä[cñàY ”^_EYšºÓ=¯Òvúf°Z"¸ÖŽ ¼hNåIc4óô<‚øwÂx„>òPP¥sh1à_o©…ÂÎWQt +½äßÃ2êF "s â h<2F3_.m^g7ãíëÌ¡ùnÆaÚn%Z…:ŒÚÑÌ…L· Âµ)Üx2yv– +$VÇ`·RÄ­ëkh|.‹ýôJ¬G~5gýÿîן¡›§T¦/f+}àuõS95(€ôÈé,‰-gœo°ôàïø q_{‘{-{UT7ñè¹™U…u+›%Uå—\+Ëä^kÂOùƒ*+üsžŠ?ćùaaÙÏ{7%÷Zšn° +ZPæÄNÐÔO3sPç$ÅoI§Yçý¡²¼Üë†jn<Í€­šº¥s‘ <2` ÓÄœŠ$ªjBú„¦ Šƒ `•¶3<ÂLë÷ZŽ OÈ´{‰à rÅqdˆ6X´!£KøíGm´‘CÓ€ý@•%]¥‹#±X¡‘Ê"™þ(°ÆZ—‹ )KI=57Ýe˜£& ¦‚輂ÒRá\²[”/°æW ¸QÇš& N¸ÄPô£¬YâÅm¬Í<¥3œ–Œ#¯­¼÷ œÊ8dÃm\Í´…ß Kço¹4Ò?å,턟†™'*CÆÒ8¹¨ª»¦©ô CIÅJÀ¤|àhðŠ7åçŠ\IâŠçÍQ»RŽWRoGÄ\ý§u1Øë’|’›WK$?­OkVnÊ|1.,/òX +m¦ÙâÈÖNbŒWµ  `7tB寍ëƒ×kînÌOSÏ ä†Óz×>ŠKDQ4[aÊP Üäân^È*Œ Gcé·M»‘M±#ÜÇê’}ëG7Ùiv‹ +—3Xz¢¬s}¼æëùXøyxX£ÜÝ6Êt\J#:‚û4·µÔ„«Ö»°ÿ{OÝ÷¤^¸»áž ù²+ih (©†c … ¸˜†mÜîl¨ÕÝ B9Áù5•„®ó°ö%Ø"Ñ*…F{µ×`áÓ þz^Cš¡$PS†.~[¯Ãþ1µºô4o5èÑuhîk`i‰E¸Ëø¾Yµ›åWèÆÕ§’‡¶Õ“´áµônÝýC…?æ872YÁâ<¼F<Û.Œ*@¯«&¾tC†2/{'A(Ò°šðØØARöoMyõ˜^ß5ÔŒ{¬ë8é«…] ¼hƒ€ÿ¦ ÎÁÌ c x#7âÅÆaì_µl†nàuîÿo¢þ ‡Yªîà ¼ì¹˜jê"n¯˜ bÚqˆ8UÐ'N¬bA“/ß#ÃÓ 3Í øpp Dý%`ãÐÿ‚ ÅÞÉrô›ú&Ó>³18;¤üFGF6¾¼ÇW6&Ú7ÁœÜ|´1×Èúæ¤ã®<¿Ÿ‘æèüŸ¨¢±ÙmÙ˜|RßE³¹Ô?ðÊí.°Ïe#“»œîlœˆ?_â#šfskÛÐ<–ÁÁ±ÔÚàÍ}n0²i•Žz¾$QKràaPuêÆAõÍ9…ÑW >RÀë wcWü!íñ“áÕ7îsˆüT2î6 õóenU÷Ì «Ã wˆ Õ©B,3’0ñŠ]…_»Zr#‰Ò:¬×HWE7î!+`è²äÔ™]:7tzÉr´bê–³æ¬hÃ9j‹Í!QðÞ( ÀùüYôtø6Õ©k04Ý6DÛÆç +ª ']z‘—O ™¦ñQE,eþÁäåÐ)Ô`>.>ÓŠÔæÂqŒ)z­OÌ`d¨)§¥¡• ŽÒNùeÛÙ—ÛS)øëÎTüw^ 0nµ@øa^ý¢n[”lÆÙì±ì2PÉæþáú»’I¥‰(ÞlQ2óÃ)‰æð4Ö@.ÅÕ½Fµ^W| ¦+„Ÿ 3pK7-Ê|Jq2êB +dŽâÅŸžFîYÀ‚ëKDzà="Òþ®ì ý}÷úßß‹Oª>«ë]øÙÌÏsQnžÝõÃÉž~áq*¸ñÀë58ß„ÿ‹Úû‹Â£åùMíð hÏ.´!8¬…xqw(ŸyC°,•DÚ¥¿2Ãkå'p#ÜR‚ÅFÈùUOròªÓâ]Œx踆Fh“ÍMò‹zü²P£í²t/x7L5_7Di@ï¦1@VßþÅŽvZÆi ÚÖÜÓ°(8šÂ;Zõq)Ê6 )¸Ê†òBú‰mj_¶ëb.%˂ܯ)¡,Gü Ÿn&ØyX¬9îk•‚–˜‹(åîmV®ýœ(𪆑lj7û†$8ã$¿³Ž¸Þ¦«J%ÂjÈx-p3,›¼*JÙÓà°×þ]ãÌÃB%‡ÜX3í/çH2¹¡ú«-Yû {šýèIöÛ˜¯·ðo«ÏEÚç5û‰3Ç^öé]JÖ¡¤Ò>âpßÕÕmLÂu®ÙjÉËÐËOàÅHááB@^sÁ¦Î H¯F;ÛîñUtÈ‘ +>ç â­<ã¹¢Îi°Üd‚-úZM$ð5qÜ&ÒÒ7T@~ß*´ù»ÔÕæ}¡ +™ƒ#½`!æ_èÄîŽ qÿÂmÚ¢ vÝ´Å£¹%ôPvöe/7ŽÌn¢Þ7ÏrÎùs£*ø–¡jÁ]h%0ifDˆBÆ0_<¹Ù?ÁÔÊXç1h ¼Jžñ9ìHná[]ÀQÀ­U¡$>V*’B»MlÐÑ +Þ xØ…àËX¦›”PæµÁµ"v/7 +lÅôƒ¸‰I4:ñ£×…ë†2È,®‹Ë|$ûðÊŽëùZòÉ' º…ðÁô¬EÂ'?|ä( °t2êkõ{»ŸŒÚHû“]·qP»ü¹z‘Qõkô–Y`†4¦²‹œñp@øç9ޏµ»ÍÔ±Ló°7‡Œsƒ«~œÎ7ŽÒ_ãYƒ̹«©D)ž®eÊ‚ðtK´L‹ÀÍ­–Šûd&ºœ›Þ ©É8\AºÒf*:Û7×’ûtë¡oåÑ$,˜Šoá–Qæv¡.º»°wšÍË™OuY·ü}u}VïWžÕ ê›.iÒ±„Éû9NfNÓÄÑë hÏáV’r‰ƒ§¥ì>™Äû«ÓwOÉ_ÂI&õ(¯åÒ“x [_tÙ㽦ýH‹Ø„e§e#j™£ w©¦Pû/ËN†Æ;$‰áp *›Žº8ê í$ëÉŠQDY)êú~Œm³„-176CW pš@n>ò-ñqzH`$ñQÈÆ(Ó1Êâøc/`›°uš?É”—%ª¥ùcÒn¤i…˜ïÛ2ÐzƒoUŠdµ©<^ë!—Iô4éI³ºØ”M?6²<^A—æ®O•íD6Æj¢‡>lïÙ¼,7’˜´^Â~…!SÔÏó[ ¬ô%Ê`(n}\Ù[1×ûÿ‚ÀN§Rò´ªÈ?ÄádzõÞ|x|ýzU,·dyüjnëï–5²þsзw/ÖëáGö‡X¾¬¶ZjJ·î¶¦…ðné#E +~ä<‘£ð¨è÷™Æ¦–ú½dh>ä†gk†}ípüÆÃÄîaF˜›×Œ@ê— ×Ü øj$”N†qº¯«OÄ&%ŸhP·Þ+µX·s¥·[Ì¥'â>ÕÌ,FÖçUÅ–ÄýC^l–gmPÓ˒x¶´»I´'º·£ÄûÝTjϱ†Ï÷’L\d'èFÇÓ„tUd¾TÂÏçLì"ÐÃöp¡ÈÄËŠÉÝi“‰î‚ïþ”B„Älpø:\}>wQ6؇O¬;YxxШuˆq ¥²ûÄ%ÄÿÑ^eM‰,Kø}"æ?´¸ÐÍÒ]Õ{#›ì ˆAGA„8w^îo¿YÕK(ãÜÑ¢*³:3ëË¥2é– •¢·zF¼„ö,óè6!’®Äé–$Î}]÷dÜ·s¯ÆUÄòƒ¯Ø+ÉÄî°’Q앆´"=GÀƒF´•ñyU…1þ¸AðZ›Õamƒ@·¿ß Ð~ð}ðÿn@ͺ·<üIƒð»½÷l,ò‹m¨aÁßk èåw¯- · :ƒ¿×ðys÷÷ÚÐ~ù¥¯*W —F#˜";Ñ"M…´SÙy¢+¯`w‡ùÙ,c…{/ª¥xzµ7Rˆ:†ž„U²j 8¿f€×¨½ã&ðR™’³s:È_×¶+ééÎsݯˆv ~ õÛõàîÖÏ^¨&Ñ\ò@„L—ié,“¡.¡üäÊë ,’”•à'¿aE¨.y¦{¥0!NýR˜“´—H¡üMÇ.fÔ_nÏa%é°JJç¥gGðã2n²WÞDüŽ…*Ì[Ñ•Q‹Ì¸0mÅÝA‹¶ ±‡”ß2¤K2_?ÑØí[â;ÕŸþ‹r$Ñ<§îÃåzHV ñË(Àl”5-DMñ* yS.ÅÝ2é†Q¹® £H^Û#äµ,f WW^ïOÃçÄ+Ær?<¹^?ÊB‹>ïÑÔYq€»“Âl–ºÛã›sÉÃègô°š?ï뮚·“±=5 _ÆíEÿx +½ˆ½p{—½jØÛäú™­T²O?CWðÖ‚Ã÷Ii\³ wŽyÆKiU†V½d2Ψ  §.&\·â•ºaUzÎYHiEâ›òÍ"ÍŠä‘{ÓˆG…¢¤a5‚ ,¡O>½õâ6i8~F²ÿDzàðh rœö †ò$1s9f𑜩ùÑäþó)Œ 
íÜUº&½[Èeì[GÄÔatÜ{-ÏÔ í­¥Hn9g³\RÆhL¨V±¿sz ¨—Á",/u‚ò‹ÚbËí)šúÛåj‹hmÆ63Ôpè§ûð$x8d5‡ÝäÖ­ƒ®PsF :œx Þήh‹Ëf€C{‡Ø + eODÑ¡—i2Ü7ñæáŠž[ÖàÀÁµ7/rÒaBåZ=µ( †ÅÃGÁ@äŲÕy8¨ ̃p×l¬ †0-»$o\ËÕ‡“êSÅ«tdƒˆ¤Í:½P9ú'F°ˆ ·ù¢Œ\»ß4µˆo>AоòqBÚ4?º7 m‘÷Ÿë‘pó†—Á]‘Lb4ñeDV0$˜h×*ߨR8ëûéL–ÏInkBÏ•Îs'hë±£_ŒV€ÿ Òà…75b]¤ýaî‹<_4þ5Z*Äk¼4>÷×F+/cõ"|Þúh~iŸñ[a¸–¢p0>1BâsɈ•gíSkr—z³§Ä<òµÐ’ø÷ù=šÁ{ó¹ Ϋ¿{*Ô0¯~ñ"±#Èm>µcƒDÝ +®N<êÛͲwbôWÀˆ°ØüÜöH¿·cåiú}#Xl~¹tF6‰Íà]r«ÀêÓù<<70BÍup•mi¤Í5FÑvùCwâ> Ú9‚šŽîõ¿¥Å<¢Fét‹aüÜÏ?öLÚÆ®?Ë¿”ŠjÆÕÙ+ ™¤ë„ÍŔλdÒéç£Ïs„DY¡[4ìuEoõ4ŒÀÉ<îÃS2ÎV¢Þ6!’β÷¶uÝ@Bྕ˜ïFS)F±™UzsÇ35mIŒÁß!ŠrŒÞN>å3òqÆ  å~´2>¯ª0ž¶³ÿpà3š8`Ü@*쎑RNEW]ÎÇ8Q j|^U&YÎk_EINIPžÀ8Åt«eZ[°=Î2á]øB‡qç¸#GHs´õVS}› Š6DdsF󿏩‰†él[TnÄur|©)³wÎÁÛÉm‚¦ÐNas뫹ï›?⥗ÜK{œ<b.wpirú2ï‘ÃËቅÈŒT¢Ï[ Xš…I-‘›*dÂ)6Á$CÓᨡòÏù´< }•Z'.¶dÃÄU²‰D_ `ádÖ¤<¹b`z@®S²ˀ̡èáhEùñ¬S¿!Š7’üi$å×.‚: zuÀ€Ü05ŸdSÕ šë¶ ALÃÙÀ§ÿc»JÒ£YaØUêý1ΓåËÿí“mQ]G)@È“(ý©½jªšº5}R(.ÑFvH¤XU+:ÆP0ãØ¥;š/(® §•£¢(C%.¿Ž–’£íä‚Bèù¡µ("‘\|ÇuLAU¦Ø|ÃMhÙ‘y·µôçRkòëhRu‰Î9éo¥½ê””fßßôßl:[Ï…–Q£I†ï N!9TS.ó«Öú’·T .Ë·w´|‚ ]ÜÐ7šE«áˆ”^PiŠ{ÐÞa†‹QL Í:\¥¤‡¸ 7k–O‰%:Ú{@HôÖgÖuþ¹\ÆåâŽôR!kx=펮©gyþzÅ¥Õìh ¨´Ð ,ÎŒI<ÍjÑ«40›Ú„^@Ü#õ¥ ÚNv4'QÞòIhß/¨ÕšïµMk~ÄN\´¯‡ô É\Ghmã e0zîµÐÕ§Ø9QyÌHmB@>ý¤ÍмßZûóÖËÎùõ @ÓB„nÈhÿ¡õ +I³o«oúïF6Íi“—:QÏ2­SŒôä¾C…ºèE–¸Ã[GÃ÷RMq Z«£yˆi+MšOÜŽÖý‚2ÊÛ7<(ÂÑûŽÖB5"ׯ3ó#ŇÉDŸ UO +5º^ePŽ%ñÜ.#“Ç ´iç¢eüQ¯|p`{þyéßWýI„íÉÑõ³T÷+¥ÒÎÁΤ-‘].cêÖF,€vÔ7>DÍJk/h„šßk&Ë2y‘1];¸[ÊM{3\áÃy­µZßpÓpO8ßpóŠÆÕbÍмÞZûsëåÇÌĽ¢ÃÐ$ý‡ÔåS“Gªíùï‘ìµ.æs w[)bJv™H­Q‰—*ýH½ÇÏT±XÞ ÜbËmi‹Qa‘áÔuC$^´4”‹íx FMÑwÕ¶êB9æ^ˆnG^hk°½©·¼Ð·ÌS”^ ã5´Þ{ê<Ð>ÃÌ·áÝÌêù$Ü]_£B¹ è‘F|¯E.¶Ú^“@_”£ä««h÷Þ³öŒ‚s ‚Ãg÷ýÃzCçz< +H°3 +–ÐÓ!h‘þÖš:QõqFg(ˆ·*\1 < +ÍXñ(PÍü=£@3c¤Î£@[‡–(‚hJ£àB}Ð!kè<-ŽxFAO”c> +Îíhö(¸TõQð%ô +þÊ:½JDƒD¯ŸM¨ xÔçb×õ}zÑåƒÐ^9>µiû„NŸzY=´îÌÅF_˜%NºƒŽõkå@}¶P•_‹= ”¤S邿¼æ¥1HYÍjž’°_k²,6íå!Ê„¬«Í¥?o­ì”Y£^?‡Î9é/—Ò¦¨V¡¤à{þ»Q:ÃMÿ¾Q4)o³aض À+yãNš7öU}‰ko¼’¯Eê·w‹sÒ§«5é¦=@ož"ΰ)ËPÎØeZ—‰"!"ŽöŠŠÆg›7†CF?*©ù†e>˜tÃó0ÿR[/ž'Ƀ¿oHÐV¤§ ’d¶i‹¨EIÏZC­"Á– Î èôöR=óB-¦—hcª&N:¬JB«Lµ/qj²üIúŠÑ `J­½ Vk¾×bja„ÆNdì¡…i”,ÃpeD + +‚—BáTú²­ëÜH[Ê´­‹¦ZûÅš u½¹öçÖkž3]>(Dˆ!gý‡Ô+à#jqJ¾ç¿E¬-oîbÍ(£K«®ešˆyÞ:ú +–~{Zêo•Ýœºj‰|ŽJ†ª +ISO£P,˜ ÁÀÔ’}Ç")P"cí¸Ð#™|ù<¤Úb£_µ0|Gu0íµj÷Æ 4«&ä¾^¸^Î;Ö&À\Ä_7SA@#×úü!õŠA_%˜Õp!;,cVkÆL„ù2Ú9¸´è轉].›iSh¬TNnÖ‚ã@ö´1ÿ-íÁ®ËKÕZ*/*¦*Ú€Xέ‚1vC ºáY»{-Ÿ’ÝLþ:ºèdlòä Z—›knµh^ÁJØ `Ó¹!#ý‡ÐåSÍè—Ö8f™Ü{]ÔçDÚDÄ”ì2xó¤¬HéG5ך™/÷—¸­Y~…G× ‘vfìKÁØÐ‡é Eug¶ãF‘$¯sGFK3Bû@xd94ÙèW#¥•q¬¬ÀsNK²î0`cþ¾ VJ]±vvéã¹u-`ð|K½‘l:Tð?p^‰ª@ÇIÌžf³ +Ìq¨JœÊ +‚z¥”×.˜ñ*yA’½ƒÑÒ¦jÝE ù†È•O ͬÛKw ð§˜ÌLx¦LкÕ‰uj Û’Kò†œó·Îg[ç`ðÔ€£©R ØCLT:ÈÍŒ_E`š5ÿ)E‘b‘Š@¡^Ì\¬"¸¡U„Rܨ²Š€ØPØøM±R¨MZq˜E@÷£" !VÜÊZ|‹M³øe‰ðÖõ²D]“¡^–/™^¾,‘zb¹- !·%ÒTh/K¤•–ËÛ™“H–ˆ‘í@ÎR²Dt Y"â³ü±ÞйY"Ò‹,ÐÒoKDÐdý-õŠF_òËJ–ÈÐvY"@Y*["­Âð¶Dn‹ß–Èê½ ¶Dæ½ãe‰:–ˆP²DŒNKD‡,KDlÈéƒl¾Ú¶%êBif–è\-Ñ–a[¢£à¶DßRÏšH–ñš‚ßx€ñ4kÏ< ShÕü‰ô‚èr4H®3Ú»è\“þVšZQu·ræ4Eóäy€;¦!ƒç *ž*š¿Lxhb ‹ôžÚ<àÞx\Оåyp¡>Î!{6<Ð{Z\iá󠧇5Îýx!ö<¸”õyð%¶E;ªÝ4›™…÷éÃè ÏS¼¬C®$ZÅÙªštq´7«Ú>]«Î¶.Öóǧ6í±6UšYñC;ƒ=i|è†ÙBÿÐñ­ÄY¡…J\~-öÔ²ÿצ­æ@ÐUrxmù@¨ºúÙ<&aÃå„ “<Ä™u·¹ôçVkž2 Ù‹ìÐ!h‘þVzE¨‡ ="25FAÏE¬ZN†(ûÿV«o¶ßðƒÿ1ªž¸Éj×…åh)šÜaèÙæ»Ûp¨&M?ûêÊ«]DzÜæü7´7$êwÚ©±›Oê ýÿ®"õ N_=›l£æêˆ"‹d®[%P˨ú`U w²¨~šÄ=L(‚ËTÁNd ÿÊçÿ¹Q ›ÜÒëÜÁ¿tìXôG¸Ö>˜§ãGJ®&ŽJíÙÈTŒ ëq4lQÚ"ƒ%`RR»´v@o2ý³FBQûžn&ÈtžÍ'h͈Z²&F‰_T±8¸ 5„²RWd$CÇP™„!#µ> ZJºÏ²(xnwÁ¨«¹…=¢†¬Ñ…$ãƒVþhÆ[_ +y]#æ0¦B^ñ@êLpQ;h=ožýuçkÞ3¥fbvyÈ¢~“êßé$Äܪ~nؽ]hL¸ô[ +Úµ^,ß2‘Ê}AæÌ‚òG%ʪ€~(æÇè}Ušv*B"Ô*Åd4î@6=„QÞk²/n\j%õŃªPp— +˜å/ 5æ¹s­¦˜èµ0×Ã1±k®÷ët=¾è YXÖ>­ÂˆÞdú‡Ôg‹×ˆ¦I}úÈ1'ÀÑS|T +–xP_BÓ§'•–5ö±«)R+@PwÇ¥¦ú†F(éq”RU›a…¢9ÇšvMÄd¢¹!=þì™÷þš¿:¿xâ’b­¹¢vÐzÝ<ûëN—ßšêVv@7¤a¿IõÏ;‰Au.U–Íø+–õ@SàüGÀ'5á½MD _º›¬ 8:ºdËR«¾¢Š@h†¸j€3 -Ô;dYP›Bˆá_Ðô0úÅ‚8Kx u¼Ü%€ úËE!ÊâIVUØŠQÙ×ÅQ1Iöû©aŒ°×K.«™\W3½Éö› 3~“x°g£\¯££C¹$ü_Ïß$]B»%^/öU{óC"Êü”xª^[tÏ#[Q£Nâ¹kœÄsñ,=ç¢ÞÐyž“x._NâÍý–xšQOõotS'ƒZzH¼ƒz‰§h½$ 
'ñØîá)ñÌ)<%žN–:¼ÄS7/‰ç¡#ñê$ÞªÄs—,‰ç¢9.Ö¬î‘x½9&ªÄ;¯óogaK¼+­&ñ¾gúgÔàöRµÔH—55´{[Ç2êů¥†›ZzìðEBŠ~‘B!ä ‚bä‰ûh±¤Þ{ h9]ÞCsœ³n1¹kÜbrñ¬-ä¢^{[L._n1Í¡‹è‚,ìï¹þVBÐÅÄ—[Lu‹‰hâ2!CïÞ‹ Ь¨_LÌ­Y:¿˜H¯!Ý/&N)HV¿˜äÓAýbrèZLç’½˜N4~1aÈa‡D¿˜º8.Úb:ïó‹é$b/¦“Ľ˜¾g{•*à¿ÄÂüß?X­”leüKZ#Ÿlð%”= õ€BFu|“å£`r{(C$ŽŠ}ögKžZ OúMôeháˆQ4¥ŒL:/Õž—‚cî!ÅgùÅŠ«#[FU(F¿ØàY…‚A7ÂúÁ¸qgÁ‡•Ç/f0†ãQ +¯`?ˆŽÚyºp!õØø™ê!:ªÚÜY~+§…Ý5ü" ÙèW<Ò¨‘F=Ûöël Æ=49G±HÌ +ÔBŠœN¨¹ uæ+ Ð28¯™Ø†çã‡Ñ‹¥¿¢Ý ••¾ëôù–_Ê'æV¢èU»™H¢ˆÉúÿbHÆmö9¢”ú ü P+_7ØÑ¬X¨ ¶™ÊÊüçbF~®Ô“…IàéZ2raHG‰ ädO\}ÍôÞÑÑä=]Pæ.2ºnÔú¬[8”>©Ør°K„Š;á lâBíÜ’!ÈhÖ.ŸÐ¥ŒÊº"9hV“1ntWþë‰fÌA;nMJ"ñ‰ +±P$œ†¨Ät(IߢÙÁïpߨö–ÂÕ€„ЗkCŸ×‡Ø0ÁT¹åf1Ó°pëX¶pø‚ð˜õÞƒj °tùrŽF³.ÑŠrçX4ZZú4[GÈŠ_¥I61™*Šñ©ÎÓ¤Mi¿ãôuþh˜/Úóëâ5êhŸC°Yª¡˜]QO#än›üŒ‹Ý9Ÿo™`Cä¿ìüN…{¢[éÝNn¸¬£t»#Ù¬„Ý1š-œÀ]—˜–6¥ËÊÂÍÛ´-gÖ¶;Le‘TçQd TQP¬ðØ| Ëú‚°¨ÎÙ™´$’f0½ÛšƒfIè`÷:Eó‡X½¨ŒaU_¤ _î,¾ÄGi ÎòÜS;žH†ñóe+{«Ir÷¼í<¬³{`éã-橪œptÂrR·+j­·}/ñœ`K1i0Z«6!ðÆu%vB´½[!f!rQ¦3)¹®št¥]`ç¸é‰‚'Ñšv¶ºP”¹ù£>'z¯WI690g­¹…鉖ìhÌ8’¢³3ÙòE$bã.©‚VÛ,жZ6Jú1§¿CÁ¤FϨ7çÙ0=ò–!‘嬬ÏTÛ‹þpLÝ•0ƒ°ª:'è.à!™Ã㈙éÆãBÕ„a¿f+i‡^½©ò,?¨2µ&ì/CÓÕÐ8ê^¾jçlËðþ¹DUn[ņòY¦ÕKèj„G‡>‰´Ç:‰¡½$ßBAªšêeßHÖ‰RF\mÒ¡î>“,ûeÊ2Ö2Ó‘úÜò}e2 +•/F›À7ÍäJžGËêu!Q-¢“ ýú,U¥YØIÔÂGt Ï”!–I‚”:Rfñ‘™xA˜YôK°+ª´kiéõâÂ23ãP0¹eƳnV¾sÿ¼©ó§àßÿÛxý㟯?ÿðB3¯l\ÆiÇÙñ8ÐÖkûìe¾CÛ½í`ò[ˆÞvì*}¾eÃÓvðr©ˆ÷²üˆ>f¹l9KÝqH€QôrDsJÞbj”iâä6¹AzJ|˜ŽÜ8Xš7¯s­jÂãíC}‚?·í ˜Y m;ˆÔ¬d-lB k¼}•d©oãA1YYµm¬«%£{ëÁšJ+Þz°ø½ÎþÙÖC‰’ÒÃz(£$9롬“æRÖC¹Ý'e·õ Z*È{¬‡¶E¬Îzìîù|Ë…7ÖƒõWñ~Y ‚…̲në‘1ÆGâmËzdŽ0ó¶D£ +»m=0«¡¤ª“ÜDô¯Ëz Ô4GÁ;hIý}ö²ÌDÛ>ì×]ÖƒèÀPxyÙ‡ yù³Høtq®6z2—Û}äå_ëm>¶6¼ùpÐÎÄðï‘YKV½¶…AB¯ƒ¯ªrψC|l» 4½{Nò->vE÷îDƒ4õ +©xuËBXZàW§øH¹¿×O™J"·k7ŠJa¹mõ‘Xô˜(ÀÕS}pxdè¡>ˆ¶Û©>V¦—Q)“_†p@lö! Út»:ƒµCó7 +®ì²UØYÃÃm"ÙP yÉÞZ¶ò# Mnà©>RA”HõAÔ&ùA|+>ÞÉó-?$b¹_òC¢Ë~ÈÉD,—ü”…hĆä,ê€PÉ_úƒµ‚O¥!uÕÛ ²s²•ôI¶ù–â&ɕٚK~HÐr7êC‚›k;Õ‡ä d«>$÷¤œ&†L)kñP’{™ƒK)H…øî×ÅÞNõÁ²cÉÿx‚¨ÐVýLãÚŒúz¯Éª•ægõáØôXwNìÀ´àú‹üÖ=>Z: +ÖÔÒ)NgìØlÅɆ«[gqR¸As8µI‰Îèü»²Õ%„¤ ]Â:>u‰˜Â}m ß”&š³ *óàÐ%2¡Äté’âüXŒK—hê’uöÐ%ë™C—l{6´¬žºdywèÈ Pžju èf~°º„ñš¹ßº„¨¯¥Kˆ¸˜­.™ úáÝ:P6ËYu“½ô2Vd<Â\üÑJÚ—;òpH¾îí·vÁ˜/Ùj—‚ˆcØÄS»­Ò¢K}€âç/(‚æžâ¥P‰å|‰—ÂFbš¶x)”M¥ñRðùºp—9¦HÆ^?Ä Áæ‚/D:…ÅÞø¥ ’ˆ OL¬z‰—ªJ¶´ ’ñÂÞ5Ú¥4”±k—v)\3É]Ú¥`Ê!V»”‚«k´Ú¥`XÇâ/í¢Í˜.íÂ,ã™h´K œSÑj—U ¯³¢üé^X(''Ûušdc~´œv%àÐ7+QK ìlh¤ýÐ7«D~¼QÏE»õ +®1KßË}Ã.5[}#u.a]úF‚Û¥oØÙ2p¶@ᇑBÍ@¾ì×CßH¢¼ö§A™Qt¥Ñ7’w![Kß°@@Äâ©oXLµÔKßHÕåjô ¿ëìÙ°~ÉÍZw~^7õ´IÅèBU(К=³Á~x·æDúßmÑbŒX~º´O! 
ªÕHŸ‚¡Äº¥OÁר°—ô!šº³Ò‡PȆï){(¼¬êeËM™±ÎÊGlaeÊgùv(vC`&-„_ÂìÍY¤¢Ö;kƒËʧX²¶LYóÞ[åc ‰qôP>òÊPRKùL{å³í6Ðôî9ů¿Y-Ö\HS¸ÔQIƒ{o%SX‚ŸŒÿPG…IÙ`hÊ=/tLÓ­Ž8CûäúÃDáRG=3`Ky +ΣCUú±„ Ç™P- qTÄK¡%dHŸ:ˆõˆ4†mÃö¥ŽØVêá¶_µ2ºc©#¹ÑW«Ž +7ìø)JÂnæh8äSX)ë¶<"”Z³òh%úÿØD‹[K\ËCBI +ð¿N %éZaJ(Éë Ðª—$ù¼ÝBMÐ!¡¤ B½$”Çþ‰&¯¤}k(©e8e4”Äb)¡¡¡J¦·†’Ȳn–†’ СCCI¦H9·†’|Ê.ÚAdâQ‡§†’ +‘ºôŽÔ÷«…p{ùÐPRœ±_rI +Ù?£YU˜yYm®XµýÃïž%î?äßþöÇ?xò¾ÀdFð,,a/àGX|¬Ìؘ„Ç0 …$XÖŠõÃëÞB½éˆ™%vèæÌ od`cMÀ¨»™’zÿ&ò³·DŸdÞ¶@èN=øfAl÷Ùu€æ¥xZV`Ãè,M¹¬Åi,š(~.§ ÍDÐíú†V„æÑWÌõ/¬h@h¢m ˆ¦Œ«Æ^M¿xáBWÞt†j¿±âùNfu”üé_ß~÷ùËß>¼~ùðë/?}þíñb_}Ñÿú×oÿñåó‡_~~|õý÷ß½¾þûÓßýò?þúñg~úWþ‡ÅæðOdiý׫ެ7´f¯TŠ&C·©74¤ÓWÙí½‡IÓ1J^/óhªî³"µFéL P(Ü$±Dï¸Æ£ýÅ%!3àæ(BÚ-oŠrÖ*(Å ¡ÃâÞ¹«Aóµßk-ÁéË€ …áaŒÞÐtnž|=£5^yS4'îýmΩÑÏ‘Öü‹92j¤\úé@Ó‡d¼Ý¨Œñ1ʽ€ªóA V(;ä«’Û osš8éµò$yeVÊÚªÜ0¼Ï"ÎyíIý{ž×-LJ}k¸Æð°† W]óz®É½)Ú*–‡ +CçškÅåŠ)´þ̳ÆhéiÝŠjh¡>Þ ôÈ@æþ–§1%{r²¨}¦x3!Z8¿G•9:Óà ÁâÆ'’ôöÑO#†¾ÃȪ!宿O‚–F=:LaX ÷F ŠVW8N,Cf\íYוèìg*éUyZæ­ì†m³E†oóèë,yåMQ×Ô›eŽ…Ôèç@Ï Óù¶è¥Ÿ´;•2oõmRŸ`Eæ|‡IX³tz$ +ß`ÑTÝÔ{27š:‚nàVH)E#‡¿(‘uø7äæ‚"F¢^¸ÑgëÈÖFQ8=eÞ@Éw¯Ð4†T´Ìôm\ lÕXÐ(.çUx§GW°ü1´èfTñË‘"<zf ÌöëCS% _­Úá`ŠYª¬d -ê–‚®”.6‚¼È+8!+Ø2v3‘&‚Ô@˜Ùß'‹îÌmË›¢ÅKÅÅfDp „«sé{Ö««rã|¦ÉF3Oàx»=¶ÍY¾Í£¯g´Æ+oŠÆ(¿Ì1Ð4ú9Ò#´§j¹½ô“E£C¹çUbºq7V4] H(g§¦J¾B?Ïèæ&…×;¸ûX³`HÎK…‚…uLú¡X¤Ž;÷£Ü¸!T†¢Þ¸Ñ¨ÜùíF!úÃ>ðHÐ|MkðU®£S1Ÿdð¬òY4%V5ùa"Ø®w;Ó^gïH£ìV\} ññN¨ç2ÎËÓ ?2.L€2¤Z³mìì„|3О}°E1XDhÈëö=Ïfv²PUÛ@SŠé¨© ­ÊÛgM#ìwl'lƒVÝo³-4ü³­`"fZ( +=Zƒ 4Í~ŽöÞÈ¥÷pµÂBm+(ÒeµLxÒ´ T¡ÕÎV`ؼ–i - OX­9U„`¬V0iƒšV8PmóÈlciláXj0½¦´ëL{a¹wôŠÃî…#°Ú ϱIˆH|~¢E {Ë´’ oYZ”¢ê­“Ü-á E)é^?¡šÃ}ûº=Ñ"$Þ?hÑ)9Î.Z´ž±´h›³HвÙ"Ã7K‹Ž`-Z}ÚâI‹H~ôœF°§Æ‹-Ð’"ƒ%Et08KФŸHYq{"Eìu”­!E¼ORt@‹Ô"ƒNRdÞ˜¤È³I¨zm‘"Ìã]^Jжs–í ,R´c:9ÑS”gý'tm,×.ˆ8kwAÄZ«)Ø]€,{ßïytÈÂ5ø™ûTü•šÚ½G8O|¼vlwÇ&XÀ»ûœÙû »¶1kèo“-4|³{ÀDË숙•c·hýi3‚º.³j÷€€©Û=@'S,v`ziªŽ=À ©.±{hŠ-Û=Àzñ²{À@fÔìÕ=`™{ÀXcö†l/©™=¼©1Ý˽c¬8ì=pV÷Às¬5 ˜¨©WzÃaÇd厺e°ºP`)Œ0É®FnJ(Ú\Ĥ†ÒB@^¥âØÉ9¸¬hD_˪ ¾´ ‚žÊ×YõÜ‚±dpeÇ5%¨ÈŠdüÇQaHc›WÔU@ñm>ÌZºOc´…†wóìë,ygp˜È‹·=¤V?z&¨9¹ÊÉ>ƨwdÞA^öq]*ïCÿÉןN”ÜååàBq·²â¹/Q¬/|M”©K©ëWµú!z6 ^šA«šÝh8óߪr·¯ÝfxƒãJrök¡%[ª•/6öï:»‡†ã +nhfZü!Á >|UaãtG=.框[ص™½ã‹ls*¥¯—öèÏNJÌYQ²n)qT²}Üõ€îËw˜Ó žrºAhKúF¸«ÿ܇çÛì²ê3<1=4é{Ö:1s¼˜N@K;;è ì»y1¹l91¹)Û#ñ{žâí¨ìò¢¤:!)˜½Ž˜dõX:ÒKò¢£: Î]Gh!ªÛÙ:ªl[ê¶ŽnÐÖ‘CŽn¨éȲtä¢q:‚W(Y¼ŒÊØ\2r×s2ryX2òi5½%ú{"E²¼Y;8…ÜïÖµ—Ô¢·vX{³v%˜/wFWÎãÑ _––^߬«7äæ’6²­ÔYé¬Ý9Å[»Íöq'dÍ»ykç²å¬]梻µsÐ +ú=Ó?‘ÙÓû‹µs¨³vж›µÔG½Y;ê<½Y;}!¼Y;í(ñfíø‘»µsбvtÖî ÛÚ3¶µ;Áxk‡RÏ'î¶v ü!¢Y;w½cí\–µs)\Öî=×ß”Ò.÷y„ms0á¯y„ô¤â§R˜F~)Ì5xâg9”âxz õu’!ØÞâË8 ÝäÖÜ=4'ÀYëÆÑ9Ç£К>'j‡ÌÛùyä²åæQvÎ ² ß3ý)1äf6Ç$‡º™¤hh~(1qˆJ¹Ízº‘„¼F{Èù‘´F#ÃI¬g„I7h$‡º‘tCm$¹CÖHrѸ‘„ßÈnõ3 µXL\3ißÎM$—„5‘niµ‘ôžéU¦€ÿ„EùϨÔH*´ëW`H@5Q¨‘?•H-¯ü¿÷Ô·‰ná1G1 H#¡²#”ëi¨ŽM‚"‚¨£àÙID¨Vê~ BYɺaÂÁ5 +ACï0ÜP žJ3§áŒŽ^ä ÔÍëòkñv$‰;ˆ11a©!p¾_±#î ÐÁ†6×.®HùJÙ-ÃfxR†»;{Á„ ûøH¢á¦KÒmÌk)Ÿù@Sç#ˆQHƒ×Bb$d‹gÕ’‡«Yg@¨¾%³á‘v5¼CŸé•Šé¤U-Ìã«â?•;Lf)]!©ªuÙÄÎṷјbŒyþ° 6êX˜F#z\#<û€ a`,)Ô”+dM”úµ„±ÖŠŠº¾çEË’JF<Á*Ì5ìƒÙ|9.ê +Û^€£!°ÉЛ ñ!p~ðÿðæìO-’ ðC©SÙ>~ ÚM@3}Þ08:A¶I»…»U0UëÈÜFe<¾Â ‹œæžÌCçúÀiݽ“t@1B;šK”2_)Bû8Ì„>7¤ÛŽUeÍü9©Ñ6y dF×L°örø´ †@*ÍT=óß‘ÄÕÈ!³–$ðWÙŸ/`®ÁvSM‰¼Y¢-:î†üŽ®·Ð¼ô«–ð³NòÓ@‘°1Ù®Õ‡¹Œ–"Ö”pý]£¢±•nŸ¡954T u@¦½`ßç dÃÈØmïϰX!ÏZ‹i¥Ó**=A ½0¦veèSùEÇxaû,±GòlÝ$N0öIb lÝŒêb`b‡R³–HèQë [$¯ªn ã_Š +-T4ì„ûv¤=C;EJVï•€â±õI*œ M²J/¡-ÒZ|ª„lâHB…”d& HAwÔCÚulŽjs ¸+s± ‡–=*Ó[ž…Ö û ¢7­aÇ-9(Ïòéi…‰Æ»ã:å2ÐÂÏBA†Ã¸—B8³U…'`ôTp,x"¢]çRÙw_ W+ÒþĆu­Kt¡(ˆQÄGì u­/Jjâ›uÕY·¢âÇUBåƒçßq/ײêµqL$@¹ç™—fõ O@ô{A‰¨Û…Z¦Š±?D±#ã'½jeQžf?H1Xeáßè|,aè¹Ðº=8Šw#7¬ÁhnI²XN…µm$ƒÚ ÃÎaÊ3³ýMÙ¸Äa®lÖ­¸Uìz[f x`2ÓŸd *"ü +I²kXÆB‹ÌOÆVV¹Î¬¼×ÒjcX €Jò#NíCø¥sþ9j©kJðp‚ƒ^Ùªƒ&«¤M«^=Á@8d’Þh0Ú'!jXm•–ú6kèzh[,lˆÉ‚ÓîÏ’KŸr‰š/fBøÄàˆˆÌV›Ú¥O$'™Kó2“/´šPJ¨tðZ¡Š´¸ü±ÔHœ®t–YÁÙÖèQ¤ÅYr¼$Ò B3 +%/s=QÒ¬™Õð(Y,1£š0 }4Ù-|O¼×ø¡Åÿ©½~ûßõç|H´HUÚ§èUXÜõeô1¦#Õ×V°‡ÒV%è '[ƒã °*8¶±å®¥zI($Rƒ¹¢_ëˆ1±†Ô0Ñ*4ü¬H†(ɉÀØò)TY¡Q—û BçÉS +]ƒºŸ6=3Ÿx Ñê(™«õkIb¶E‚Y¦}ڤˠ>ÑŒ -ƼÖÈî ñÅys–²}p·âNá+‡¤à£qÈ 
Yö⮊5ücÖ1 h@:©«i†Ý@oA“¡9d?a¢8'Ø$+ß*Ñ03ÒóLyÃæh:p™É•æñEýŸóùÙƒŽrÊ,6Ø]ð§bމPÊ_V5žYו<ÂnGQë1Ð7*‹«áE+IZl™Ï.©+ МÒ0fAôÜG2Tî‚W “öí Ï˜oW‡V+=ÖsG뀡¾kÑO÷އàhK{ôòņ›ÕÐD +4æD¡u£;ˆÒŠ•È@„R I5-ÞM°ÂÃkF‘P7þRûð^;ÊÌîcåŸü,,Æ»¿WA¢ÁúvÌÉŒrþe[èoÊ­ˆåË€¨<[A©ö닚.ˆRK»M J1—¶DH˜CÀ•eðÅÚV10-Áè*æYÙ‡Õ .<=H²TL("¦a …ÎRáO ÿ¡ï³¤'†V¹4Ûó.¨¬lKvï5Øñ0iÖ&i¹0ƒø5J4Sp/‡ öh–逴òEEç°jÖbí®uü?ãå®dI Ñ_¹6¡Ö[öbŽKþ¸ƒ‰Áßs²Jj©ïl8³ÉU«TÌ,½Êb¨qÖ;ùÌà6cQ­5´¹°^fC +ÂÊ +r½\0½«D6¢ÇJ®\?–²¶:»¹ÔÛ"\Õxih ¢ CÄ« ³uÖ¸Í%s^~ü¬æ‹`þW–®‹å"x_ºUÔB˜—Í# t“üŒQ Ptç¢i,Ñ3–zî.úué(„4lF=Ší]¢dôd‘æ4 HâDC ¾EÛâ»5Äã¬Ók·¥Î‚q-ƒ6­÷ÛÔ¹ôkYï*ãPÀï=ÎuÏô[½ØZTÅ—ðÍ úT›£wØ MAßO[Àuîf.§ ÒU"/Ç ÇPh,ö3ìšoû^ÞÏ_–Rz9W7ø¡Óf{b¶÷‚FM¾¥ háÌ·iÖO](åò×m”ݶ_õΠ£3†)|GùKŒàW_F „­èwˆF— =õLÝZ–Yé"sFqöËE‹Ä5¶+ͽƒ˜0¸5Òr†Bn¤éú§ X ÷fï¶ Ò‚qôþFÙ.-VÍäÛ±é72,Ÿ¶;^΃úšú“>™¶­i?ãû¸¤Ê.)¤…}:#Íó&QVßw !Wl¯ï…ýui!¦Å=JdŠÎUËÕ5Sæd4'~W°­êˆlš5%Û$„k1>=ø€ˆ+_k8ðu×lŸZ—¬/N«žõ¥%sùÅ6Zê[îšø(j¼O»öZfºL‹1 +ö8e4Žf@£˜3ñ²–sg™¡у6~ö‡WQbs$PÕÖïí,E xWmj C=ç‹Ô ÁrZTç*0Quck“56Jã*Ú'ª‡Å^ÇÕš‚l;Ù{‘ü2k„ÿ°ÿþúWb+Ë£å6¢²#Ò÷ …Ç ³ýÅ…'ŠzÖ€T *zBŽ{0ƒ°Q2ÓùÓ¿¸QT?É^䨕ՑÚ鱚I¸ü?PÔ< w´AÊÝoî¬öAí +ÚO@±§ƒæ©ô1§QÓ´±²²nH_$ÜÖFz{´ŠcJ±·àÕ/‘±¬òT=Î`Jõáùɵ“Û`{_+ŒŸ ìsÚg4|:JeíWªïðÛl[|sD§¿5cÊZT $AhoÄÍX U¯=9[[lP$;¶]§É¡«ßq)j=í`¢0k8ƒ2‹@«³Ì¢tËìPñ!•š—[$V¨9»B€DQBñ&7™öÁ–ÚìDš$IB¢ÔzÀÍG+a»ÖÌ‘/‹éÙˆ Ë4(ͳõÀœè Ýí; ÷X<’xÐ#‹)ÒéQƒÄKEŸÇ%ZÑØzÐ×]³v_²›óû@»xüöŸGk9úulLŦv¢MP +ÿ‹*ëM8¥váT6%kñò&ßèê®P¹¤´úœ#¶:±Õs »†s”·1Ô´LVYǵ0ÏKèàž®È?N49W[m˜¿:ØS@šûk ô{©Í+Ž!HbPü·¹bIk½&Ô•Ù*‹8<‡ÒYºs”ôü¡ì‚ûrõja:g¿wZŽ¢;QP†ž-é%¨×lóAøB/ðóëìDBèÌaà ‡~Ö©Uéò?g°3?¡ËFgÇžǯh–ã¸b#»J<ŠtƒªH•Åi° l7¼Ymkk8‚Þ×Ùúfñåéì4€¡ð—²ƒ!X‘Ô•z;ò Y}ù V”ã‡ôŸY:ÿ­øúÉ0ÍIûÓ§­Åèvé¡­-«´åDõPxνóÎÉ®]f¢oY–q ‡z`vôÒÌõ97Ï0ýÕ +IŠuHLúZ‘%ëv”ŸGÔOGfMED3Ìx°jÑ ú]›"t lÕO.4à§Šýl*%#œ3„Ò¶‘®êj7Fñx‹”f%jÙ²<ºÜŠk ¥Ó.Fïj®);}ë­uX{j4CˆTй:„™ŠoP‰Õ;ì‰úYšøº†Cãj~mvîr±vƒ`®DË «Gô2L®H@Á+™CKŠËœ¶C;K›æ)`²Ó›Hy˜zú@°þà ÿF5íf²…0&-Då_ŸOQ†)©áiÖU·(š’4®jÑ™EIŒ Ñg{‚\:H¿µGï·©–¬_!k¦ÐëpÙ +$K‰ô™h®ý •Z_lD’,žºº¥‡iRÚOwñyŽÊ©¡-jÑiã<Ž?¶¥ikèºÑßè"£û’=yß×'ûzý¡w’Hã 3­¤ÆÇ6o6št?Ô¬×SùŠÏ«T9!VÌ\^™[(ãÚ»ŸÅìØÐÑ«iBA NŸY‡œ‡ÅÚ49½Ì¬×ä#L[h ¢:ÍnÐHŽy–™FáMG؃ŒŠ¹0 <2g#×z; Å4—é…*¿ †­6b÷ñ8¤¥“PLðNˆgÖàYØ( 1{Å4y´Ç%]Ë£mG³ ‹q85môÃ_Í•'ªŒY! Šòê?©²÷À¾Ê~ž¤N7Õ ÚEÐ`iƒúh²•@HLb>êWÍ‚3Ѹ¡ºb92„>ædÔ_üd˜ ´ÙÛÜ¢³ÍÑŒ<¬k¦EDëJ³fÀ [8 ¤œgú³í;Í,-Ôa„î„Æ›5æzгúl†ûø®&—ŒÒÍ0$8ñ­Ì‹õñÖ f‹†s_r'ù[ž²lˆlÆü\UÆ„_=< ©lìÉxwq›Gèp5Õösú7 +±5I徤É7’â#š鋿­ÇùC}Q»„ÜÇõis‹b†ë’: œ~œê¶Ñ>WŒ¯'ŠN°?½ŽKº¶ dõˆfAöjot öj(µíóx߉àÛ©pÝ ‰Óæ«oÔz6>³hky_¡þäÉíÈâ }z7fó7:»Ñ=Õ>\ÙÉ­a< LQžý@§T~=Ð[S÷%·òîhNá¾çè!Ü5nÙ×t~ßþuÊu}ßLêc)¹?¸Gðû?ç¼óðè¡S0cjlÂê‹Tøf×nq•è-ºh×v‡0ûêSG™ŽaÚˆMËx3;ꛂH8^QG¹à?9¢ná +endstream endobj 31 0 obj <>stream +H‰lW»®%¹ ÌØ8±mH¢(‘¹ÃMý8‡þW‘TŸî8¨Ûz‘ÅbÑm~þn×èc~~üù‡5»ÄÖ~ ?‰úe>ֵޯќˆ_s. 
d÷eŸ¿¤µËV³Ø°kÌÇ%s|'W[“¢`!q;H͇’_yD”Qò‚ìj`âé çg|¶íÄ-Ä…û–娮¹ÔΩù2 Ùèq;Ðf%´”òNY`™ ü9mÅÝ\Š‚2³@Š#M* +D¬D¾Äªqï$rÊJ +´ÍÅ5¡iËNrJg¥¶-r¸B‹€¨± yH¶Ñò±,¨AB ù¼H¨Á¦×#Ù<ê ^†v#ŧ=Ûë»ÞJŒx†¨yt•ã>À¢mqì¿ÿüãÿÂß3”½õUj:Àóÿ]¼×ôo ¸Þ÷5n–0…²Ñe JuÍRñp\¼.Jr5)”µfÔ£¨9è©ÓåŠ/¸Šsàcò™,´òÇú♬ªR*)Ô- Ë¥¤¹%÷꫉9‹šÅÒÂ:àð½ÝžêÝ$3ú@)j[ìVUˆúvf§z ÔCkÕy½-–%œ€ÁÖOï€Ò Þš’ƒzˆ.Qb#H^coDæÃÀUS„Aq¡µC„ ¯ÌAV„ÇiÇ-1a«Er]Óx Öuÿ"\©à Âd/Q”ø\úX«ë¾ºXxï„›kÏœÝ(Iq¹á¢L¦ ©›jõcviGеãü€~–kÑ4zË„Íì#îtXµ}4:ÝLÄ…Yl}¤™AŠ×#±h˜P¯¶ÏRã F‡™"âÍFÚ›’PwÚ¤Ü+· `ËíÝ–¤zrô Á!Îsb¡œØšFú³ÁÝâÌåtóÔvF}¼ÿgf¬Þz/Gí¤±ˆgpêó|(î"ÙDÚÏÓ#Ù«4/ÞÁkj¹[³»18]ºJÏ8á È +gˆ’íËÚØ8¬5’M¢0‚¤ÅöLX“Ê”Úîý¬ƒîVú‡Ç3 ‰€xêKù{¿Ç^g—SíÉ/ø +Ä„lÅCð ›5F)÷/ˆKÛ=’”ê霣ÝÔÜî» È;}Å‚­) _ôÕö¿èÝ’ñ¦_2nØv¼©ñÕßWÛG½6HÌs9‡ôô÷!_èÙøoôÕùèiý÷!ßÖÿ{7x´þGÈíâˆ!Ð=Ãå÷0Éý“¢cõKï¶ty¢ð‰CÍü»˜†VƉÇÿùñÏŸ4T<ØKI›ÛÉ_B™†¢f²åb8BüE¹Ñ†Ó@lù”õ¼‘¢+V Ý…V±Hi¾áA)—A´’–i*2~2 ƒ3ŠªZ_¥š}ÇJå„ ”ka}GÙ«*îÖ}¬³欤×[Zc(L*ôÒŽò…E’öz´ dUŸœåØL†Ú} +õ¡¯RÙ— ûŠhÀè⪻aÙ×á%n(#Ì!Hóe ¥‘× šr½–aÜÑ¿ +Ugè}ÙƒîËrF*éÝIcÜW#r/¦‹û Úplv–Wx ]gäfŸƒî{£œÂF) +‹JÇ2P,ý+<(©lC”Ü&1Jð(šcÍ3Eî))KŸ7¢•}‹óõzo £­BÙCt1°31ñVµ=¨âS÷ËéÞ´ó „B¨ÃoÃÀèêKôÊÐ…®èõP\–Ÿw„Ú÷• bõ¦Ä(ÔñÎoù[lWÁàJϯì\`õ‚Ôþ-ɵ©Fõ_ò©Þv”ä5’‹¾*¶Ùà tÊÝÁê®àGüÔ¤ÿ¶?^ÍrigO¢‡2ïå)¬Ñukçí¼Ð›ìº©èñtÞׯõÎŽë•ÜÄ)C%Xþò§cȵ u1§žIûŒ¾:¤t솨2*È£vÐà‚'(»PµMTE®¦•q3Š jûÒcJÔœˆ&wÕձРϰ>ÄñtKMŽæ¼´ñPî!\ÌlHk± Å¥y£ +nÖý­Ãžå‰(Ç4N¤s7B]†¾Òÿ Äƒ»²uPÈPtpA'ä©–=—Ù=È•v£ßýÑ©Ç5êU­Ú!S}ü%Þþë 5õËÏ1\à¢óX[> Ò2}@ÌÇPÏ+HnªsÙª†íÀ ˜N"Õlnò¢žˆPot–ÙŒ2Ôp¶‘{BœqJ¢h Õ(¡IÖv<, kM‰Š¢Ñ,"Ëct«ƒê%¹¡wlô]Ïò“I<½wÃÑa–O…Àû»8÷]²_2Z×!;Ä_ÒóÖã`ÝkÃøg>%qVÚÛÉ`ÒŒ&/j˜cû¶Í¹½Ì…B±)ù3.á¥?®3ŒBˆðu™ iG3ÒõþP;ªH–éÞË)“ì‘è-÷4}Æ´T“çˆ~Ç ¥,­‰Ï!4M0 ;·Ù½šºéùFíÕÅÛµœ'–Ù¢«»çÍÓ•ðªx¾Ñìæú:D³¬Ås› ½ã¸ÑwŸå'”¬Qú…t—òw5-´ñ)Šòîâ³ëˆ.ê íŽâ†ìÍ n#6ºŠqµé³œrZ¶‰!¢©ƒ.hÈ å¼«q£gҽѥ¤çŒGnÏenµ>]t«5¡Ûy/onθ´úo©~ÀÝigû§¿vó»çGè¬ñ‹&:®Y»£…0©YPEID=˜‚¬,¼æõ˜²°¸Ù‰×Šì‡–²Mµ‚a+ •ZYë0"B +êÄþͳ [ŠÞU¯‰…òL´ãËò(‡T× +þˆœ*ïwY¢)tƽò¦Œ/€4·ÌDq )^dŠjl)lÁýt"²ÂwÓ-¦WÛ}À[{„ÚÖ¦üñóaÙaÖ8;oFRÓ´5áæîÑKÝóc-ð «™Õ?BéÍ8ç*uÒâkçv@RPþúh@É©æ–^Ðô™U¥™=R9ú¸äªø%¨›-'¢ÖðBâǬ{åqÈÕN`/͘–0£s:Ö1ùÕ}eð‚30榇aݲñ?2]cö·n™&‚Vœd–œhü»¶ Ì‚îô†7 }çÆ\YsÄÀàöf©nîf”i3††ØØc%Aø‘VGm̱^ ZkV`cq ¾©¥ékãª1@%DU½ o׸nxįiØXY¥A-6>‚‰Ô=3[HP9$ïWÛÁÃû[#«õ§ÌaMÞÄ­Íå"c åy&m墇%D}·7C4íçL´šw®•Ïݤë*y½€ÈDvIªKž:49¡Õ¤§Z‹¦š¼¯Q2'9sãj×ÚÆ2-ìäb§§¬2AÁ•ŤP)(8X¥zôYµ”²‡Ïw”ð2sY(šE!‘×0üÊEÅg5Œ;‰Vˆ<·è჋Zq˜ãð¢Ú£‡³Òõ¶V¼š³!ò¡G}86'0c¦É¼d'ÌrCvnö.=¨¨2kb™éµœvǯC¸¡A6{ØmZ_Ý(öX('Þag¼Í\œŠjYÎ)¬Ðx¹g/ƒ!˦¾Kc»¸”E +¼²¤ÍgÂÚÒd1ÖUUÝ¡êêbíò¶¶½Œq»8’b­·©É‰/oˆA±ól_{ГY?’¾²CW4ˆX'7{ê¶ +:w=”‰Ád\Å]ÃÜã®æ*ÒÖ<—ÕçR½U.µnZÆåñªìOvAJ°e°ØW¨wø(g±`¦Î“³{D†‡¨™Êv¦%Õá­±¸œØU[{S-<—vkØh,=)¨A×åbs_“MÕ¨_„y¡°ÑÍÙÎ åÌ÷¹RÕ±eð¹`Ÿ>\Ù3òlW»’-·ËU¥ØÐìâ›ì\á­rä²r;óUèÿ7€n9g•èa‡¯~hMª TEç{S¸@p÷NãÇ®ê:ÌàÉŠÈÜÍÜ–÷ž åõXð¬ NA]ð½æ€ª¤Vc͉‚ji¼¾ƒÒ#ጔãïïâ]ä^3ß‚ŽÊ3ôPhút)Û(®µÛ˜üE–BY©šhNs÷ðÒ9 ‚GˆÑwsit«¢ ðv8Φf‚Z­DìÕã@w‹á-›°@<"ˆ íŒÅ:×R4¬)€¥QJ@'µ¿ ÒEÅÒ’Û¨½«=mˆ¨69ž:%fè‘ÑÃ@âz(§åMј¼!Õï™á[uF“’+J(Bë1WuÑʈ~P¥/¹·±Üï<º€T„Zl·´ÐšhI¹ñï\3„+qw”S; TPsé×ZN¨¶Û°Ë{G¥­n½Fªõyg1²¯‰j‘DÉo¯‰êªÑk¢Bßîf–#÷~å}ÂÎDuÀk¢:`LT×öO±ïo¦üõÛ;Šð¾E“ÃFÙI  …2Wak“›+Ô5ŠFSr1™G¥¹¹ © +{¢ªÓ·{šQ ~.Ön…Ù!n—¼çððf:vÎÞ Ìâ­4¹ ohƒµô<â _‡Æk·ªô®¯¾ ìY~JG§‚KÜ91¾+ ¿d:ßFÅâmï*Þþž§7%&eÓà§f_o^40lqa»xÑrõà 0šf @b`ƒÜc­^b½õf¹ÇE&wêmO>¢@1(`»B/\—3cj £à;EÁ€VÒ‹Np’6p¥ÞfГÉæâÚnpY•ՂɈ„ìÐr¹ NuŠºQ4+‡"±üv0#òT¼H›2àÈÀ8îþ0Öx[KË]NT™Áz€à-(Ê‚l¯¥3*>n÷Ѧ¡Ì‡ÈâjÿÑ$y;ŒxP.Ån€…% ÃHË€õ@“QoˆKÓ3šm”¡O7tÅ.³ç2ðăÞâ‚Ì­‰ù ·ô_h³ÍϤ/CøF/ïê¸xøÖ~K.j×ò…丫ßg\È¥þ½åÿ [ÿÏþׄKÿ¯`Ba +dBŠTÔ²¢Ð®#{¡L®5%ŒY¦*@ó™DNSTTYFDõ#i(y˜ +ï@ &^ ˆp£èz¸ ¯n÷ÍlŽc«{UŠðŠÚFLGÚbMs«ºë#´¾=Ûáa/è]e}WÙF¯2i4½œ.hy‹¼« 1 kz(}ñ„ù{–þDý ‡Áöõ‡•ªsÝ%Á‡ æëo•óŒjRhèM\ Pv™ÔËаÛKvѬÉ©µ‡Íœ*ø!kâbC cÑö7™rl¨õn"nHA{£6Ê+xF¥AOQò2É)JÝ8Ö‰Ô¢[ðˆÜÅ /åˆeˆyKòI™bi! 
+ÈG» nˆ‰­¸EÙ(w„ƒA³Ý¨ÑédñÁsB'Sã—!kGD"È€÷®Y;*òeÞD~rˆÞCß×dQ¬°[ùQšÅš¶?Šöâɾj!r‘Òýj¿Ò¿¼€Î¢Ÿ:¡»†U¸«é{1Þ´ÃVqC±<^xMʹTipîƒ%„GLçz#‚6 ·7B¡ö=½¤>”4¶¹EhBˆŒ$Ef³ðo„Âáò£õo_Zrá½;“:ô]ÚÜ´VAtñ̻աÜÒð•þ½,I7†€÷3‚ôÖ4ßÔb¬Þ^Zúfñ™ñ°á¾†OA£¨¡éZ‹¼®YšO¨º¨Ò3‹w•^>ỏn¿ÃŸÆQ2"ÏçÞÏw˜€¶"ÿÄ“Çg•ì:©q­;9Qr½;Qr9Õ~'y‹?eñ­¨þŒ$i¥ü#2àTááû=Nðoeº·ì–Útˆxuéh«±êðë4°Í×@“bõëÒ´´ÎÄGŒžï7sX\ô„g9 AÍòC( ƒà*qÒ«®HÍþäÅkGkëŠ ÷a×ðaM×&í/§ŠàRT€>bKt󢹤¸¼B‚Wö‚¸T—ü¥Ùd¨žhÌ0š!¸o‘¤³ +á€ºá–Ø³šA$ÓùqÀy.É‚6’Xbh[KîˆG¬¬Ð¶åú¡§vÖ5!Y¹A˧ÖäZ1†vDú]Â8É;«pP&•‡ª½JIa»‘î•û®Df n÷£)Ãè×UîON_ÜQ<=t‡¦×uñ,/ôß}ÈÅm¸ãAwý|£»ÙÎ!§%¿uôÛ• ¡âG&V+[~Ö°¸c€—l u0`ÅN(‰JCvC ð5=G½¯{¡ûº8Äu÷\ÎE$õnFz’a÷#¸ãBïµìå¤G’sÈ]tÐ}óo^‰ú:ÈÁC/„I£/$÷ì½“ÜØhCN`¦žÈÞÒƒ–dxççA%7¯r¨Bç¾çúp„_ÕvôrRŽˆ-?W’¯µË ŽZ!´F«þŠNb¸!Ž\­îµÞ‰ºÐˆ!éÙ ôLø±¦½ó4a ¾“æ„ («½—›Kûuăœ,ðJÒšWæLœk³y±ÊÏÔ|fø‰\Ž]ûƒSŸÞqâ#Fµˆ¡è¤i ‘D4c»!åÊHQ%¯èß ƒõõ'Íôî6È.ÒUnýA·ÍÊÔöåCÁu¥åWܧœ ­…¼ã\°£YègP¾´ è¥ J*>D”Ê„¯¯€p›!ÖFԷן®PD—Ï–Ò\t³ßÿBÞ>²¯|@ØrLPú¬,u9šCÑq[ÍÃ%KiBÔ,-{iévNŒò€!3Qæ<] igöc‡©>ê2“s\n:¤yƒ|@æÎá_‹*Î*'@´Ô:¶¹Jw-54€ŸÕøŠÕM¢5ý=Œf„j‡Zõçº(IÆÔñ˜ÿ.EçzCœª¦ßøBq­•²>LòÞì Ê'_Y'ˆŒy$‰Þ)©³ñ:Ãí,•¤\Œá½³Eˆ1ðÖFeO’1 +­š?Ôv­ï@ÑÔ8•¹Ê%ÚÎèöXÛÿg»ÚÑõ¸uØV¼›Oo‰}Êì"íÙ}šáØélœ_#‰p÷O&ÙôÜ2rÐø÷Ó0ÉMwņMòó,¯ §—.\7ÍÐë¦ Mª—Ñ+Iï&OÿýÙ¾Þßõ×ß¼'JéÊÁüÊVdsƒ¼ ³9Z*Rd6Ýž¯N|Ov wQ´,³%¡¾Õ²€ð7‡ÿ‘Uw-¨£vvÏŒšs¸\-H&nFѪÀÐ;]ëø³YüP!K +5•ÆõB®wtÝ>;A\ ÉØnÕeuTv©Ëžå =3ÄhŠ·@‚p%„{ÿ⋟Ö~>¨"!ä*m‚‡PbL§¹î‡·Þô¿õ*#£¬X'´‰u1ü7v¤­ÐpƒýÊ^‡F‹j+$Úc=¡ün÷ñ`1#HuЬ-ò–ì%(Á Ý?´w¨Ôÿ˜Ë]†^d‚¯Ç_æ¡/ÕYiÐÈÏ-»iø ”P]Þc<úw7'N±rU­¬.>`"µ}ùpÑ/žåéAM÷NȬ>ê|éÀDÆ¡áK†2Ư´ÇSä?ßèkÏEmŒ¦lðõhƒ“¹bålƒè¶“FظiîQ’¥t“Ðý×÷‹/Šæèë´ ºgƒ~¥Ó\ˆ_TƶüC~qÓ”fþ-ŠPjDó!o÷ÔŸ,ŸQȳ?cB Ú!m&#\€¿ïi.Ä/sb½¨n ùÁ——#‡^ªÛ¿Ó3AýM  8Pü|Ñ âÓ&Ó&43\&Ÿ& o/ú­ã³ü-úESA†¨kqë„÷Êoé)miR”©;æBÿ:GÛ;¡ÁÇWÏrƒ W +Êþ ÃýL|ÑpËŸ/zm5íqÍ7&y÷ÛHÙ»q°›ïòUò÷ýɱd³vàv×ûÁ§ÿláo£ƒ³µ8“rWƒ$ž3½0Lä0npf6/h›ñÈ ‚²ÛÄRõ›4tUÎa\#òÙ¡iksè_n‹«'G ?DןË‘,ö “‰ ºœ8LXæÁˆÔFÄAÃSAÏuíE–€ÌwSä½ú8y*ÂÊð€Ø<ÀžŠÓÙŠ™U›zPúµĵ­<|ØêSfpG6{_.šM[¾³Kß±‰*ãh«^yü¯šþ‚¤Z»nþ¡u®Ír­3ì€Þyh`'Ýw _»!VÃMÿñC†ýîU—'Ÿ-±éƒ!#o?_;Þk©ÄÜ“ó Õ\¥y8«9 +ÍèºØÙݽyöÞtûkÎí¯B(Êt !.þÃæÁMP±½UõàDñ;ée"Ú™Žv|Fuœ,×6ô ãGÍ¡éRs*šažkÁ©È“W75‘bÕÅŒ“&.àT<Á2€ ¯ã©{û³Ãó8ŽY¼ké>/ùF18N+tP4SKñÍå{ø { +‚«(ÁtˆØþ‹H)=b¤È™éµ‰•í>½ëõÞáÙ&½Æ‰»>§ÃÃÅ—w˜B¸öØlY(ªÏ~í&Y•`,^¾øeéÊŽxvñˆ5¶¢©ŽÏÞ¼Û+XäÕHü‹=;§Rj«žùØ­J<.„šÛ?xÈ0â>gl”ƒ{×.AÁ‘Çr—<½û +]—5½bJÞÅH…uéGøù‚ÇùÒå-¨Ò¦X® ]Àa/Ülo'CZ‹ê–ÑïGõCZÎîcFðM)ÕzÕîi,ä²QMe3‘ßt›}ó‘JgéçTZ<JäŽj\^PˆðÜ®–uh°¸^mÞ!ëý!7,Xµ‚ô¹øâÚ›¹Vk‹«ŸCä/J~<$g':^†Æ“–2:uv"ûÜÒÍÒD³VÔœ_" õ›u*Ý\:]ã?ÔS½I ½D›Ý@EÔL³(îºÙÐ8ÈphYn4.yƒNâÇ… IKêTO§ÔˆšPܬ߉½Ðïã„ÐTŒªÝ=e¹5w¾ý©>õÁúN\l²%ÂCgf…ª“v'$L`DCÍÐ ’wÈÖ(%ø‰›Gñîݹ7Cé(ì€WhûÑR4 wnŒOdmŸ÷ð®Ú%µL(ÄGJó~m± çämá FÓÚç| =­ºðÜ þXÚöëFîOâ}HKµÎÚ>H°vf”èvoCEöÑ“‰IG5F›ø´KÈ4è,P_R2F v‘ “”>ý®ì+FØ£´9~ýGÆû-â}êÑ…Ò wϵZBÿ!ŠF¨;£Ý½7³Ã®>`I²5³ñ¸»»ªHÛùº)vÍõ}ÕÝÐÿé6a7cløó–þµãµ:Û®TQp¬,AGK;Y>H§(껉‘£çP3ÜÎi™ ý¸I)iì~,zTýæù ²"òpl3ïå \!œÌ÷gáM]näáZ‹Môeðÿ¡la‰H,SÉõ…GhàZ̆Ի MW XxQ­ŽPõÓè` ´º—~à +4ÕG. 4-okœ• ™g×:j1ªšâ‘\L±›õDÁ|’$ÏÏŸ6¨ec-ñ%eôcn½Qõµv»£]ZxÚ sKç!ìÞ}r<û¾LŸ.ÀÕg½ÌÑR*(¿Kh¸€’êk«›"PŸõ‚¥òÓü_éQ‘d®ø=šÀ^·WçƒÈ§l£Ú òÔ¯·ÑG<«‡—~ƒVvÈjiˆ¡Ê;^ô8¹x÷p¸«Ì,RL7IØYõö•û±¸ì»¼ß\±,6áÔg~UJ»-ÿb¿½˜ôº7](oÂù‚P‰ÏÍÇŒ!}*õ£,=Bç †º*hƒ}^â Ñ` +^£ˆ(Îmñdù£Î¾ë]§y@jÔ+êÆh=h‚{¡—Êïç3öâQaX*†áa(¢"ã¯g…5ò¯Dá´g»(•Óð¦n5=6Ÿ“TÑ uÔ×o‘Ðlù ½vlã÷?†™µ/3’öfË?èàÒ?‰áL÷Å´I‚’é¿hvý„^Û7ylÿO'ȶÿÖžÓ(åò‰éŠñ¢ÿø£õÚòoÃæq5B)ø!R`. 
°6eÕàÊyæÃ6ŽxrÅ"ï$ÕÈùãè^Ú}Qù¬ ¡ËÖ'%VDçg1]4òI_䀸A ús7™ &ó¡%OÓŽ›õä¤âÃmd™G»Oøö gDÄÂÜÓ]ë:fݺõ6Ä/2¯z8¹(¿ˆ·,>>ËÁç1\î&H±RC?ÍM`ù IàÉaï?Ž–¹V¾"ˆ?Âôn!Ĭq~«#cÓ¸^óÔù*’ø³¼½…¸ÕØsnsñª÷Ö~1à4¿5¸ºÊÌTá,éMùêO>&éáäàæL’ú /ø?éU×Ú6 Eßù~ l°®’lIöö”A …ÀkY߆!& dÎHÂÖüûû!Yi÷2J_ÒcË’îÇ9ç–(§ÍÁ²sÓ&”s„á@(µ§õK,(chR)²:ìp–@] h…à€òM:¸$¶©»­ ‡”“ÊÆ2´^ÖÖ2ýEh8AÖŠÑT'k…´"‰LŒw«³‹uÃPME‚[§r ”µ@æ +rðBôE±Ùä#·4­5FËi%s-›x¾F¤•ŽB,@oÓ ¦ã¢JBº„Þ+äÈåÛƒ?¥¥/ÂuòòÅOŠÊ¨CÏè–Sz´èÚΔ™Ü ÚÔÑ—)G:éò\/Êê_LÁ`üÒÔÒ€Kµ?¥ƒ#†™Žœ$“P#6›K=p;âEc¹ör©“ZÛç¥ÞE2Ã|»’z#\ ®\È“×~ +à#@>•ÍQ&Ê[Ä º(±fu] Ê(Çš¬®uòƒ!„®aH•™—\*Ñåºï0uR[µG¬T2Õò×!‰÷âµubã†äÄÈ‹j÷ˆ9ÈD‚î’‰¥aWåÐ -ÂÇóA´9¹‰š?çAź ¦P^ ѬÓƒ¥Qÿ£—$•pqä N«¼EÒ^F`jš2T1YÒ)¦èÌàüEäñ̶E†hiG=ŸhÔ¨øw°”ÊkœvÌp8NÕ]ÔV‡I®žjËãÐVñ™®¦I©.ý°¬©†ù½Èt”Š ÁòB¡Út̃¯Ð]A® Úxžø¿@Ô«×,g´p×%ªÆ3†¸ŸB£ïl)fZ¯u]¾Ç4Y»ÒêØ¥‚È{Ðd® 4|w‰êÇ;ç(¼ ‘‰kˆjLõ¸žÏBõæmõøm>»^ÞY{?<ªôôúö0 ãz_½ÓûŸÃݸžøzÚÎpÕ5­]ާm¿ÛöÇí¸!øã|öÚ­ìnõ¾© ÿ=ž_»µ{Õ-WŸæ³Åòο׫þ<®®,¾ô›áþÐowÃa>ÛûßCÕãþÔŸ†_xTmÛÕñÇþ!´(/X,n>ßÎgWd +endstream endobj 32 0 obj <>stream +H‰ìWÙnÜÊ} `ŒØî›àz3É•FÐâÀq‚5ìÑ0æ4Ù“?ÊwäÇrººÉ¡¼\ Hü HCv×ÒU]§š|õ»«›³ ¯ïÙ™¹Ð”Ó“W¯¢–e}Ýž+4¬,Ërèú–½¾~£èöBãRÁÒ[KÉ·¬íŠº:§91›rý×ËÛb£\%××˺¯ßð¹Û¢/f? ÷¬kÚìð¨/²âÍä¦â¬‡„©êª¡éŽbÛ¶ruAYõ˜u]ñ/ÌëŽé™|0¬‡*/ª‡°þŒQ_9sÌuÒºê!//¡=Ÿ¸d,gù—ÓÁÒ^§Eɺ}Ö+ºN™ –º±‡¢Ì/‡ý=CVMß§qsM«¹ë° ¬ˆßÓ¸»^î1tÃúÁÂmHtñî/ó ‰×ï¯YóŸ·ÐùÇ›Ñr[7û¬ýÀµ•3×ÐÝð4E“Ó·lß”Ø"ʤci äÌtøÏüaFL$hØÒmÓÎX¸s¤À1Çì±`ŸÎ•˺b2!AÛ߈½¶,MÿåÔõP²ö®*x…)_dä¢ÎY wGi™Q"úñ¿”¸ÍÚÖ£Bêrè©½É ²þkv`|ÓuédÕ°ê¶~KK=3<ßTßUx܆åûÈ”ƒbòÈ /+ÔãèXŸþæ¹-niôAÕz…­XµÅCQ›&2åÚŽÜÙ_Ú"?n¬k(žøG&ÞìÏÿ䢑‚¾gÕŠ*º˜•ˆ¶¸¸á®“*ê=ߎˆÅ°”MY?ÈÙãÍÁÄМž¼?=1}õãP÷¬ƒÅ’)¾£>´Ù#ã…£&C[+º©Í$©[ìVQ‰Ñü^ŒLEÓp¯æÙÃkåæÕMÑ¢T¶%û¬6¬íwõÐeU®Þl²¶®Ô°„l—lÛ««Õb Û|öèyÝõ“KÄÔý€Ñ^eUžu;•íé§™Š.˜3Î ë¾p*”k¦æ5Ú:º¡¯¾Ë Ö"âNQ»&Û Ž¥n†¶eÕæ€G½o묺Ï@^ÝñÔQ^ÝÔÍAÚló-ÛUQAÝ5Tä¿ØdeU÷êîÐìX¥¶ØŠ¡±\Ýg¾,¤›¤6hÝÐ:µÿTw’VÔ­ÚïZƦ§l3ôLݨYS¥±|ƒý'k–e™Á(9i`Aû¬Û %­ÈóøäÇ!k¡ÃowY¹>ä`‡nk¨ÕLÂc0ÛÍ@ä7˜¢xj¨‘\‚šºš2¬$3õdÒ[ +©¥p±œÉ,'™¤ß©—äfVBa%V3…•XÓjÒÛe_4åA]u%¯‰»1 ;¡|7S¾›´Þ‰ÉÛ]Ý¢Zz{…RëÔL8ÎÆx²™v&\g“‘ŒÒ‘1u3¦ƒ u&¬³£2L±I¯R…*f.ŠI†!•pX ñz\S=S¨¥È¤—É„ê < OÖ3L:1ÝS2ãðéÉm"ú¡ýçõm‡Ã`v˜kj4Iµ©ùá}®¬Å‰9œïU9¢Ê ÑkoÿvJÚèY£®x¡¹=4ãéB–×pu…üöD™Ë†æ¼õU9`ò—¶šeµ…Ÿ×âu´eŠ˜Å =âwËñÓ¼ù~̶8àpZá SØgtBIõÈʺi¸eyÏŽò׬mžãåªÌª¬Uhâk'W á#?TÙ¾ø8àî÷¤Ù_ tí« Ïrz7lÓ“ÉLN(0›zŽ«ÛÁ.®îÿ %è¿ÅŽ^ñ<{S]°Ïì‡KÏúÞÚÐ軯SÿÝ´uÔtΕœ)|×X§<É’P~ÎÚoûûº,º=T§{6Y›†ž•r~’·Õª´C·Snëºü^1ñJÆC؃Ã[¾1m/”>o“ðoz>p¶àÔ£êóøÓúŠ2¼Ö œš¾‡~ìnsŸ»üŽ•gà†öñùXl·CGµù•æÏåoÁ¶¨r¨Ü EÏMÈ$Öû¦î8äÂéLøhÚæ'Åü,8;ûíSB×”°š‹üÒò»êÏùª|Pªºha|þ„£ÉPœޘñ  )¯Öa+žfߎbãV£YþZ¾þædØÍµtL¶ò-iŠk„ñ1ÜqÅ߉âwY©<5^ˆy O`wŠåXЦ逡™€¥Ù€£¹€§ù@ …@ÄZ¤\X×u0K·GwÀ‡‹!é1©\Å0 °Ûpð߀ˆ€ØH€ÔHù¢ð‘Â]&¿,ÀÀ<Ó "  5S YX"wÇ-~Ù>q-pðˆH­”§Ã¦m,˜;¶¹~9ðlïôÄö€ åpX©r(p€CË Câr €<:>!!"Ä„H9\Ú*—ÒïR2]J‹KÁ¹´,Ž÷Þ3ðŒ Ÿ<ýñÈLC²Ò‘Œ ˆ±d¢N,´ˆ1Ð'îEÄ»”8gßP¼§'Ä7ÁµP²LpL°Ë"^¬ +ˆO’KÄ£‘C6q¼|âÍÈ™”Ø"˜bIŽp~ø’‘dD:1Á” µïQÕ‹šd¥§²ÂEm[²¢E%‹eÝR½Ê:åõIu‰Š²e-Šúk.’Õ5V“.+f¬ gÚa~~D±Ÿ©ŸØÞìкx âM„ÊàÄò¨ü +ˆHƒ”·UÞ$yËC{çí‹7 jDxNI¾Pî˜ „üŠ€Hi˜FÔœ#l9oœµÀQ£‘Qmƒ¨-¨EAD´n,WLH)G¬IP±ÆS{- ÑâdSŠe똨ëKár‹Q|¼’Ò‰6ƒþÆ@™žž$æXß„ý|ÀâñÉ™1óÿz½X|±øbñ±øÂÌ‹/F‹ÄLÞ¶ôÉ)¾oÅ®£˜šâ˜ Ýv}.qƒ/ÚõwÂÚ GÑ>þ¸mS?Þqzšv´õÿôåå—|Æùñ§—rîOOüÛË˧}ùüôc>õ¤¾{|8;Ëo´Oþ³Ü÷ýþvœ'Êèaþ"Êïƒüã«¿xâ;56øßKxOѽ÷ÛSh$°§¸žÂºZƒz*ÃÒçÐUç&Çr'‘|Jq ¾ýP·ÀŠ7ß¿^±=‚/Î} ¿¯øûŠÛû?Ä6›Ê¢:ôÆÆ´]O•m‡¯ð¢w­ê;^4 bŸbÏå“fy÷Wc£T¹Ž*ýÔ™u±ýÙõsÏ,r(©Çóm©º9m©œæòÛº¥þO[AQGIänŸÊ%“Éä%—4n×î%ƒHö–ÄÝJÎ&wä|=H&ñ’JÉ%J’I'©ÖJî\ÓÏ´fA:¼ ÚªX~Œ1lLÄ’‰à2k$žÇ/›ÐäXÀ·PðbÕ18Äb=±Æ||'?‰Ú)¾–®¿‘u«•HC}œùÆ™¦Ñ¼ céÎ +!Z×G>ıµÏì@÷´£Ò½_cÿFÚws×î©û!š©sš©ï¸ý›¡Ù·^±Œ2y×A1 Õ ”ÓPOu eu޳?BO’:Díµ‘Vª¡°MÀ¹g¸9B’~|€ô1ÝÓ^ä öZ-ˆ‡È,Gñ‹`Θ2r>cÖã¿ãî +†Û(w- ÇKÖ¼¢÷Þ +ù£D1ˆG”†D±@4¢x€xy­@ :„)ÊŒô›1d¸Œ­ø +ǃýÉ"wÄr?VÙz›­»8úUWÜÉä7ã÷Î.“QŠIf‹LY챘ãÎÏM¦¾f‹ój‡c6ÂÍ7Ü,P Pìϯö—ÌÏÂbÝW°¿7ÅÈ÷ÙÜ3AîÌuîýê›mNñV?³¡Cüî]FwønþÍ·\jÛãéÝ_ü¨±sj¬œfñ5¿621c?š±5å»qÁ-nAôntΠ΂Þ×9s#bÞ aXÀŒ|úan úLJÁ ]´%¬Fq› v±3˜ß¸øAkmo 謎†d•mlÓ‡~³µïÇž~|臞÷}o@³i´/¶®éÌfxo¸Ã¥š!.?nŒ–òbw +4&26t 
¨ëx¥Ì!H‘Èš×e=“„ð›ÛÂèÌIçdq)×AžlFˆV`8mŸm– +Í}פ>‚~à'ñÞLpÎàf#>Glœä§„‘gi‚zNªî&ú«z©² ^KÄJÊ86¶f’ÞÌ'N¹59ÙàmnæÔi…2þUü3¯ìdœêy(xÏš¢á5àMîh"5±X6ã‚Þ‚vg©Çµ‘%‚xŠ.åÙ¨#}¥%“ òõ0ŒRW©å* ªXàÖ Á+ƒä{Î&[¸Ï²…¥•r¸+5Ý…uÊôëÃ:fóx¤œ»EʬÇBƒ¿Ó\¦Yæ¢ôYJ=N$ï® »£ÄBß[T +&*Ä—)Ê:n˜-ÚDXçÐSaõ¬üÄÂf㨗c«ÊV}0úicñ:ÃX(ÞûUV€ ŽÃ᮫?Šk¯ª­¯ÈL]cÅïs-\ÝA&:iY÷fG°‚L[é”Ñׇ1ÙXFŸ+{¼Ð‘šìñ޹ÇÓò.½çñà¶ÒBï5µ,¹ó¨ŠYÔ8U=-.…JkéÇVŠÛ„uªLhÝ¢ÉbD›¶Š6‹Œš¯ W¹7bSJÁöÚ Sû¹Ÿ£@ŽìÁ%P™†…¢è]>$‘ˆp$¹‰^¨M¸¿ËºtˆR›95>MÍ9í¦Ú(yÏ-+<* ­U ¼ˆ„êµ(¿¢$x@h)  $'èæ­ÎzðoÚ¤Uy¬€ò$⓼js ÀK;’ã".ç؃; Ê ð´°›¸µ~A¢x·IA¾ˆüËQÇ0´'*j+ôAyBr"úžc=äŸÅ¤¸ãY<ŽfJYÄ…3ÀT=¤¥´<¤¤šã÷Ϧ'KѤ`=^Ã<Æ“Ç{•ƒ³»‚JR¬9e: 3(@¥D6 Ò¶(B’+Kaãµ,ÃJÒ¢j“îר¨…ñ]}ƒïÏÆ¿ d2 HÃóµ86µ0ýÿeÇåÞwýãwU^í¨¼†ý¾ã‹«æ 1_s ÝyKªA*¾nTý±Q:ÉR"]ï#õ]æyñhÓYU —xh”hp’ÍÚ;,4KÞŽm&ÁÞC¤´XåTõT”Qm_kb2ÿ4ït° Dõ7ÝwÔ#+‹:ë,ÇÅ„ãÕÏs°¦áˆœ$ÏQD+z„'K‰(«~TwíMþ¿ üÿçËïgÝ~Cc£ÔÊ@¿P©.môÒ¶ñ£ ÿQ…(N“¢áÍ!Ôé¨9Ì(ܘj¥`¢W·­Àÿy-±º¾Û  ð=êGô7V×c+­ lôžRÑЦFJ –¶*š­cZÐ+ J2eôܦò‘Öä¨N±oyJe}_2Ž*ía1„î—þ—à»g8œ>™hIqÅâ}ÚéÔ`EMe- IŸT¿”‚QMÓZ©mTñ!³’d6ܘKßAé>7ÄGKS†A,P.þ{?O ´ˆ†6_ðiÕèrU¢I©š¤´¬šýd›™¦ «òN€.‘¬iCƒêïC¨®YÅØò!7èrnqp3„d¬ÄÌÕ!ïCàŸ„i…\óĤˆ9·-ïy„ÃHûCý"áCØbŒÆš&éjËáŠ~JI‘z[Ž$k~2˜“ &!&D{b£/ÁBáCdüXÛ3”O±–/iU2Ú©Jɇ¥#ª7‚nžç1äÐ"¬“sµEDHk¦È"…+ g;‘ob¼êç yaJ´Ïu †Ýyc2²ô‹ã„ƒE`²ã¬– KKh……®{Åeæ'Ô[_"/&¡©óœMÊ4ÑŠ±ÍŒÄ¡¡TžwÉýœR(-YPG>0Øj“bžTz,Ò0uIÒd‡Þ™â]ªû³bƒÆ;)Û†KÀõ˜ì¦DáP=6©6‚íSÓÒP±Ž=Ú +n¡L¶!Õ4×UÎŒ¤”5E¢­ÌV‡:ÇPÒÝÓØ!R5ôVlnš‡”2rCê¦d(#žGcÖ±ÉP92­1@k"ôÎKÍdCñC ACF´L[•LÅ «Ža|X»¨ä!Cí««†Ö-[W1Ç1„ ³ÖepV¢tÖ.ˆ¤ZDQÄE2´šbQÄ)^°“$|=§°Š¦°Õ9<õ ä0Ì—é9WâãbY(’Ÿ´+Mv#Ôz¼ÖtÝÍsޤ‡ ^á†õÄùJ3¥—Ô¾©”.Í•ÅcÊ«¨ˆ(Säçyá½Up>bÞSI¤²ô™|U.¯²4&ÑŸ F%3J¾0$ÄQ©ÌzÃMbåVŒv"da‰{šºnAÿæ½ÌqlÉ(ê ÐjÕà<ìA®VЀ,íßÖ9|ÕÐ ¡ Éú¯ø“™dÄ;Üœ±½Gà~¨N9›°Ÿ–ùÈìAZÆ:ùÕᎡŽA¢P}ößÈk%GùgpÔkÃè#$n^eX~þÐÏ©ãJ6÷–x¬eÚ•FÏL[o¡Ÿõg£-]õÅÕà;k3Ï Y*‚)QñÖöÉ”²#<½XHIp äðm«É·4£rPD‚ÀÚ_M¯z5U +"O4Hreá¬(M1¸]Êyà © éC¸_‡®9tWñVæâû0ÂCÉ)‹Fù•“P%Û—RrŸ(msÄÂO÷пøk=&…¼eç5gGž¬rè‰דÄj”YB|ÁQDä —‚½@±KùÈ#ÞVlé&P…;Œî¹|Òb7Å#)n@{š|è óÚ„óˆÉÜ«ô·1¯‡×¢èâ˜ytŒÇàž^à +_Ýß8í'Ñí´£ÌVx¥`À%5áîJ ’¾á•5Ó¿ Ü4¶òno5‰«ëíÕÃSͧðœ4í¹õú-n¬v-ð5„•?Wð<$±Øeð¡}I4M¯”.^öpeöé'Œ†H»ù=ZØëBϵ…;òíðwuÊÓ@9&Ñ­;nxv-,T¼ž/@:å;ŒÀ}Ô -_•£G$ä!<(bÖ”Þ#©_HF»O,ù¿‹”²õi1 ÇOêšóCúJð:Y +SØËëV]„ ¨–IÜ™o†‹ÑP2Õ•v’æõ›j`•õDã9WNŠVëÁAÆP™” °Ûv´eö1ÐØ@´oA½ÛŽ3}G_2R¨‚羃ëC/‘”…Sðx^eàS0¿•ŠÇû.¦(w{½¸’9öÝ3kRM±ú½dii(Êóh˜v¾?‚€?ˆùªùH¶4‹ï‹è÷zÁó¾[>ZŸ`ãË1Ša JY?_$—ºèäûºGºƒ(Ž…_Yeª=²=zò¸voÏZrÉ]vuz¸ë#‹Z‡_úÆ™xˆÂÇÆ¦][›ÈZ)‘Gÿ$`A`YÁò@éPOÅ;að|å]sŒ‹ÓòÌõEg؃ +Ä#e6bvN±u¬ôj+±VÖ3bßSS¯å°BËÉ"׋v]ìÎÂÈ,Î#Œ1¦uDß{LŽþw¨Cý>‘Hheɺô%Ú,0ì‚Zg©-„Ø—´[“­¤ £¬¤ µ¡y©½©mÕ–’#X8»Ô Ç6@O5µ¾]9ý JµM!‚9µ Y+ˆw‡Íÿöâ¨ùùØï¦Å„¬‰U„÷bÙùaª-$•#gÚãhKÁÁR$%N%—Föý/ Zƒ5‡|à ¸Ñ"=¥& +€B:p×_b$³(mqJ[¬e$·ôr~«…ò±†+Iü;Ô ¨–ùSy¿>ÒäÏòC[Yæ¬`ÏæÂ¾g–Ôp924Go§öÁvúÖ«ãëveR'R,h,$/â„§ñ-×`'ý‹Òþå2èß{¦Ãé‰RÑÚNk{Ðïá/âïrgæ¨PF‘rgC§ZJ®­†øÝïV*z×zì¬Å {§1¢N¤As®¤ËFëÄœ¯Z@ü»î¹éÔ¦ÃÆ¤?c¡hg‡àì'd•_ÃßÃB¬)8,Üövù­é/šµ>{þÝþíÿ)<רïÊjXd +¦Ÿ0 ê Ðdô_ÁQ,¯“_evQ×ös츩~w‚ôš1{éöö_Â@š{@äÛ§ftú°ÜÝœ»½ÃÞãà~#ç3yéóÍ®¶G âú7»Á÷ÕâBryTA{Ög'ä;¢ûPóJO R8´¨0­‘:‹þõÿaJ8¿“ÂÝ0>œœ³ÛoóW)zU-h\W>°-ÒÕ•šZòñÄ¥§N ]ÐH +1ßio¢MrÈþXŽãÍq\+G3F +·y;ö0«¨¤.ÇB3¨ ˆÎùqñ3å¬0i©8!¤.x4eûo[•TlÖ{>?¥ÃÚQˆÜw¯v—˯UÚÇa ×`XC’uXËÀH“ Ò3´p”£HÏb…ÑœªÞƒ—ÐÔ"=êm¶(Y&ГVãpÐçã ]ý#VnÜJœ/„ü(ž7<ÈOõ!Mñó,;܆®õšº_¼Bý˜š‡V}Ö3Ü, ½{¸Z¨F ŸEѧ§«m1|¯gœbÜÑJÚR¦×q,ÎÍLï)–RuÄ@k/±ÊlÖÀ +V!ö"X3Íh5FӚ嶦û%O׿M_}w®e™Û#CJ‹oØá[ÚxM¼Ìr?A@sº¬î×c ÇJ§Â¤ŒãëK4-› ­Žîáífõ‹a|…éV⾯Ê" R´vFZ@$5;ÔõïaäPBä¿>bŸ8Ê3¾ûzdÆaÄÙ1€|ð´t€ïŠè¡©8¥eiFd,c^ÖªÎp©`h{>Í,Á F’¹kvn¾»;ñY+ÃH»;1M ªw«·«Òò- À£½dI7O]ªÞÞVN—ðªoþ\Ê,&Ó3ú¶]Ã’ˆ»üX®á§ˆ+Œîö™gŵüÕ"šºƒ‹Ì¶„;ò¢·ÌLû„›ÿåÌ0ž~²Æ¯ +Ká«ßîiŒsÅŒ­­ ( ï•Yû»ÄÓ æ€ùúÌøŠÝ7 qvª‡C¨[‹_åÖ ìñöÚÒÐÈu‰µ2cŠô[ëSÓUêg ‹Pär?«+§eoÑ/<ã¾µÓÞG&5}›]žo¹¶¬fp´<8aö­‘ãê[#T¾íLèÈû”gâ-¡µf-‚Ó\:mí,-¾?ƒ?V»Yó»ÎCD9wäÚ3ím´¬·UR᣻¨ÿNˆ¶;Óçá%ÙW¼ôõ̼Ðbá¼1hXÃN´„b{ÈnâAK>ïÔK–{»»ob¡ýà–Ø÷}sò~°N€¨1P·Ö÷í9KÒ½oy°Qó‰_ø çqYæx¯Ä9Ä7cŽë3àžµé¥¿¯ns¶¨µâ Â19RÌÞî¸!Œ[ÆÃDÿb¿\z³:’0¼·äÿp–Ìfèku·X‘¬"¡$Š4 +»cL@1¶öÿŸ÷©êã 
AeÄ2ÄW>uNw]Þ‹¸þ3%ä°–áÀÌì‚K¢?± g`ÁŠÕö'ÁLïñ°x‘Xd0Óݬ"†6„ž*ô¸{C…8غÒc¶µAå/¼›Ë£±™ž¥3OÇñ‚£sèíHa}- n¹;µÎÆÓŒÕ 標£,ÜðiõÜDíJîþöÂ2ØcI³Ki©Y|•Àʉ Þw´„nýz]-¹>bt)¶bÇF|D·)8(BîyœŸa*˜mÖº²n1ÒJß¡8“¸Ú1ÿÌÅ1ð?xž`†+®„÷ðòŽöxݹ1q@Î5o´.hµ³[û²3tÕ<^3Ymï|¯'œj£ZŒGIj¤"4¸¢7H/tç¦ÙÒTLŸ¶UJ<Ö]ïˆþ¢S.ÝÚ–^üæÔád,tðÉêÅ¥E—¤Þ¥š—£a&¶Òë•-3þjqèd.Sd’Ådúüòjo6e™sÓ,4ðn©pÕRh +çÆ®×Mè+GžÎ|ºèäÆÄͪGœ…Á’^g¢9Ρ”s ©fƒ2£¦'îRgüZ[åk&6²žâÙv}¸|_VC iÅûöwû††¦r°]›˜éädOqvz)¡°DZ´¢k¶kHHÙ>¤»oµjœ|M;€DMµ+OLØ=–ÀyJRߎb¤ù<ÓÀ%qEѮ’⋸«’þ”T¾¿I03·sÚ>ós;õâòâÍåÅ¿¾ŸKG:^¾¾¼°ãIø9Œ¦Î6½**’ó½‡1$Ë™„1XÑÍꊵ´ЈԎé°m]±U‰bvFžÊÓÐŒ:Ñȶʔ¡,ÀÎாvªX£•s0 ·‹O¯Úèšœ«VÂiÙ§‘{@wqÙ*¾D„ÂN½n†5«ÄiÔò˜}^FŽM„ +v[±±7äoÜ C#ž;O #‚¢.Íí¾þS™Olš¡[áã¡óìŽè:áØü[æØÌn!Àù4¸ãŠ©–/øñÛ G>ž<¿½»=$b8¾áà­sàŸøç·Ë‹ªµ¬mñÁSD¯-D„fõÐoï&^姉%ô³l9G¨k|jCøZ]x†º9ù]D÷üž+å"$Ñ#…½ö·:@À¯ÒÁ‡àÊQG¿ ©AŠ& i«ë« +$HLðˆr*ž(ÓÿÎTÑÁ)$ž’ÜkH1ñ;âµÌŽæA!ùœÎ åôô{þÆÒO—UÂ2©W*:BªI·¸`…ýI]›ÓõòZ¾v&Xõ¶,™BIÎà("sl-µ¾W\¿ecµëÑAr^¡ì;ì¢TAo1¾ª‹· MÍl/"„Ž)ÓU&"\á­Z×Cç+-NÑò)ª]Üg.³†Ó¦¸%oãÌ®‘ë® öEüY@–Z/B®çK«5pšØ­Ji¢Ð©@RQ+µþ’]ËÆB<½kó$2§S*Ä誙ö¹:\8J™°ù¥XVÐÃD^ Ä`4•J+¿&zH嶬á˜ðá#êJº…þ®àŸ†p±éÃwä¤ržI­ˆ$‘ÃÀêÍÒö +¨a Ÿ6d#²zôÇó5Ñ—‚˜q-´“ۯ뫋x$mÞS0åÙ¿‹äÒƒH¯@b©!†‰”‘âr:⃑¨úëoùX82_¸½Å9穸)ã*=éôîÆS—5sK¥) +ªˆ³âÎq}§ª%f€×ZuͶ’14T3ÅokWD tL3DòBSÃÄRÝ«!dîttíaÑ^ ˆø1«µê9‰]®™ß½u¶KnÏ?£’.×? ‘ €9°»ª‡óÁó¬…è> +l(|a‡î-°Ci©I¸%×08&YMZ.Ùmld¡ s;OÙFDý·]Á6¢G.ÁÊVu]‘Œq½'óUe—S©ž‰¾8›P/vPŠÿ€ÎªPE¬1Yyno¨HÉÌ# ]»Vcª=ûÌçc«xdåý*I2=Sð”ZÇ«HâS†PnX')0¬ »)•â ÕHä<9>úSR>‰$¢ºÄIóò¶Ä…\©£Ä¥›×^‘T£4hª}ÃÖ‹E“n¸ëU›?©Zý²¼F~tअ=GÛÚžõ²ÝÕå&` q|ëâBÍÇ{1’“ßj •\¨$zöá*ÆfÊlY“•Í;p»,ÃI ½ÌH:¸£ìÓP­±<…eÕ «Æ kyCoW,¤¨‚-Ã@DÅ|ÍZµX™ÕäY3<•'éðICôzI…Mm9é+y–—n¯§ëJÅòÄ‹oõÀVZ]ô.m[—Rý`¼¹ºË(9Tè€èˆd¾ž¹uÂ-®ÔÕ§[È[©zMºåSQ·åºæ™SM,(›Ó5žÐC°[--G©ªJ½×h±bÂöÀíÚs æ”ˆÀ +h]ÌPýóy-îAç`{öê9hrá º©«É¼)©à:y€iŠi(–?Vº#vE¼§s +wËc (÷"ñ[•éÌ].%;ì¥.øÐÐp…´Â²‚ÛÙ­!˜&SY%ɲ’V–šóOk+ÞŸR`¦vF&Bô—ýÀ¥‹ðÅ×Ó¶_ÔI´+–áûåì·°%b~›ÎNZ¡§­,'ƒÙP÷ä o~ýÇ`LØ8­»Na4îÍr²}®ÝxÊ»\Nc»5Ì^ã ¯5e]Ï-<•[{óKÀY—ã»ü¾¿¥éÆstD v·G¦/¦t“îˆÔ"h¦;”¯‚ˆœ›¦ÖÀˤ³5pFUÕø^)%kOÖÂË’Už"ø\!ú\ lLó(¤rê‘Bá­Xh3œƒÔä.5[½‡p®3èª_uKà¡ÃzM’ê ôICo—^cºf™žÂDʤŒR‰rñÖ©QÝó)›]׫lm«1ë&,n¡‘çòç×þ]³˜IŸ8yY¡‚>SH`ÚŽ›©„Ø!Ô™vé>Õ'~„d^n®Â£$§H â ‚þ÷›ê÷Lþ 7œ‘{Ðâ¡ ç%ÿdœtÌ7_°kéxúÝÝÝÍñä÷ç?|wýêáþÝ›‡Ù¶»ß¯?þû×w¯ïßþüáîÍ»›ë_®ß_¿ÿÏõ‡ë×?¿ºûýÍÝLJ×ÛÈ­ãé·÷é÷ßß=ÜÞïü¿àu‡¯öÙÿøÿ|<ýåúÕM<”°} <û<¹ü)aiT¾œQ?Íp/úåŒöiFÓ%£šÑeS¿’aŸfZãóŒg_oÎóÛ«·w>î:?ãŸË‹t¤ãåëË ;žüãxù«—A{‘ÍçË7çý +…»ãb^˵ºÀÌÐå:‘–¯£•™Y~_yK ñ°–D‚adA ×ÿ—°M8QVŽÌV××7ÓôArIHâ ‘ž}IŽyk#)ÌŒ(íìh!j}…4èá‚ÿŒR4 MæX5 çŸÌÿ*k-b÷ZDžŠ!¬žnd zO ÑÞ‘ïn.Rè²%!žS¤ó-(k©€Ìkwk‰GEèäa®|l>æIøY ¥Åwø­ÃÚª\]¡Öýä®ZϺüø7(ü +ÿ(äãÉóÛ»[i£ ðPüÄ?¿!pÐýâ>‰ T5Íï‰aÓ¤ RÆ·±¨Hö_öËž7¯ëÂ…eBçû©lU\nR +MÂ)¨ÿŸgfÏ¥…H£Â®RIïòî½çìÎÎÎ 4’¤]–tfò&Ây»’ÚWÅš•Õ]ØîÖž1F’Yy„´‰¤.©„’“°,11ôñf²ÝSYÖƒCZf¢è¥³\EzñbÔÛ²¦Å~Tv¬ä_›Ãî† ß˜{·›¬¸çsÛàܸÈn#5µ¢ `ŸF$á²¶“–Ö¼ G›jÊÔÖìùŹlRn1ЩâeÀK›õòŽ©c ˆÑ»gý’¨ë8N©8uq®ãTפÄÐãÁ «IÕBv’É +P¿êC×ùìoñz>-…©ì­mÅ‘7žG&â¿ ?ŠóŠ»RÛÑ.­#•ÝÔPбJqTŸ{Q4!œ”w*”ù~õÛ:ÊŽ4µäv¬¾wôG„H #«£«]f©)†û +Cmgµ¹–R…9û`¡ôt¤6-—Ôœòá§ðäM køÎܬAM”`ŽÜKØF š—´˜}ªQ{›ãw¡ËÌÂ/¼ƒT½†¤©–š$ª‚„UKZ¸í9ÅPÏ~ȵQ ¬È’¾å÷h±“â¾Y±é ¡€p=ì1"gHD®Ü'©Ê xŽÓЇæ™ëVn¾ô¤æ7úW¾¥’œ|eðú.¢ÜcneÍj:–Mµ]¬¸¤#ŸÕ‘3 ÚðRð/ÍÍKÞ ƒ3w4¿ÔµŽ¹e Lµ"_Ê¥Ò5ù€mª2às̶>›üz_Üsyaß~{eþ àü5 +ÆlÍi½Ó:m#ªê’¸bð³°/wÕv’¦/5‹¼ +N£¬éµý¦:lÜ®d+Ð6°©Ü•O9 .`ä;À¼².KhívËèìõ"ìÖlñ%ø?;$þp3@R„Ý sëÓ´ b•äùz‹X¨j¯4±d +Œ…bÄó Ë®J ™¢Ú¯¬6CýíÕ—_l­ãÓ(6K:ÓÊùuÂÊ™hÁ̼ÀÒñ»ÆÆQµNBêÏ]” Aø +äµYЉ² ½`“ieíe #ŸŽU Ø©&v3‹Ð2ð–Wîdý­Y©Ç·XjÍvšO3Ê2–kµø¡±[diN†´ïIb]ÆR³'i¦vbîA1•|{â¼é}÷-¢Ú:óô`ˆÄw»ªškj¾H9U]{;ÐR[îEÇ2xÞˤa PÒ/“™ºÎ>¹ —âÚ¤ºëeå«ë;qAA#•ÀxÖ±O©oÒÁ³x(5!„i½åíóµÌ‘GÙ9Úž$ýÁò¨GNLOÍàÙwš€*ÎH0 Óc³ª§¦êÛžšK¿ ÌôT³G‹¸‰ +øˆ[]ÃÑ$ë9s &’?J€ýu°C×ZqŒã=D¬–ä—÷äAI5û¡9äEÐù8sºt1veó=2ûS¿RÌ£µ,"j\ØL´®œÙ7Åk縞ÿwý%KEU®}ä±Thz@E5){¹}Áª‰J¡±—}ÿ\úeÇÀá’Ï”Ý&À,±5…»S¡ÈvŒ/<»1ó’ô€LUšŠõ²s1y•=›ʰ_”sÒ@½)n…00"åj5‰Ùïa\ÑvfÝòÏ[Fåk˨HÆ4Z™¹õ±Ž¦P™M›š¬­Ô½$K%6‰&Öóf›fÍÚÁn)ÓCOÌ<“‡‚÷ˆ0 ÚáRó (é™q¦«°²ÑV~ÙÜâ'ö»ÆÉ‘ ýÂ7áêL¼Ѩ¨Ÿ¡Óï&^-RÞ@DÚžN 
Àn14aD¤!ègf´¶z.³+R<ýF{Ö‰Óù øQ J°Í$Å ~¦ÔDŒP-œÐ‡ºD¨È*-EïÇ Ê¦"sù.z-¢Á¥—ó—ºöI<ŒŒŠa´°e~ƒF ë÷èü/Ë¢ŒK(sÚŠ´rÙW–v\úÍ×gDªä½"2;wβFžN«ÒžŠL÷™—WCA¦"£åÏg›Ež"†|¾VŒ"­©Â[®='Ú9˜c]éð‰fè†16ªBaªdU[qõ¤k£z³ïÓUJºOÃmTÐ;;šE½)Ÿî0ßê«óJäN ´ÐÄ•$ŸÝèZ¢û[bÓx˜|‰È¥¨j˜‡\Îf²ò.Žìn„hGl\ +‰ÚÞ\NÚ"¶¢:@¹H¬q.Û]#¾mÙMĨHÆcþ0.;#²“g@3¦:kBÛ9¨&Q"ÜAOô4@T†j¼k —fµ#ÄD—¿ÆINÔÜÐKÝÏãd +a£»0PÙ9§¬@l©ÄcÇ£ƒ'RâåúçîäeÉ}pöóTÉ3>K9ou”à¬ãg¿ ²ÿŠÝä»kYè +3ŸÝ•Ñ’j"ÅìRéaéÿ•<Î÷ìkÙ–ÓÓÕlã‚D#†JÐÃ`æ5Ë­yŽÆŸº1ê×6F^À8ƒ$®4+@ç ÛoB–bòd“·Ä[H¸:Eý²µ›!›œ»"êÊ +\æMŸ8‘K“„%‚ÛkÞP þe¡DäMc´Ö1?˜!-àq#7W½l’– yKVDs ,ÕÆ®^µ`JÔ‡˜r]‰H 3'’\e`M5³–hù·.…23ìѨ‹Çõö!µ©ÑÈè$=±j9YˆÝ¢ –UãE¸9¤âZ²Šô…µ_„ZtLŠÔVД#–¥Hžó¼)KQaòî|ofÎGt–=VYC‚V½˜x“TwäM‘shÓiài‹Y‰`UÆ)KUgì˜ÎF4óM?–çð9ú %–Š)*©ÖÅ¥‚ŒOeÒ8EžÁ½D€ºH†–¦6[t¾o–”ö¼–U@f†tk|¹iIFD š½±VDù¦ïâD Wt,¯©ÝËcè4mÛN¤jì+[P®1¼'h"8%æÀ M?%•ïHñœ¸ów'Ë —óÐÜDûæËùzAd§›Wß?>>Üüí÷ï~øþþͧ§·¿~z`пÿøÏ¼ýåé·Ÿ><þúöáþçûw÷ïþuÿáþ—ŸÞ<ýöúáñã§÷gôëÍ«Þ?}Ó;ž^?~zÿtò¿e¸Ã‡7çìŸÿ?ß¼úùþÍC<$…³?üýúçÅ#~÷þî·Ç_ænÚÜ]?çnCBL $ºŒªœ¸„¤%³k-Ø’œî!nà\ô6¡>[³$Y¸¦,"Œ¢™[”QgºÿßÙÿ¡³l«öU§¤>ÔÇiQdæèU®‚`^Vµ‰öfïúoµUÔpv<ç™îv›ËÑžc -ך%ÌeìK¡[ÉýõlRì‡[‰®aWƒ¾êUo!dÉkÖ–i¿õtåò¢>•›%QMÿ¤Ã"ÜZ"2‡VŸ§døåÊRÂÍ“â/¹^‘Ò¤®X&CöœjE´æêµ§ÔÅ:¹Eoû‹RìîPn²a‘˯gßj˜§ÊÉ¡„=¡.æ…ýê,g‰\;;Š@Xç|¸«íúÓtIk,…0.rqEiTE!ítlþ(+!>º˜`y)CÖ®LŠ2u‰N b§PÛ´÷‘ΘÒS•g™§iR;õRùŠ&¨Be^½õS窚™Nùlûnµžê4&Ew÷ô~3©k #å<,,Ý­UÖ³j_£T£ +<ô³_õ¸„É®>p.€uéIê™]ã^6çøû„Ávv2·™C»±÷íµssr{ª¥‚ª•íáSÈI!eµ‰†»{tVT»ã¥àÉyò©|a¢þ¢¿J±×»Yy”Jæ&^­êÈäçÞYˆÕ4é›RIA„øŠ&~V6áŸð}”îLÕ”nÒgUò +N<=ÌØY´}ƒ ÿ†÷ÆpcÑÀ—czt¼FYD×VXÇŽ‰y󠤑›S:Ih¬0»Ø…Ê€‚ö­üa-Eka2Wõ¯Æ1·:h +}ó…Z>N—ºH" ®opßÁkßÖ¶Ûò‘7½{— +3Ͳlk;ÖstÝ«:»œ¦Hñs‚h­ðè£;%¬³à€–ĒЄx ê'ÇP$…½HAïþ ä@¤† Ÿ¦Ž‰Þ™žü£8âŸå² ö¡*­`Ì*œ+ÜmN¶< ¾‹£ Qg< ËåÀñú`¬O]moAvýEÞ:ÑÖúí !ބՔ +›káûQ¡5š>%ÖÍ mËÿ]»ÎËRpýxWž`9vÙYÜ©n?ÔVË1I¢¸ U˜ÐÈ£ŠÃí];6{}¢ 0ÙÌÀ&ÁTÛNB–eŽâá[’ÌÝô³Æ‰ßƒýOˆ3X}pš¹|h6ÝCä}™ø +T`zuuˤ²öÂ&ÑÙ–|j_·öU\H݈‰O;|@?Ùí,Óû+kþú¼UF*)ÍŇ=ªÖ|øMœ¥G;‚e£š¶"“@³Çÿ (ÎEzˆÃ2L\¼Éì`ƯÁÁ©NEpÃÜŽU›—ÿ¡°Ÿ!ô¬Ým–axøC`Ë( =»ºn¼ñn7³<ÕðsR2Ò Áñ‡;œÌù5QƒË:uh½¤{a(­°VÀ±A±Ùf Þ”Ù‡ÑdQ—Bí›® ÄØô„T˜KAÞÔM^Iw&ŠZ$ ‹2ªìõÅA^wGw™Ü–ÙÂk—íÔSl}Ôž0@ÐEŽ$l²ÿ¨å“XïÉ0kZCê…c…•¥Ý0õ8h}OB{b“ ·Ÿ‘K¯ËFx8 +úا§¤©ÓR¬Ä …×®·•«ñ1î{æŸÙ¬œ‘Žžl¡I£2nk¡[Øù,ÄU%.Ü!#Rˬ‘13aÖê}WLt§ßƼ8Iz0 +ûÊCŸJ’"„@ðv5F$‚-ªü4‚§…?C3êÛcGé•™Iu‹~UûÍqr5HѦ¿ nvÿ²$$ÊTs¿w¡½¾¬ay+ï‚§(Ô8>꙯Sè\ñ`S¦’Й¬'^VÇ¿^ïÐ\‚~òA™¥BIr„YÄ‚²t×R‹,ë®± ˆÁ‰ì8¥dA¡Ô¸[p*Îuc mÙ†¶Ž|ƒE¿NÇÖ%ÐeƒÆŠ\o7âC;߈}Áû€ŽxHÖrcPÕ•m1ew€¦s^Kkf«ÅøæV=¹ýÚó³XâðÄIº3¸Vü¯zô+A²ñ«!”tˆ‚‹O¶¬³“k1Ž‚ôšd#´9•šˆÏq‰Ë7ÅØ\B—&U(? ¨x·¿ +ÓFAyò쇇‡û'ßýùýO?Ü=ÿøê÷÷ȇ?ïÞýöÏW¿?¾üåíïîï~½{}÷ú_woï~ÿåùãËïÞ½{—¢ÙŸ<ûéÍãßÚãñLJ÷osýßÐgÜáíó<ûÇ×'Ï~½{~ï¡ÉÕúùÇõ¯/žñû7/^>¼}÷åñW%àrmÌfå +Ô|Ä”#…Çü  ïÈ3éùN[©åC$ƒíÎ`à~UÚ`f )âÎ`WÅEoutí¯´õŸàÿ«üßTùÃÐÏ ýÈ¡[Šéá‘: õÊêÃUϦiÓŠ¸ÒƒÂÇù(õlt4Å +tÐÿƒÔâ’wÐiqÂîÔÊþæˆë\ÿ=fµç”ªŽ–`WÓPW?< =ÆÙU8i»á”J©ÓeQºÐ)jDÚ€R”gÏÀfÆFœž^ +¸x:’±:A=‡Ê tèðæOÓšP0àUKt ˜Ø×†pðsùËHR”[Úqž•Ðî: L{Ùl¡¾-Ê’ÂʇAÃ=íq V@c Âû4M/l·O(¢E®ùFÉ¥¯é"K5A˜°Õf%–N¼_.ítdÕߘ%‡+mA˜'ÍPÑ}Éóz- GIgn·4£Ê9† ûñVf(½2bP½ †Ê¤šgnCö‹ªEÏS˜Cf¾)æŒëz@¡ªˆÐñP‰ò;t.ÈŠ0ÕGÆÄÞÌê“|'›r)è‡X䯀ºÃ4ÀÛì=yd¤BG]àTÕ†r„SÐÉÜèæHaÿD#m(•"²)ì§ <‚¬-íO”´m5ü vZ6t|¬ËÅ&Îâ{q4@]{"<à_›ÿ›$,|tøüèðŽwõ„Áã:L1- _Å%<[U …5†H/Z¬¯—‘Îçä+6½ñl®Õkƒîl¤¾3H-Òᣱ’t›ñQÈfÛu„Àp!:roGŒ_{9ôƒ´T¸ËµðìâdÕeÍèÅMêÁ¾ç©¶òSÙÃú´/ÆPÕ©˜á{³7ùGK6ÖnsL·~czOXaïj+Î{íüNeü 2Q:/ z½á[tŠ Y˜¿`j[XKŒ¾v-4€ ¿“@Ÿ ™­”c\"B6äÎ!²r["½[ä`nOÕnpûØOÞŽHkTG"ç¤ï®èŠ\¸fói›ˆý1qòFzšíÚ{V*ll ¶9<^¸F¦×SZO—ž Oén[ÞQ/¼š†­êg&ì!OÍ’ªÄàOÉè +p´Ü­K³NÊ™0Ä%ôØ¢^“û¸¸ý&üãªë"a/Í/ÐÌ–¿R‰´ÉèGö|%V SÄžþ"ÈÞÕ»20ñ„±Ü/ÑÖ¾ø[æt[!Õ Ó6ï¨.þÍ{•$Ù•±«èrÌɵ·^ûµ•î¿5€ä{j«¡p¸»VUŸC2G`Ÿ[ŸVóyañµ¢þí‚"­»Û%“Œ•ßaL%€E„ ý£MÈÍqB0golVÂƒŽ’}þÁˆ=ߥÎ%ðèzÉŠŸ‘<ŸÓ• eì©Ýá 3ôRdA¤dê¤ÏYMÉi†d@†åªüm½øŒ_õb²&%ªGŒò»h¢‚ö²8Ð41¬+yX‡$P\Íaûò6T¢!¯ðâ2cwš§©È ]Ù3rM2'! 
µYYö•¼}>9æ\Á €tÅ’Àëmt"D^ÐåhL*>Ðh#7úJ–¸| Á}ô;Š„$GÁ k°>ÄQ—˜Æ0gýWs‘JÚPŒÛéN Ê‚¦Fö6øæ‚k.6„Æ?h1fÃZ·ÆÍ à.&²¡ÃÓ'¨f‰±&˜(Ï›îF_Q8ºú|ßäS“¥Éf Îhz‰Ówó`€7œ¡óè=gy#>Y¢Dúp1ÇßG»æ ± +Ä r?fH9 ¼ß<œíHdÕ{c’¦¡÷`ã¹$ÝFOÍýö`žh·ƒ¥$VˆµsùbApª jJ ˆÛiÿ>jàÓ×Xb?C>´/“1à<‡Ð5ã|'Ð>ïväªÖ- P*Mô]ŸÒgŒ÷³¦íÊ2$s˲Á6æÈï»Ø‹Qºˆ1$÷ÖF4m¨¼/lÓâÇŒ@b(¡O•‰KÃ"Ç?“-áò_djmôVfžé#Yƒ”%`±:avw`` ؆Z™ž]ª^Ã;”d–?ÇöSØ/ÍÕOÉzÈ=SÙñx[JJqVJ¥<ÉïUóo¤ë—²™ŽDõAj®V}ÌöI½?E={85ú$¦—€Êx¬C?Û!ýÊF€_ÖYÌ@îÚöêĹ¦îŠºji {¼õÚ +'(©ª»+y3y´bI4fÝ,©Ô)€Géé :øc­+ì–@V«"n·¡ +óݘÛ]dwQ<Àì« AÞ]‚hRN.ÍgÄØÑ&¦g;58Á@Þש^º~—5³6Ö€Z—jKÑ8·)&,®$“õç¦é¾¢ÍÖNõ|Õ 3aX+Ž\K‹†pÞ!^;Zþœ?¿8 ¯¿¬‡—óûÃölŠ©3G‹ÈEÉÅe³ìª¸ÑÁ³øôZ;£;ñ&q^Mëý„bQp’ðÒšš$åAÀ÷©îd„N¡5n²-’¬ƒtçRd³TW§pk—1ø%ÌY€„Û¬XÝMÎ?´â FÈ50‘ÄãŒ>7÷<ï®a½ Sƒ“0(ô0™˜cèãNU¶™]ó.F°t¶Z*±Úæ9Ú;¨ ùoØÎÇ=I2¶‘þå½È %O`±Ò¬qB(ïÒĪX çNo[´m3[sû13í:Æœ#À#ä½²ï:=‘í6z¬gÍéÄÕwÿW¦|Êü⥠Îm:¬èª¢¯À?'…ÀdÛž7Õ!°Ûß“¸¿¸¤¦$˜PzkÊï\ ÆOP]›úwÙ%Ò9Y3½1Îh)I0ר5¡lëAR‡«ÞÆóí–­Îß\fP‰ÌoÙà8ˆ‡Íw£-v]ƒ‚÷FðIy#õ>JGM²úÞÈ̇R8¼—ilص.ÀÂæÙ¾[K6ˆ´&Há÷í‘h2­¢zoòš¿òZ‘l.@6zÕkÛŸ]Ð:zÏ©yä´¿~8šÇ§Ü +~yÍCþëÿ@š×|7mtçÝHÑÀ¨yÜ ‰©È.Îu¹ ƒ@Ñ·Šwß |(Oæ¾Ë§¤“žV{~¬S½&ý”`Ÿ2àd\aRóÖȇK*±;‹­ž¼^¤• Êôzü1”kŒýÙé˜#¨r½eÕé´"hLi aS@ÖEDºˆàÅW4¤xez"Ú$$–¾Y¿óê±t«wi²õ¥(£"2ˆ¼s)ÒÄrô*Ýñ­A«j°IP°WGŸ>—;²“Âk>FØÅ¶sràz²c•kW ¾ûÖD²D·¸Ö¯•í¼–;x´|ï;¾ˆQl ŒèUà|"Aösl>GÁ²±×5ן¹Bcü¾‚²Qæµ;ƒ´•Ìl$9î?9—ÁÍ‘7VÖ| ØÙCòqÍ §8 Ïüš…NîʯÊvÍØvNÍ<`?P3|[k¬è9¶ÿq8y.# 4°j…€* À>RK˜Ð¢êaä„‘»'¯Â€ÆÄ °û* n¶6¸ °¬5õн¼mR“D_ÆŒnÿx{lh[ÌUyåˆg‰Z¶ CîrĦw›»ß¾8R °å@}6@‘EàÌ.Oa%Ƙ½ÆòôwÕ—?EëS’»ß·_Ó;·á—½À/‰Æ‰ëÍñÒœ +ÒìÔŠOïÒh1ð$\Qywi‚™E²M#f‰F‘ÝòDŒM¥ØÄnšL‚ +¶‘ýòÎû^CÅ Ä™*ïF¤0£v{•`‚ !G&²1?î>Çìnpß~r6R/Â×9þccµ]V¸çÛW´aœ ü†(‘K¯|†NÏ}©¼57KGˆX_=¢ pzu:ßâ¢t³\þñöj[$@E <õÝ(ZKPZL"ÓaÌ{£u ùÀÇl{±'ú~¤Ã§ÔN[¿O¿Mçû ƒ¡¶3ò&ŠG5òÉ—§}¥}~ù,fs~qò’·|‚¹D’Ì/B*`°o!G“Ô»+³W£÷`Ìa½,º.œìüD6¨<´ùîSÛ¸Gçƒ7{à²Ó9éM¡xö¶u7ºz(1«ìU ô² ën«>…ÿ”Á¶Jìí"ˆŒ‘Òoqz7î’¨×sË5è®+nêÂxì!0Ìw#ÓEÓŒ×vŒ·—kg»F*Hž¸^†vYD <Öë­ (‚c?^ž¥(ŠÒu$8³=Û¶g#”Ÿ+Ø;¼ãJÍ `ž³ØŒBòÙløllÝtÀP”³*DoÃËë¬?æÖ—¯ʵNmÒzkëOí·6qxXçø×eû&ïÓÍíP1!òÓk3Aœü~0ªì?¼—Kz]) „·r7|€¡qO{ž&ûŸvUI;NïÌþ/@ÏRu6°½`Sé1»Àx—!¿iß>I†Í*ò€ M€°Z%ÔÀj¦}Œý­† ÈmžÍ„°+d(•rúxX‚é[¤3(>î>Ö>AÕ>ÛI”s"c$iãs㎄Ôú¿ +≛¯Îá¤!·H凞£¤¡µÊϫͻLX"½B¢<ßœY´bÌUõˆŒ6!\iSøâŒ$1/‘ýè^…å8^ß|hßÁ»úßÓ].®Ø;•0°4¼ˆï¦ûÇéåîiCnüïN‡(AcIEaÏ$p´Ï “³ðšÜ2ó$³õ"“&™³¢pqì Œ“«|™ÀRH®t2‘eU½ +&áÙú<¢ÍP AvÆ‘£ú[Ëo¯{ž³æŒ×í]"ƒ³ ¯¾—aLó…Q“OÂ1Ö"6FþïÊ:‘|ñð~Δ&´ŸÝ9ïÉleË@íN žÄ–±b?vqö¹Ë|z> •­ˆÊ"?õ4n@ôùEÈl—»ÎF ³•IyÓL!v@ؾ¶æóØe~]%!R +_VÉfIÎ õd¢½õ»æ\°^…Ð{RއºååÛzRŽvµ:ËÒ!* ›«f0j2´ £ÉÛ3n`ÖÃøúùŠ._9jŠÓxO.TTdâ‘ÜÚdk]Ýz6UãÚX›^ñ³;õ0š”@;šÐZÃKR‚úbÄJ<þLtóBgä¹@³ë„­ÊýQ{Ï´¢ÁŸ!‚ˆÚå‚(ß_öbvÂKß#¾^0èCef3 ãÔ4÷ÎÃËT)^AÚ`“åÈtü¤© DÞm>¦ÊnL:j†»ë¢”_z‰^7¦ß"Ý® òr¥»÷ï¾'xlôº\ôyó¡ahÐ0 ôm¯8@žÊÌLû™È7%òa{J„¥BX<v~_H½DÑ=¯ ÒVªõêBèšÐ$ÐÅ‹ý4] v {ÈT!Dœ +•ÚN>@ì9‰¡<ûf‡ÃÇ­šÔe;WŽçcã à@¡K@VnP@bòÔ¡˜Y_I¢¥P¶VŸjGÏ-FÎ^‹¶MËaGÉH3ÄÊigE儬•z;Ðc^ר²_Œ6_›ù=¡ÉSÑTóŸ‘©w¥{רå̱8ÖÁ™.ÍIëËtÉz\§;„¬j§J5ènpdRÖ¶: Àd¶‘ĄׄÐwÓI†ÎÓ×õ®b „—åBÏ"4¿ tÖ|¶¢Âv”:i M&DCŠßäLM²1SÖVPsu“J×B²@§Kª5ÕK¢UôD-÷ž%AO4Ø1„F¿hG!«­dPPæ-êe£õúZœ2êunõV ö”Ú\ážë–ŸDšj„4ÊYkçÙks%íc”3¼!ƒÆ¿è”Ηs¡f9Òj/TçNÔ»½þ'*Þ“G<¶SCërÐŒ¿î»\§VY¤w5U¥‡¶gø`˜|{*õƒ¤¡Î=Ð(ŽFõ[ù L"¬èD8lEhéFB´Y@ˆÆœYæI#p|£‰¸«+c ƒi¢¡DŸõìdù$DØÔºê’-‡ÛDJ ¡žsƒh»{'BîWÑuZmïQ_DüŒÊHzù}Ѥ^”™ÝW½hÖ+í £))§±Ø=›íâæ«,·Æ®ˆ;ãTA­Ç-ÿŒB£rAq_dqƒicyó +ãàXûwN½¥å±ç¾¥£äü~~)4]H5Ii±Ëæ2×ß᫼íý]m?‚•”ZL]×âg§$%íCùi쬌"ud¢L^î½­PjIíõoÑóÓÕQ'ºÙ8µ[ÑOd[²‰—BGÒ§4ôú¶7u%Ñ`PÂ(W¯ë˨½ž—ÞÔ $)€ÆVáš]"šd^5M˜]ÆKÔm•[ëÔ½zNv¢s¥ ²x +Y=í7 + V7"_çs/St—Ýg4ÏwDÆ›LâP}޲žõ<ÖëõšÏü ´ÅN¿¶ IxßèiÏÓ}>®œ÷½¨VQ°Ï®;³‰*Vf’µÏçVL@˜\@ç3†ÏHªtUQOº+øú,Zßl*D»\ÔöƒâF…ÑÇã6.=wá®rÕ÷Lx”ÉG” Ú¼J†¹e¦ˆƒ +ÓÖwZ`JÆfTe­co¯¨ógSUÇK›YˆÇÎo:ãä#7zÌ Æ"µoèÍç!³ß}Ð<õ ùœDîãÝ-r£Æ*¿`cÞ›š'Ë•`O¯Ô ¡óó§±s2Í_[Ú¹^NÖRø"F]?Òƒðòݸ$$§í:šYI2fy€jHžŸç¾ðÏpø÷9Ä»±ùéÔõˆRú·­4Ûð“D¾æV'¦Çìgiîw炾3¥£=ÙŸÑÇÎw¨lø‡OFÅÑ2Ðd¶ëßÞG6ϹH_2½í€…R–Û4jšÈñž¥•ps¼ Å€·Ü‡¿h-ègV#açzÂI•’³ ɦf&qf¼Èš¹Ž· –m¹u­ ôàœCä®hõõ·ÉWÇó;¨Z*Z&^mZiqï¶h¡Ñ¨‚ê”ñòHb刧vŸ± ÷Ó÷·K*X­¡{y 
2behS<˜oG½7Ýò†ª2TZÏ­¾âìÍR*!2ÚÏY|ú9[¯¯[U“a(Ø%îv}cNó<ˆKŠ[¬QìÌðS3ÔW9!W8:ÅÕjJç¹æ@iêWC«|2O×£mÕ}’¬¹q5äënÕ8Rs”OÄBË|B4ÆïÎ~YÜg™¶H½}±v5PΩM†3ÃÚâ?w?9­cœŠ[ TŒ9Ö³/¹EfÕg­Ø)‡úÔ ×Å?FªØOÈ®3r](òˆmQ`’±’7²¹Hw#ùv®åí(ÊR ±ÿÒÊ9ÇÖéwgÄýv¬ ¸ó&gIË!L‚Òå ¯ +c„µq–^À¬àå¦4p'FrÙI + §Ñëð'µ!¶áMÑa¡}YÎI ëûôHÍý}m° `±tºäfüXTžYxôrÝëÕ)ÐlÕ‘¾SŽ>‹z€´ÔJùé’ã íÝÎëËD•Õ9g 5µ…Míé`Šüi©FÌóůZéy˜ˆcEdm\÷5,4ì›Ú…9Ùëdhhƒtìjdö0×Ï9{EYØÆ‚ëÑÛH[g`êçÝcLJ¸ͦFE£,'lgÍb?sÓFc˜]ò°%½,5j“Öš€¬Wó‰,¦XY$ˆ™ã5ÝÊMëHŒêVÎØÀz5ìy¼É¯T%$kX¿ž*©zèåñ}ȃå}(€˜Gùà*'l¬uà ´à„Ù¤µÊŽ'z‹íˆ˜§äÌ#Åï×(>Š—*çVeÎzMEš°Xëvy]ÆÜ·¢Wz¤}ÓGYš:ö5í=kT4±Ö¾ŽCÅ7 ä¶ö&Óø*ʾW16¤üÎú÷S^^Tr˜îzhhƒfcà‡ÔÍCó»9e·ÐzcÑ+¤é<´r‰öJWà‹|–™uX5Bí¡9B•áìî]¶X•+_¥¼Œ]“ƒ„V"ßÏ­•}IH, Z9fŒàð:¤7c³š&SËŠ¨GV8ïªÉšRçãZØ­$7KëM2í•È…Õ*Øæ-:œ Æ“(±§1䦶‰Ø@4”Ì}œÛKÜâž^MF‰‘´C5mÐì›ò¹É +ÖâXѤ8$ó7IFávÑâ¥S˜˜Ù±—òŒu„bÐÿL!¹`)Y©L ªx©l+YLQêd€˜þßÄP•¦µïÕ ‡lÚ´ø©á\“üØ¢ÑF>›V‚5Žø°÷ñôEY +—Š<*ÿDéf>vÇ¡vÈ’í‡jÇ$Soê Ÿia\bk¼8¡r€Š¯¡Ó­A8¹îr¾Xõn4'‰9°çü¯NÝ:¾Hz;å!ý'«ä3àr ɳõÆšaÕ¼¬ôm—'zsÊa?OÅäëU´ +‰ÅA_ô®*$‡÷†¦oas•ié}½I¡©ŠÎUÏÎà„îî8+MïïÑOzl“AÊŽÜ÷ urƒm·L'™{²˜ÅW.íèL{§2‚ UƒÖs{˦LÅï[6jºƱ#US¬Lß¹¶yÚ¾ytÆeð¿wõWnøCuÓÚår:Q€lÃâ h{OµáÈqP»!;R—7[çUlCýëæ¸ö¯ŠvGFÖ»R]¤:×hÌDg^h ®YŠ;Š1±àe‘]©½+™"-­,™jŠ!㪙íó¸î¢Vƒä¥½WOÙøËŒ$}›0J" zêT#5yQó3!΃[BŽHn×Њ‚Xa›œWLÅ«ÀbÝÏk†vò‡5$ÏP!%Í[35YÒl¼J©Icv»00™Y‡c9Y9ƒ=ÄŒsè`kª…èzÖånY±ºU\ÙX‡cûä±áû¸æFºÆQ7LuD@†ˆüÍ98êöc‘_C¢±v»ü^FþDsÓåå÷÷—o~ûî‡ïï^~|÷ëç{ôìá·»OÿùÝ/oüøðë»û»ŸîÞß½ÿÇÝÇ»_~|ýøöÕýçÏïŽÂÕËË><þWïx|õðùÃã9O`_Ÿ€¾ü9_^þt÷ú>R¥÷WÈß®ÿý釿ûðæíÃÇOÿáWú¤{d¸oÒ~ ÿªA-”šÕȲ”`Üî<×-c%†€*˜'hhQq´tœ˜O©I1©2±oŒPgLÔÔÑ¥y¸ý·Þãÿ¸fO&ªþ‘‰ZRì&F+Fì<$»!Tg2lL¾„ô7!ÅF?3®ðX6RÏþ’á-`‘*P(¦7MùÝLÙ8Vd&,3>ÇÇŽìÓ0šmñ8<ÝVó'» o.ð›ôªïì ¾­–z9ÖßÔËYPعpãàYåÞ,z;Í„è's#*–ä{&ͤxA¸B!Nþ¢­%ÎÁb2Z®ö­S­ëž_Ä.Ñ.°hÍý*œÐùN×|¢ˆ]¯Ø î"&eN{žÍNdÉfÇS2Õd‰árš Ò+v¤Ck”¨?Ù]ºò*âP”aíqåK– Y!ù¤æ^mWûJµéøKÐmroÒ„ZŒäÚ”Y@*àϲâ®8JÁR÷ƒ•—WSü ¼¶¤Ïå.’¹$NNå4zkd®•^d´K[s"¢îHp'u“iäm·z¾ +˜+RØgRt²sZL@ÚÊ;Ô{ +ñªjm—•`+Ž‘¹pc ½ÌóªÔhpÒ>ɵd\‘lŽçZÕБREA”u'ÎÊ£ø¢j÷©rõœæöÝÑÌ9š!¥M¦%ÒˉJVºÑrL¦2ްÎ( ®f»öykœÅ 2ت!6Jö[>„ç8Ñs ¨\ÏJC™†@c°i0û>½®ÌèC<ÒŽ@Ò:¥¡añÃ7ƒ€<"º6É ¡ƒ¶Þ]JAˆôãžà•Ú~üJ•ÓJªŠ,S¢Y…qœIÚY¡ÕÚ£ ÅïkÈ﫼$`ö`é¢ÍàUÈBóÒÇ× ó+gCC¡ÍkSq»M¯ϾKêžD.âÈ‚\GÞ»u I2˜Åw¹‘¨Â‹žËºPôª†x©‹­E,Dmõ™^¥&^($Õ¸©/bÝâf¯ñáYW¶¸Q" óòƒ‘¡ríy„—vÌòuR¼˜AÝfºoX½j$nǘá"0À rMû6– +b(Ȩ̀‘:}­å 1‚ÜX™¤ F ÁÓTZa +Ý=Í£‘vÒ¢vn5nyrÊÓs·q™©¸#r«éøùÂn2ÝŸ…#ªÃõ Ç\¸Š%.|æ“ ‘yw¿órgÙOúDº)> R\3Rº<*±ÚÏ +ÚYQ°,`ȧ_JÚªg·òû'd˜Õ3:ªdæQK#¶·2ˆ®• †´| `^Ó g _²r€€õµÃ¡÷›Gǩоìzgd×s”`„\MI÷K_®ú§¾œïÆÄydO—Jº¥v·ï[|Hž)уÖõ––™e<=ÿG·èB\]´—[ÏE²Î£ø«ò'Èf³HdÀ‡Õþ÷±E{ÛNlK³ ºŸ'hsWAâ"FW0+r:n SÕ´ uÕ‘_Â7éxa±a+®‡·ÛŽSaâˆD-l—" B#±ª¹6²Œß]õœØ²ÞþyÃ&È<Ñ<õ%µçªßÆËç[”ýAƒ¸ÏêyØBúÞD‡è"³×„NÛ‚JE¨0éç¢"†!>Öïâ(?¬Ù¥xÕ%C ò‹\ÿF†ý½ójØ9wopD²»¥Yäœëös2Õ¡ Í«²:«òËqúäwÈR©¢ˆöqõô¶ÑŠƒÅ×l€ÝÇÿävŽÖâ·—´­?‚X­ˆ-t凓{ +“.g2RT™Ë9ÜÀ)™«nEÓ…'ðÉ-ü ÎU±Yä ¡"ë’N^ ø¯NÙC/€“Ék#äÅ«o´û¶éú Øš¥“ê$Öu&Yتç5f²Ìidëhx§Ž’%^o†¸û¸à‘[J°’ZyèÊü`¥‡<Ö²­•ëèáÿݺâƒt£M +Y!ÊÞgOm–bÛ¶þî±9G +饤ãkˆrSƒ5 +ò=rlÒJ*ëÖ"32Ù#ÒËØÞ«8â¤mœ\È ˆCP-'š||[˜lÏ`ÉÌŒ7´ýtq‘@ÆÅ)@®g’µ–‹ÍjÖbݧë„ ˆ’ÅAÕSÔ)ëV•ÈŒÊÇsâèÜ€åhbDû|æe£13 kω·³C…:Uò”½˜Ý!DSâ0[@îžÉ ë™±=ªóŬü#0ºÏk&âŠ÷<4ÐÆ v'÷Uªˆ2a+F6Ni#gÐMñ14¿¦3ïä(˜GÑñÍßZð¿$ã»ËãìúÀ å xÛ‘™«ÄՌݲ&#$ÚàØ;QŽêß1ÿ@¯sBuà­B¬þP“ X¸ˆVõ«yÈ»GýëBf È8Oç½.©¦Ž˜•vâ68Eœ_sR(1£tÉ®?SGk#ÜÇbWGšrl„6 -çÖˆ‘i«ìg𵘓°|èu•#¶!ìVn„©¬½f}—]ó¨¾ +"k‡IPÒ[SAdé—–UÈ apöëYt¥…eLdð½…îé2½ o¤ê%¯³¯­÷B›Y¹¿ä»¯AÂfIåÖŠ`oÇ)¤Ö{'† °½¡±§‚íB}Ñáñ„N’·ÞêØàeA!ñ÷öƒºœ°D®lO¤Öû¯L/ Êæ{,\éR"âk¾6ɲçJë:Ò3?}RÄŠUñÚ%ª3T(+²3ž ªóz¤û³ž…óI«X+ï×¼ŽsÞ#Ä`® + H×°93¥Gа{òœ ï(ã«@k¶üà +˜ÿÖ8zËRØiñ)ìXþ¬ÿVB+ömdò +¤iÏ¿4Ó+ÔËÛ½,F‹ö +*ì=tÝøG€XeÙ +endstream endobj 39 0 obj <>stream +H‰Œ—]oU׆ï-ù?ì›HPÕÛëû£½‚¢4¤E¨HU…œãäÖØ‘1 +üû>ïÌ>Á$ê Ï^3kfÖÌ;ï¬©Ž±´¸¶ZòrsZÃì}iaµŽåÉñÑIL}3µ¥ö5”^8–òÚz˜KÍë,q"‰s -Ö¥Æ5ר\1–µ´–—2Ö6£Ã\S¨q)}Mÿ‘`¢·¶”¶ÖZ“+YÉ’ÅÉ5's¬=–!A} àÇ(u)uctÓšy-5GÉz•Ca©$;RZNÆXNÉLjÙµF_g–GmÍQgúÚ+Úò§×M0§93jŒËΕFnSa„¦Œ`9„‘-®ÔëvU*¨_õóñÑÇGa 
Ë«ó㣶NYñ…b/A_åD»S´}‘B¦¢›J»¦bÇ2™NÍ5’ÛÅZ˜ëê“&Ô4Ea6M¡«­Q^ ç­íp¨FšIІr(Eø77h›žI˜„ª“8Ä‘—„$qLIJàÆÝ¦˜g°©ÓñšLÒ‹eìs€øCÈ4Wh »†ôl˜) Ȇùó0`Èyj7![.[íIUµ ÕÿÁ@ ‰ ÓÜG1Kàh›¤"1¢z§dRÎm“&Çk•„ò¡/9PÆÙ‹(´RZ°:s³cØ=°8›Ê‘ ¯%c!Ó¥ÍMoR\„QlR +©ÂÉ=Sr¡x²Ø±mŠ…ŒY~ OéJ,¶Í’P ó( åíBM›³=ÒG:6øUm°2þ‹$tD¦ùbsÆ'EÕl}0Á(ÄOiS ²Ä7@Þ&•Ejz[iÞs á³M6:ÅÁ+C¾®FL’ÀI¤±¦IZÓo°–¥2¡Ç†ÀvCà3Bµ£ @€+7½)ÌÔ¡dÄð“øZaR•æ ÓYÖ,÷ª^l¨ÈIŽ™íÓÐYeìŠæbµ~É-y¿tÍ'uŽf-‹«`§ZoÄzh4ËÚƒkè[ƒ–‚}\›=£nbFª×­§\Q,aNA‚Ñ$Eã¨9ÈyѯJŒ šSÆáƦc!Ún¤L<\Ç¢9/b†H2ú6q%ì ”Ç>×ýXï2%BšŠ{C/žn÷%H{öG˜kþ‰úéšxÇSÃß’PkßÕ'yÔ:ʆQI`ô‚ÈÛÔƒÅÉÀ&̺ædFò ›­fÔ  `'&ª“™–}ÃMsFÀ¡£Ì]µ~ü"SíÛúpúèæöë‹ÝíÅõÕÙÍÇå/’=8ó +‘.§/oo.®Þ,?~´Û½ûâúöL‡.ÖÑ¿êŸã£yÈ×ïÚ¬Ëé·W·¸ðmÌÏÏno÷7W/öo.ÞÝÞ˜¹>þ²ßL†åôñõõ¥­Ï/>ì/Ÿïo~Þïn_~|ûÓõåÓ«³Ÿ.÷ÿÝÜŽÇ.škÌΡa/½‹Zð˜5}Ñ«ž¾ØŸ]~F0¤õGæËË‹Ýþåîì’H¿¹¹8ÿîÎú§Î޻߼¿8ß¿;8q?º§~¹¾¹ý$®ÏSõ‡Ãë§Wç— ÊëÇdåê zðäæìÝ»ýÃã°œÍAu¨¿£ ÕvÂ)F‘mñ²˜îÝ´sÄÙŒ]ŠV[ypG¬d_’m%ZäŒÙÞ 3Š:Ÿ%:&’" +\DëQN‚e1Gîݸ¥"kwÓî”1†}ÕhI6f1ÓH˜ Hl`†AQÄNDòâtÀ7¡}%™šýÒ³ÑtbC'“ÌÌ#êâ¶Ñi©N}e„U>0™‡{ÑÓËY PªÃ#4/µ¢9 &J1FÐÑ.Â÷^Šúmˆz$¤œý TÂÞ +ýeÎÆ„Nb‹#,UnJŸñÑô™E¨6Àé7[š -÷®Ú<Ðæ¢ˆ(,åoMVUQàm-Àü!ÁSQS–pø;…UÍCcŠ’uVE[Gb—1ËP)HȽ«Ü¸‡%—)òC4MÞ¡Q#€iãÍÆÉ; ¼­ù߯䱣" R:hã¥,úÉÿŸÝ±Å†su{Îê`;AGÍÇ«ÃÉÈœº\ƒQ/ Ñ“Dà®—¦Ú¬Èè© ·mªwª1t¨vç¹’ˆ0 ¥3w[g2û +{êæ7³ cû CQã÷!‚y¤žŒ®A­$`ÊgZÛlŽG…yP¹Ù.@†m²hÓ¶Ø—0´Ô4ÖF¤$i•½  ÝY)làb²4J‰Œ‘P Ũ*ó}í;Ü\ðAŒHkœ +¢K,G‚!& l2Û2å ’qª¸Ü º’švª«ZÐf9š KL¤®»hŸÑ74Ç_9x¼d¤}y5ŠÅMctÚM»KÔiåªÿîrëí«¸¢ø{¤|‡óR ªæ>sÚ'Q„Z…VEª*d—ºMì(8‚|û®ßÚó‡BTE²sÆgæìÙ—u©[¾jwØœÕfÑ©~)lÑÊZ|GM×—©ç8÷Ƭ”ì.3 J÷ë]NP *²oÍ-å®×>@£ºÔ°S-"¤YÈ%&: †FùÍq=äÛ +LÕ$í;÷]ñfcÖ=ùâPVH–¬Nļ¤f²¸Ö÷¤­iŒ²ZÙ—\\ J+ºÓæ3 Ü<#¡¦ Òqî…q¦ÈX`CÁlmY¯}³å­Qjû/æ¥W$p“J#¬eƦµw.3Îv´ùÌ+- çW#¿­˜feá¤È§£\aÙ^ÈgAÓm„}µ¤Ûoéµáœ¡ûË4<‹,Õ•WšWbź\E&‹¡NCuè#{²ô´¢dôˆ¼\Œæn|}W®Ð.Ix“¢©£Î}Ô©‚TMaÉ#õ3ï@pÁ Ï".«…Ùkš„/jqÉ«ø} Í$Ø7®¡ãª=÷•œÂì ¦©UaøÅ´B&i!O­q½O6qŸžt‘ ÕPǺ~ûå¸ß¤ÍmTVuþ-²¬$ÜŸÊu+*+è (Ïá$ Ö°c#v˜YšÆi¡»Ñ u͸ÈìaÈúø`Ÿ²xZšÀÀ,9XˆÂlŸ¹l1=Üçö¦zmôÓ=uÓ;É* ×yÙ•TŸ‰¬`¬#†*•5t©³ÜðF)K„(LËŠ +3œ°©X$x½@JïÓ HjqI^˜IÑëZAiãÄväÓ!g¹oœ=DíÕÐùƒ$M ØD*'ŠM»˜Jn?²à2D'ZÎKs1 kÏçÕÔL-dÔRóö¡Ô3DÛZ¥Át/á]PGvŠ¢:мtè8r ¢•Êkºöy‚1¼6éQ߇Ë,÷Nnò¾'ŒÈ¨ؼ„з#Àѹ²Ja]ºSÚ“¥áM·EE[p……>0Ô'éo–%Cʹ©Q…Nhˆ—›æä)SÙʬ—.­¡R“å<·8—ÑF˜éÏHÁ™s‹ÐžÄ,•AeœAå.>ï}M¡cÏ][R7öÈëö¥("Šßöä-.ÞÜ:ºhîv5 +¸Fg¶ ø ù,ÏDêæ°Ðä Ô4œ†9ö‘½B4ÔöUˆÐµ\PïXWf`¿D§Í‹ÊGÔÀÔ4:E ¡U}Ô¨äØ­Pƒ&˜”i6² +p㻼Æ(DJS¥Äß ¦©8Sš]Oñ’I4Ë%QÊ}WÈB³"'š±ª{¢¿bj3zIòHþ"ŰñåŒÛO ¦hÁ“¾ZÀNÅæÉhàþ†?æég‘â§³@pêÂ!ÂÆ+]J}’Ò+öILÑ6gZ‚ÉàWé)…x˜UK DA’WLÛ %aŸº•ƒfÑ¡… ?}+Ùz®˜yïÒ`/íjP#Èß5?8Iµ¿ ¢žÝ½¸«¼•Žò­´Ö)„o‘Gaa¦ŸêS¥fÖδTG}^ŸÇiî€fty†Ï‚uí¶ŒÞ.(¾×=©Z° Ô1` +êÑØ˜÷˜YíÃÖ‰ÌèÃíÎÉ)K*A¥?Ï­‹ôÙÙôÅe:Ðß–Xø®/·¨°È}$'-¨Mʲû-Eõoh"™Ð Ðs°Ò6¯IÒí Š:‚²y‡™Ñ¦4T; …ÛÃ< †«áæ–ÏFŒÄº8ZA]Å­þ3e4T°þ¸„/x‹+õL‡M77‡nüXÐ̦~uÅ*ðNJJöCS pLH\eDgÿª­(ÐçEͨàXJ~èù†£~sïÂ¥êÊÛÖÊ:ºi—¨·TË0\’ÕúûKDVvDâžñ|îÎÂPr{x9%d åÈùiiÕøÐìákär˜TU0¥°U˜g3¯h¤Oúeà1 B“ 5›$¨ÌRèyHŧéöF_•îÄQè•"PA+4À¿ÜdcUc¦ÚlvÝC\u«äô©èt‡Ed>YØ«q5;·P§¯¥"Ô,¢¦À€Éü¬Ù’Ðyì->”Ö OŠ%´†nÓZĦ9d¤ÔŒ£*ÍåI_Ðò6V/æ“I€㹖ቕ¸W3vN±úcfü +¬¤HÄkdzîí¸Ft S€Õ¨ îª(%¢sS¦t‘䞦9…ÎËguPE£1‰KëêÕé(uÖê.#BX°,ÈA hK1¨} ^øT †+#ÙHa0éÔðëyžMÊÝa5¢<ë#–€ÚA®•i°@ǯh}¸ò’ß7% ÂM`g-ƒ‹ÜÏ{ËÑêÂ8Ý4Ó,2´ÂÝå@Ç!ù-èYô¬.ÄíuéJ?/“ Kà’†\NK¯´ýŒþI‡ØJ´Ô0'ˆ§‰~ŠD"4iâ×s/ô9øªT½´dÝ[|FõTé$ÔrfµÁDñLû” Î}8q +*U¥A;nÑwa]z”Y/ñ‘áL•Ö'|œBÉÊQX ³s}°@úÔ§£ˆ¯£Ci, ±‹ææÕQGšLsZêÔYÔ¬zÆVJ=ëNpÂnf!Ç÷i{v‚·s¡Ú)¾:ÊeËh¦!RnÚº€¯LvT‹ŒÝÔüiK?§Õê´W/en»¢OÈëRòujB¬œ¢4Ϙ\ÍÂôR­*¸ðaJφPÒÇP¶H´[qy W©D7¨ ¶ÍNÚbòTs.ᄤUZr+lQFn! 
¸Š¹¹ê9YŽé«‚ Aê†8o9Ýâ¸ÕùË‹X•—ð0ŠsºSP\H”+ØM^ѲÅhÙä  ,¡°i(kùÔà²PbUŸ"•Yï~¶NMFš+§Y‹\oÖ¤ÄFÔÚÔSè÷Ú϶‘¢`œ(HÁ)Ì=§Xh¡ñ¥mƒÆƒÔ®òcìk±` %þ48†¢É:RTIé†ZM­"qÚ•zٷж$«¢×ŠQæ¬A fõ,Gxê¥Yº ¹JÄ*ë5°Éœ]‚šEÊT +ãc¹)vT™Ô_¼æÞRð6K€=ÊC7û'²WéJ(i +ÃÍËðvñ²ôÊZ˜Ù¶HÇ|Õ¸DÖÕ’¿Â=< 3Yk×*Y3¼±Û²Èmt\Ø;܈Ê+…™œAÛ+÷vÄÊV›":+£o ¡Å6íÌ8öŒ<Î×´—ãÝ/îHDîâ²eÍË0"ýLFRË+> +7íÄ"Tv e¤# +h\í®]3Ïpn8½4ú,Ç;ßÚ!Hª[aýÒ‹:®nƒäÔSeÝi™©Ó°¿KáVüè«-࢘‡ã|ò)QØrÃ_}1"‘x“|ÉfµáKV´géëjÛÜKu«u0[¿Ä=|LP<5Ch8º¢2Øð5‚a@ƒÿôù‚ÊA +ÜŠ™ èzD½<`Ó6äWÑEÈ¿ýëãG¿ùæÃ^?|rûìáöþîúõÛã÷¬¥ãÃÏïŽ÷>ú<§ãÙýËW÷oîžßÿëúÕÍñòþùÍûÇïxíüxüèÿÓ>¦þùúááæõÝÓ›ïn¿x}Í{yûêrÕµ£•-ã„ +•”Ór8«‰“þ}øôæúŗׯodË/Ž=¿zqûìæ«g×/nï¾ûìõíó/nÞñãûû¿xýÓ»ëo_Ü|öæöùÍ÷ûµün¬ŸþøêþõÃÿD¹ï{ï½|ý7=Ÿo>½{þÕÛ—ßÞ¿ˆ…öÍǺãÝeé½'o¾½yÿq:>zü(_ÿðøÑþó ?þÄG—ûÿ}Êþåã‹øS<~ý–§?êùßbŸ~ü z_ÿG:þË{ÕíÆuÜàû~‡½”D˜ÿ^F€Q´U@ŽQI¬¥µªVZ ’ŒÄoÓÛ¾FûbýHΜ9çìZ>…å,ÃáÃäÇKX>?ã8¶2gMÄèþxÂax! àt$ã¾ áX(«.¯u¨ÂèðÚ§»ö z{-Oüô?ܬ·›þøœµéÿûþÝ»ÇÍÓôM¿A“—À{Ù(ÄND´.5hÎ +×o±Sˆ,»Ï´ÝlÐÏN÷Ù\< +ËT©Ï4쇅SêG¶np93¿—ÑúU +ž«#’çWhAïÚ]<¬?ÒyhcÂXVDålwQ^|ˆ*¤ +ÆjT˜ùÐÐ#S™‡•Pe3ƒ‹G€¹)Ô‰—”u`‹eâ„:ÕYC'JÆÀdTonoy4a‚€ð¦ÈÒÊ8àõpÌÃ5g­³È<4ƒù&ˆAÅTÐM-..Gÿ(2û\±Z€ŒE—Ç0C:ˆ§ú©Jøhtk:jH“$ søtŸÑå¡dt•ÍÃèÖæ$^PR/(ÐÈÁþ;nà%ã&Å×Üàâ€ËB±¾æÊÕØ &p[P9ñ²gM¾¢ ŒQCV¡47¸t:eR2Þ#9­NwéõBìI|œ6SONà#³‚°ÎrãÅ"1ÎM>ýÑpÏa¶Ïó›‡À.Ⱦ".¨[âk1Y}O@Yud^iDs{KûodëÃ^É]ßbI¸UÐõ•3Û\ÜšÄÕÆ@ä 0ÂDج-í>PL”’¶PézË`£*@T¹)‰77NSP57·8z°Û¥”¸Ë`ÉÆ.xô~®[Ê¥yJ®J{'r”>%GúÐscÿèg¼­,Oy«NXOâD\É*¤³úÅGa}cœ¸<Ȧæj2`óâÞMY f˜ðyÉÕŠd</¥á$b‰dœxU›[Úy2ú-Ì`AŽQ?á/ÁW?#j€ýDÌ%dÐæ°T”wÌL-4ǤÁsò…JÀÐÌ(>x—mÃs{21K¯o®/6¯/Ö7×Û+vö¯›=Œ“»»›‰ú«íúíÍæOï¯/7UÍîúúê÷û»‡§‘—5^Z½\ÿ ? l~}µ½|ýáöíÝ +¯'ˆqÛDG?>¬¯ŸW—›ÕÅÝûûÍêáîv}½}\ý|Dÿù^b{ñ÷ÍãÏ/_¾0«o9W翽øê=ãôÛ{þ9YYÕ¹…ÅrõÝê§_ÌêÏÏ„_¯¸½µÝ~Vâ°’?ÊÄ'ß]mËw|¯£’ÿýt¶¹ÿï¿ÖW›_¸DÎa*ë(—[HÂÕMûöF¾Egüm¥~ß}Ôô±Uã•C‹ñvpnPtÆß;5.lªoçEgü}иWãL8›ñvpnPtÆß5îm7ÞÎ z;û>h<ªq;J¨%tlÐÒìû ñTÑÒmÛQ>Çöòôó å,–G¹¥²ÛŠ“¯ƒF‹µÝÝžÂnÉæéçA«$V]`Û}s4ýìé­üS¨ +Š—t™`=þ¾iߨ’›¦3þ&;Ø;{¦º_óØŒJŠ?ûÄûõû?ÐG”|;¢ác:ê&’ RÏwЍ`ì‰(È2Ë¢äëÙXÜê¢ %¨4µ£® +²’H®tQ;cIsE‘­ÆR•`:u?†£&WƒÞÕ[U+RI“ßdf8%¢ë ^¤Xà[€Ô¨£Ô½ ”5jçD"J*²¥m"½M—FiâyÐ[­«Ö¢­Ï` ¸Åà«wú6ŘUÑjASÎ]4¤$f7SL5Ãu;d(ØPÃåv©P‹.xW]Ò{@ÓDEÓe°½álˆ.²Xï—¬"bâÄ¢+²Bð=^²ê ÐTõ¨=|4Ь€x|—èÉGã\O2*7§’­†åyõjGc¶¾>"0'KjÍà¹'¢ÏSÎR©†jÎxªŽ„$¯É G}vzÔ%WïpAÒYî$Ø\/Q;k\ EUEi±’ ÜÌùКÞdÈê^øõ›¾` Ô"Súm¸ó78ò0èÇß™>‡Z|Þ-ŸH-B,Ùiø·“Ë\(þ›&E©t˜PÅðÌ­4txðød‘£Q+’VÅÒÐê?{Uoü%D½°=uió&ø8?¬¯W_{–N0ñ¦to|‘ge·[«à&¤±ešIø †“MèÚÄó%U‘©#~ H@cFQ8§QhSQT/BÊÈ4l¢¡;9ò]:<€ôO¤Î9ߓҙ1¡KÕëHUd³ò ŸE}tAêx‡$Óu=V}ùÇv"ŽNv#{s@½ÀŠà+øÉÛª-³òÕ’¨î +„¨‹÷F®SÝÒe›ÞÓ§÷½ +­êé¹Å솊‰Mmvè|lk<B}&=†¬Š±*´õìhøÈÎÓ2yö¸ÉnéŠKó#†Mëù“"Òu1br¨Ûõµãn­ÞJ–ð~òA%¬EV*ð"È~Ô©g¡LWèÊUY¶Ö7§]öSòó) Ý%"ö{õ·¾Dÿ1Uá¤q ?…sÖ`馷֙ŠEОà®eu?šÛy'W$‹ûÚO‡Ô¬}t5¦…ðª/÷5×5šº]õÅvG^OY0p~ôh¦ûñõÒYMù¼ñ3jm\ê 2¹M2ÂtÂÌrsBš”QhW¦¦q2›‰— ¡,»\¿³ëë­_Ò*®.ý•xÙ!’|ŸZºä›)Žú 省Ë#HÅËwº÷Ö×e:š¾J³5‘²¬_‰Áíw¾K.O}Þqy—ã¿ÜgÉo6Çsåßüͯpy¿n•_èòÄÏqt¨pÆÂ8&O-™SEuNEÕm1-))AYŸhBF$‚*ˆ>Hö^‡–9ý]峕ýœ¼–Ý/¨#m¾¿‚d¢=:‚éÒ^©:ICùÓ†¿UD’}iF³¿›á7N‘K»nö®IgK¨ær +J>ÔgóˆÊª‘—Ý•~]£ÚéºH©‡.™tøÓi´Ô·¶‡ Ê¥Æ?Q©ºSmÛ4 +ò^±ÅJÀ Ô¶ º\ë´û0ÒCÝ(KXÐÙ½™mebDÕ»¿¶UAì'ÏP°¡´mˆv.Ûµóª%Ù¼£ÞÀlê!ÀUè+¥Í^“bÛÏi£0l‹)A€ºÍX½=BïËNìí;PMÐC ¢'Ò¼Zý¼¬Ð¯›S‹rÝn«%]x4eúJÊTÐÔÜW}¼èôôˆOýÑùAg×úb«ËJ{dÐJ{”ü3;³ 1¯Èdt™þCO“ZTí6ëðé¶êU ×?ï;N‡ðL;_ñM/‰ }¤+ÍöI¥Ýs¶ŽõY¹aï–]éaÒªAPK¥äÊ(G&è ¡Ïq>«^r¶ç÷ÖÖÓ9ø¨­¯7·R®Ü*Ø´ARçù ©Ë= +²_·è'Ùç}Avá•ß C„óoþæWÈþ¯[åÊ>kÊÉk‹Å…_wíË'« ¿PQógKš}%*td ä5!”\{–Ï6êrý”LJ££[Xñý°øöÜ¿·Õjlät–)’·w nî¶KfÔ—¿ ±_ޱN[?C«ô³¶Q”À¢d2i…êªJS7ϵ@+Å)莬.2ôõhôâ—à&e…À¨ fÊÐ>ìX4tå˜Û>2ŠëYëù,Äê{7™¶‘m ÎÐziÐЂ}šîö#FÕ϶ ò:šX‡.•7]<-¤¸–K14»½z¼ID9…N¼,¼æ ¿{B:¶ólÝø9̶ÈE­Yr#–zÆò½,‹‚¦#£Uu:]-¡ÉòE-Æ:ݵáÓmÒÖôUw<'gÙ黲=¹S¦@`K¨$ïS;mMè² —ãeÚt¥y)Ï:5(êžKñ ®!ÊܧLµüÔ'Š£S@sÖçC~7ØtCCÐÙ$¬úž·s…‹àœ–Ê-Eû*óAg”QéSí‹ÍFÚÔ•ê²ÃûÅŠ-7²šw_,§°^H ÛÓ/6Š +¢z}ÈhA§£Ïá7JÔÁ,‹L‡×PóéÐ_b·àþpl†È¶:°ŒÝuõw£Úœ¥‘±œËR5îL­›#k‚ïâ·2†í"¼Ò› i±0‚f²Ü5rðíš5! 
ߎî!ú“S z†»lºÛl± ã-Ãtt¿³jC]–Üm&"!8#‚ŠƒÌ5\ ª¯9Ðd¦CTëïj62aÕ¢¯>G>T¯NÏù÷+wWRÐŽë“òÓ´UÔÑ{u·ÕÕÝmžiµöUWñ3ÔFZ‰^—žhÙ¦LѲž1üÖXytËläGŒm^÷8=°4¬Ý,ªŽÀÐɵӖFò¢%ºf4ʺ–`ä*ù‰Õ?ÜNRÑ´éÎÕœøAÉòŠ£Îe¤N;+ˆ7Fõ&ì7t*0î5hyôÖêõM©83ÂÜîâÙ¶åu»‡ +/,S÷ÔG·GªabÄVÔû‡ŽgçHdÖiÛTÜH½›¼É(¡ó&ó~öb9Óu‹ëºé‰ýõ~NAv +aˆãúÐé>í +Íð•0ŽáÊzÑcWÎݘ³æ©6r©.5öM­[õXÓGká;mü (çס‘ˆÚb¯tå ©ŸyMîãSœ¼>eÂÎ:Õ“ëOõ`§*c:íN}xKÅc‹0‚NoÅ73úIéÙÐÕÇMR tGȉ +WÏ’5­ì. ¥ÀUc|ªw1*®ÖÇê6ï4`Šo³n!­¥õKá—LH¤éCO˜º²›îœØ‡8e"–©‰ÌféïŠ(q@×ñ¡æDn—Æ2‚[‡›]×-㧘.g +³^Ã9á–=VÃ+ŒA®ã)÷ǶÀ—ûà\mGã Âk|¾;Ψ|©üW¾üM¾ÃŸ Ú¢­¶oùÍÑùɿɿù­|ùþ›ßÔ/ú÷Û?}ÿã?þóïþå¯?þùw<ýÿÀ|Û¿üë›ÿg•ŸÕÉ•/¿ÿ+ý,ÿ“u؇֭ÈHÙî¹—Ä‘íÛÉ%Ù!tºãûÂÈΨËe ÊÉMÜ>lþ6û™(èä íÞæ(=’žN}7­Lx†ã”4Û‹x.âα½›”´Ø›Íx²›ùÔ@ \,AOUa#ÈK‡~sU(®®«F)7,ºã(ø6VÏMxB°_—|yWÃá®IŸŃ:T"ÝUÈétX-î–¸tdjthô×C®6ìÔížLV•Œsi…t»WïGè6ôˆëš}x¤ÇÎHº•ˆ•}õÌ*ã>;ôg‘9Šm0t‘é\¶´ (¿ËJ±éLfº‚”üý¶ß£BÌð1";n2|LÌ犬ó=ì-Õ‡Þ›»Ê%’«á-n‹Åß&ÃÇtZʸáãWk麺*F€ºz2|L[³Í¡1Ú†k#õqÜ|jOÈÇÖ]Ï÷‡ÓR:¦= eÃÇêx6­Ø"æøm™ðóúØÔÞØ0íå#Ž+¯¬¯ÐBw‘uv¾–ðîø5v×5v®^z'…Z¾åè–/£H±D=e:«nÏXYç›Ú²ÁŠW 0Z£_•Â'ëPßBëßNì©ü< +'—cxÔÏYdqxËž)T›Ñ,#m‘t<_BW‚’Ý’·.ÞÉZímqLÇ\W¡ÉÙÖ|îÒÄî*4¦ý3ƶ¸w»fŠØ¾B'¦Ö®k×Ñ.¡í˜ö&EaÚMQLxNðC2 +ËÔôNf3sUdÔª‹ùŒHOõÒYF¥Kù`cà+æÿ%½ZRô¼•è +²‡&ƒÛHªRIÚC àL!ƒ`L±!ă¬)ëÈÆn•ê¥OÝdp/Æð÷ùô(Õ󜨧Ç0g´¬ymgýÕŒA²°û0?âŽPéÁAª>Ë—¹ Â['<ÙOr>áP›ÖvD÷‹ÊÝÔ`¨ÚÈñj¯kµvg£Ã”§$CÞK,oO$ßy Å^uœVH}t\[ÄÑNnþÄ]ñƖ®ÖrŒÈ$ÕãX˜4½Àµ}ö‰×%QXk¦¡4¸Xë,AÓzÈÛDÉ|§¹KåZ¸]/æUzĜ瞑XÛ]äðºµí³Ê$¦¼.Kä«¡öøÊwÐ-Ñ ÕjiªF •à.ˆ]láªíÔg’âAËê-¹U¢“ü’ŽÎãCîmáy3çÚʳôEÜ]¥/êvÒ]ú隇ܣ‰ôÚæzœÉÞÞ³íè`ýÙíôì`l`¿x®Jלh¤WïÎåÍÓ Ms¶Y›IÆŽ!ìýaH¶¡¬±;=XZmO*à3ge¸ÞA™,^·+ yØÈü ß>¦ÙtÈG7ªãrMB§R6Â%!™~øt€š¡µ6® Ç+2jmo&‹¸fÔ;“{ï\ëèXOíÈÆLóV:š'’¸×Ûð(þ”Ñt­ÕÙuBé™Vï§¹Ÿ[xæyàšWûhNZÉu¸à‘ЦR:ê“7è£Ó=Æ`ŽÉúR^äÿï² +/©:$wDz«ä7“ºÏ¾æü >tËË9Nÿ}ûó‡Oüó÷Ÿ¿þöé—ïøøŸ~ä¯ýå¯oþŸ[¾êX//?üËM_å›Ü +ŠYqÑÙðgTÈ»€»-od÷Np˜œ#è¶J|’¬Õª(ÙVæ"¶nTëÄ}óu‡|k‡ +÷Ââæy·Ç…5 ñ­8K7‹›]«´RhךtCL0s¯£ØckUü­8Š"Mú{ì¤eèVd +M5ØÎ ­ÃO‰nÏê%CQ›Ÿ¶ f‹ÈèâÑ¢6÷PR×u3 †Æ exÖº’¹XûëF¦²IDl]MƒÖlk­ DÅ,ÅLnÂŒ}/;T/ôø 2—ÂV+Í-%È"Â÷NfCŠN´…˯ù!,·ô~@Î}›ß› Ýl—Lœˆ–Ü%õÇe™ª‰‡¾ª§·›Ù s¯£»6ü¸`°uÜþDβSæ¢Ä”ÑF.‹tž $LÏ^[È=µ¹Ç±Žìz«¿M¦ +$=8œ jKÃb±S8ö6<¶mçC ³luØË;Š)ÑÎ2†Ú2K”ì$:Ryê°ÚHoú~3šÜTÊ* y-䀚G7[Ö23ßÁHnhñ[†e'œzeË×'RFú)P!~køx½ÕlkmžMjÙÂí±ýˆ6ª½nY¡*$ý5ŠÌÉeÞ~;® —ò¬SPjs)nà] ¡«Õò“%GO( +hʨ|,ÄáÇjR”êª÷{;Wø0Îi©¬³{…2/hÂÌ20Tú¾Øi¤m€©úµÃûÅJ“‘¬æ©6³¯Më…´¯~±¡¬ ª… A·Ñ}Ÿq£d¨ÎÚópx ¡ÞçÀý4CÆvÒíJvó§}Ý»Bh8U»»´un5¶ÞK6»£:Í0¤#C›_m•D7’*ã@‹…ë8Íeíqmé%YnØW`1ßß5=W°Ü‚ ¥RÇB'h³¸¶Ïî —ÌaæaÍÜá0›3š3þñ¸ª§ÖJÔ5Aœæ.•káv½˜Wé#ð,9#9°¶„R¥A‚Ÿ/pg‘ƒËJå`¾â£ÚßA‡OX£Óü„jÃÏI·ts;g´ƒ-´V Íoçú'”¢àD'ù%Á(«wÒ +Ój²Í¬«ÚŠ3>°K”øìRk¶·ôI }¼\sè"¬éTT?$X_`ÛC¨õAÍL<2¤wëMTÁ÷"-{ñ\7t*ÉD#½¸5ÑÃÓ MsÂCÀRþˆCØû#Âຟ‚IjìNxE«íIœÖ>s&ÔÒf‹oбÆu»±—ÓÆˆoÃãózt#ãáéš„N&j¬lïL?†eœäwìmFÿ3ÈñŠL…MÀߤW£Þ™äRôF‡Qe?Óåîq3CÍ J¶áQü):XZoÏ®JÏœh´z?Íý|ÜzÄã0ÏÇмÚGËA}$×á‚Gv°6Sš°YñôÑécðû“*iËËï®{™Öî!¹…êÒíò{èOYrüäzVyùà‡Õ—oþðéþþó×ß>ýòþӼൿüõÍÿ~ÇWéå凹ç«|“[vlJ ¾™!ØÄ4gÚº@*]ùˆP‰i[ûL¤ÝKP—>öÜdÄØ&C Îw(¶‘>…8V·ÓÈ=× öÕÁ„B§ëµ}Mº!œym Øc+*þÖ- ik;›¡­_Xª3÷:´?%ÚÑø"ö­‚$ ý42ìF[ÇP§è@R×5]X¦ ½no؈o¥ÒúµŽ°h¬µ·nˆÌM b/éÖ†`œœFÕ[gCþ6FZLJXØ¡èá`…µœk/Òã¶·ƒ“Xx6Ùh¹ßUÕFL<&$¤{ɉҹ6Û—h‡&?b ”[­sAhX†”ıøâÖÓCúè® ;®ï$|5Õ$·ê1ÒÙºªÒ=Úì +O$åxöXÄ„ÜQkhb!CZ½ñ†n-R é¾áã¢åű-ÛÑq™%`±E?r`j&•òâ5JØô8h!šJ5H¸sTü¦‚Îe{{ëú6,^̨gBY«_ }>pNu/>+avóÈ­Z­l˰Ôâ3îà RF:*Pž~©û¸·åwše]ʢ̺‰Ñ~Øþ»f[ìÚ©¸{ZK@jn™ÝnÏׂ^ÆC=͹Agíš[Š·4í+“Ôj¹Y@ô¢CžŠVÓçBDÒä)C_Ïy)–o%â–)0§¥ñÜ£•¡nºç€&Ì,C¥E¡®`„¹²U€1.Оé[­Ü©6³Ï¢ÂмZņ¢zhÎÇB†ÊÔ6HÑË'út%K`Ìf ûðò©~`4ÚsÔ~ r!û?"&`Y®nêüÅ…ÈfÛÒÃú¢”TêwwŠÊjö®Š.OÐÜtèiÃ÷\kØü î¹:Pðw(=ÑP1›ï=¶Óf³ûWœD>¤ShÉw§¦È GRŸcÝx!÷:jQ,ˆ–_ýÚeceÎ|Mg8ܾÑl ãó<Ä0¡p‚åãÝ£w«ï“P*ɽQ"mŠ*d-s\E +?„›IYŸm¡ +)ÈC/:Z–v9Íq+E )RêPõ­ÊÜRŽ(gE̽- éÖéXø9ÒÚ&ogÈ2%.aÈUzˆ'F§/$ ­jÇ)ÿ<¡z&¢;þ‚h¦22—CÝ]+­æVoÄ£(à $jȸ,¥žy°¬¸çšÆ±Œ€Ôežör=Ò%SÍ·6M[pÒ«xb—#ùœ •t*fMRûÇ–•Ðä¹iÒ²…SÃã=õm1õ-7h¨ºæ(*Ë$ʪ¼@-•¶@»ÎÛ¨YƒÇ BQd4*d-Ë—1ª‰â&éÙ÷L*!«»å¾]+-†††4zFwÝgßFÔ(bs‰bM4©4±·ôéÀmtC­‡s6‡Ê$X¤|l‡œäPö GÓ…ì,‰þèj€b*Ï!° +Ÿb§0–Å¡¾aI׬©éi 
diff --git a/kubespray/logo/usage_guidelines.md b/kubespray/logo/usage_guidelines.md new file mode 100644 index 0000000..9a08123 --- /dev/null +++ b/kubespray/logo/usage_guidelines.md @@ -0,0 +1,16 @@ +# Kubernetes Branding Guidelines + +These guidelines provide you with guidance for using the Kubespray logo. +All artwork is made available under the Linux Foundation trademark usage +[guidelines](https://www.linuxfoundation.org/trademark-usage/). 
This text from +those guidelines, and the correct and incorrect usage examples, are particularly +helpful: +>Certain marks of The Linux Foundation have been created to enable you to +>communicate compatibility or interoperability of software or products. In +>addition to the requirement that any use of a mark to make an assertion of +>compatibility must, of course, be accurate, the use of these marks must +>avoid confusion regarding The Linux Foundation’s association with the +>product. The use of the mark cannot imply that The Linux Foundation or +>its projects are sponsoring or endorsing the product. + +Additionally, permission is granted to modify the Kubespray mark for non-commercial uses such as t-shirts and stickers. diff --git a/kubespray/old.git/HEAD b/kubespray/old.git/HEAD new file mode 100644 index 0000000..cb089cd --- /dev/null +++ b/kubespray/old.git/HEAD @@ -0,0 +1 @@ +ref: refs/heads/master diff --git a/kubespray/old.git/config b/kubespray/old.git/config new file mode 100644 index 0000000..520554a --- /dev/null +++ b/kubespray/old.git/config @@ -0,0 +1,11 @@ +[core] + repositoryformatversion = 0 + filemode = true + bare = false + logallrefupdates = true +[remote "origin"] + url = https://github.com/kubernetes-sigs/kubespray.git + fetch = +refs/heads/*:refs/remotes/origin/* +[branch "master"] + remote = origin + merge = refs/heads/master diff --git a/kubespray/old.git/description b/kubespray/old.git/description new file mode 100644 index 0000000..498b267 --- /dev/null +++ b/kubespray/old.git/description @@ -0,0 +1 @@ +Unnamed repository; edit this file 'description' to name the repository. diff --git a/kubespray/old.git/hooks/applypatch-msg.sample b/kubespray/old.git/hooks/applypatch-msg.sample new file mode 100755 index 0000000..a5d7b84 --- /dev/null +++ b/kubespray/old.git/hooks/applypatch-msg.sample @@ -0,0 +1,15 @@ +#!/bin/sh +# +# An example hook script to check the commit log message taken by +# applypatch from an e-mail message. +# +# The hook should exit with non-zero status after issuing an +# appropriate message if it wants to stop the commit. The hook is +# allowed to edit the commit message file. +# +# To enable this hook, rename this file to "applypatch-msg". + +. git-sh-setup +commitmsg="$(git rev-parse --git-path hooks/commit-msg)" +test -x "$commitmsg" && exec "$commitmsg" ${1+"$@"} +: diff --git a/kubespray/old.git/hooks/commit-msg.sample b/kubespray/old.git/hooks/commit-msg.sample new file mode 100755 index 0000000..b58d118 --- /dev/null +++ b/kubespray/old.git/hooks/commit-msg.sample @@ -0,0 +1,24 @@ +#!/bin/sh +# +# An example hook script to check the commit log message. +# Called by "git commit" with one argument, the name of the file +# that has the commit message. The hook should exit with non-zero +# status after issuing an appropriate message if it wants to stop the +# commit. The hook is allowed to edit the commit message file. +# +# To enable this hook, rename this file to "commit-msg". + +# Uncomment the below to add a Signed-off-by line to the message. +# Doing this in a hook is a bad idea in general, but the prepare-commit-msg +# hook is more suited to it. +# +# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') +# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" + +# This example catches duplicate Signed-off-by lines. + +test "" = "$(grep '^Signed-off-by: ' "$1" | + sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { + echo >&2 Duplicate Signed-off-by lines. 
+ exit 1 +} diff --git a/kubespray/old.git/hooks/fsmonitor-watchman.sample b/kubespray/old.git/hooks/fsmonitor-watchman.sample new file mode 100755 index 0000000..ef94fa2 --- /dev/null +++ b/kubespray/old.git/hooks/fsmonitor-watchman.sample @@ -0,0 +1,109 @@ +#!/usr/bin/perl + +use strict; +use warnings; +use IPC::Open2; + +# An example hook script to integrate Watchman +# (https://facebook.github.io/watchman/) with git to speed up detecting +# new and modified files. +# +# The hook is passed a version (currently 1) and a time in nanoseconds +# formatted as a string and outputs to stdout all files that have been +# modified since the given time. Paths must be relative to the root of +# the working tree and separated by a single NUL. +# +# To enable this hook, rename this file to "query-watchman" and set +# 'git config core.fsmonitor .git/hooks/query-watchman' +# +my ($version, $time) = @ARGV; + +# Check the hook interface version + +if ($version == 1) { + # convert nanoseconds to seconds + # subtract one second to make sure watchman will return all changes + $time = int ($time / 1000000000) - 1; +} else { + die "Unsupported query-fsmonitor hook version '$version'.\n" . + "Falling back to scanning...\n"; +} + +my $git_work_tree; +if ($^O =~ 'msys' || $^O =~ 'cygwin') { + $git_work_tree = Win32::GetCwd(); + $git_work_tree =~ tr/\\/\//; +} else { + require Cwd; + $git_work_tree = Cwd::cwd(); +} + +my $retry = 1; + +launch_watchman(); + +sub launch_watchman { + + my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty') + or die "open2() failed: $!\n" . + "Falling back to scanning...\n"; + + # In the query expression below we're asking for names of files that + # changed since $time but were not transient (ie created after + # $time but no longer exist). + # + # To accomplish this, we're using the "since" generator to use the + # recency index to select candidate nodes and "fields" to limit the + # output to file names only. + + my $query = <<" END"; + ["query", "$git_work_tree", { + "since": $time, + "fields": ["name"] + }] + END + + print CHLD_IN $query; + close CHLD_IN; + my $response = do {local $/; }; + + die "Watchman: command returned no output.\n" . + "Falling back to scanning...\n" if $response eq ""; + die "Watchman: command returned invalid output: $response\n" . + "Falling back to scanning...\n" unless $response =~ /^\{/; + + my $json_pkg; + eval { + require JSON::XS; + $json_pkg = "JSON::XS"; + 1; + } or do { + require JSON::PP; + $json_pkg = "JSON::PP"; + }; + + my $o = $json_pkg->new->utf8->decode($response); + + if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) { + print STDERR "Adding '$git_work_tree' to watchman's watch list.\n"; + $retry--; + qx/watchman watch "$git_work_tree"/; + die "Failed to make watchman watch '$git_work_tree'.\n" . + "Falling back to scanning...\n" if $? != 0; + + # Watchman will always return all files on the first query so + # return the fast "everything is dirty" flag to git and do the + # Watchman query just to get it over with now so we won't pay + # the cost in git to look up each individual file. + print "/\0"; + eval { launch_watchman() }; + exit 0; + } + + die "Watchman: $o->{error}.\n" . 
+ "Falling back to scanning...\n" if $o->{error}; + + binmode STDOUT, ":utf8"; + local $, = "\0"; + print @{$o->{files}}; +} diff --git a/kubespray/old.git/hooks/post-update.sample b/kubespray/old.git/hooks/post-update.sample new file mode 100755 index 0000000..ec17ec1 --- /dev/null +++ b/kubespray/old.git/hooks/post-update.sample @@ -0,0 +1,8 @@ +#!/bin/sh +# +# An example hook script to prepare a packed repository for use over +# dumb transports. +# +# To enable this hook, rename this file to "post-update". + +exec git update-server-info diff --git a/kubespray/old.git/hooks/pre-applypatch.sample b/kubespray/old.git/hooks/pre-applypatch.sample new file mode 100755 index 0000000..4142082 --- /dev/null +++ b/kubespray/old.git/hooks/pre-applypatch.sample @@ -0,0 +1,14 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed +# by applypatch from an e-mail message. +# +# The hook should exit with non-zero status after issuing an +# appropriate message if it wants to stop the commit. +# +# To enable this hook, rename this file to "pre-applypatch". + +. git-sh-setup +precommit="$(git rev-parse --git-path hooks/pre-commit)" +test -x "$precommit" && exec "$precommit" ${1+"$@"} +: diff --git a/kubespray/old.git/hooks/pre-commit.sample b/kubespray/old.git/hooks/pre-commit.sample new file mode 100755 index 0000000..6a75641 --- /dev/null +++ b/kubespray/old.git/hooks/pre-commit.sample @@ -0,0 +1,49 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed. +# Called by "git commit" with no arguments. The hook should +# exit with non-zero status after issuing an appropriate message if +# it wants to stop the commit. +# +# To enable this hook, rename this file to "pre-commit". + +if git rev-parse --verify HEAD >/dev/null 2>&1 +then + against=HEAD +else + # Initial commit: diff against an empty tree object + against=$(git hash-object -t tree /dev/null) +fi + +# If you want to allow non-ASCII filenames set this variable to true. +allownonascii=$(git config --bool hooks.allownonascii) + +# Redirect output to stderr. +exec 1>&2 + +# Cross platform projects tend to avoid non-ASCII filenames; prevent +# them from being added to the repository. We exploit the fact that the +# printable range starts at the space character and ends with tilde. +if [ "$allownonascii" != "true" ] && + # Note that the use of brackets around a tr range is ok here, (it's + # even required, for portability to Solaris 10's /usr/bin/tr), since + # the square bracket bytes happen to fall in the designated range. + test $(git diff --cached --name-only --diff-filter=A -z $against | + LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0 +then + cat <<\EOF +Error: Attempt to add a non-ASCII file name. + +This can cause problems if you want to work with people on other platforms. + +To be portable it is advisable to rename the file. + +If you know what you are doing you can disable this check using: + + git config hooks.allownonascii true +EOF + exit 1 +fi + +# If there are whitespace errors, print the offending file names and fail. +exec git diff-index --check --cached $against -- diff --git a/kubespray/old.git/hooks/pre-merge-commit.sample b/kubespray/old.git/hooks/pre-merge-commit.sample new file mode 100755 index 0000000..399eab1 --- /dev/null +++ b/kubespray/old.git/hooks/pre-merge-commit.sample @@ -0,0 +1,13 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed. +# Called by "git merge" with no arguments. 
The hook should +# exit with non-zero status after issuing an appropriate message to +# stderr if it wants to stop the merge commit. +# +# To enable this hook, rename this file to "pre-merge-commit". + +. git-sh-setup +test -x "$GIT_DIR/hooks/pre-commit" && + exec "$GIT_DIR/hooks/pre-commit" +: diff --git a/kubespray/old.git/hooks/pre-push.sample b/kubespray/old.git/hooks/pre-push.sample new file mode 100755 index 0000000..6187dbf --- /dev/null +++ b/kubespray/old.git/hooks/pre-push.sample @@ -0,0 +1,53 @@ +#!/bin/sh + +# An example hook script to verify what is about to be pushed. Called by "git +# push" after it has checked the remote status, but before anything has been +# pushed. If this script exits with a non-zero status nothing will be pushed. +# +# This hook is called with the following parameters: +# +# $1 -- Name of the remote to which the push is being done +# $2 -- URL to which the push is being done +# +# If pushing without using a named remote those arguments will be equal. +# +# Information about the commits which are being pushed is supplied as lines to +# the standard input in the form: +# +# +# +# This sample shows how to prevent push of commits where the log message starts +# with "WIP" (work in progress). + +remote="$1" +url="$2" + +z40=0000000000000000000000000000000000000000 + +while read local_ref local_sha remote_ref remote_sha +do + if [ "$local_sha" = $z40 ] + then + # Handle delete + : + else + if [ "$remote_sha" = $z40 ] + then + # New branch, examine all commits + range="$local_sha" + else + # Update to existing branch, examine new commits + range="$remote_sha..$local_sha" + fi + + # Check for WIP commit + commit=`git rev-list -n 1 --grep '^WIP' "$range"` + if [ -n "$commit" ] + then + echo >&2 "Found WIP commit in $local_ref, not pushing" + exit 1 + fi + fi +done + +exit 0 diff --git a/kubespray/old.git/hooks/pre-rebase.sample b/kubespray/old.git/hooks/pre-rebase.sample new file mode 100755 index 0000000..6cbef5c --- /dev/null +++ b/kubespray/old.git/hooks/pre-rebase.sample @@ -0,0 +1,169 @@ +#!/bin/sh +# +# Copyright (c) 2006, 2008 Junio C Hamano +# +# The "pre-rebase" hook is run just before "git rebase" starts doing +# its job, and can prevent the command from running by exiting with +# non-zero status. +# +# The hook is called with the following parameters: +# +# $1 -- the upstream the series was forked from. +# $2 -- the branch being rebased (or empty when rebasing the current branch). +# +# This sample shows how to prevent topic branches that are already +# merged to 'next' branch from getting rebased, because allowing it +# would result in rebasing already published history. + +publish=next +basebranch="$1" +if test "$#" = 2 +then + topic="refs/heads/$2" +else + topic=`git symbolic-ref HEAD` || + exit 0 ;# we do not interrupt rebasing detached HEAD +fi + +case "$topic" in +refs/heads/??/*) + ;; +*) + exit 0 ;# we do not interrupt others. + ;; +esac + +# Now we are dealing with a topic branch being rebased +# on top of master. Is it OK to rebase it? + +# Does the topic really exist? +git show-ref -q "$topic" || { + echo >&2 "No such branch $topic" + exit 1 +} + +# Is topic fully merged to master? +not_in_master=`git rev-list --pretty=oneline ^master "$topic"` +if test -z "$not_in_master" +then + echo >&2 "$topic is fully merged to master; better remove it." + exit 1 ;# we could allow it, but there is no point. +fi + +# Is topic ever merged to next? If so you should not be rebasing it. 
+only_next_1=`git rev-list ^master "^$topic" ${publish} | sort` +only_next_2=`git rev-list ^master ${publish} | sort` +if test "$only_next_1" = "$only_next_2" +then + not_in_topic=`git rev-list "^$topic" master` + if test -z "$not_in_topic" + then + echo >&2 "$topic is already up to date with master" + exit 1 ;# we could allow it, but there is no point. + else + exit 0 + fi +else + not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"` + /usr/bin/perl -e ' + my $topic = $ARGV[0]; + my $msg = "* $topic has commits already merged to public branch:\n"; + my (%not_in_next) = map { + /^([0-9a-f]+) /; + ($1 => 1); + } split(/\n/, $ARGV[1]); + for my $elem (map { + /^([0-9a-f]+) (.*)$/; + [$1 => $2]; + } split(/\n/, $ARGV[2])) { + if (!exists $not_in_next{$elem->[0]}) { + if ($msg) { + print STDERR $msg; + undef $msg; + } + print STDERR " $elem->[1]\n"; + } + } + ' "$topic" "$not_in_next" "$not_in_master" + exit 1 +fi + +<<\DOC_END + +This sample hook safeguards topic branches that have been +published from being rewound. + +The workflow assumed here is: + + * Once a topic branch forks from "master", "master" is never + merged into it again (either directly or indirectly). + + * Once a topic branch is fully cooked and merged into "master", + it is deleted. If you need to build on top of it to correct + earlier mistakes, a new topic branch is created by forking at + the tip of the "master". This is not strictly necessary, but + it makes it easier to keep your history simple. + + * Whenever you need to test or publish your changes to topic + branches, merge them into "next" branch. + +The script, being an example, hardcodes the publish branch name +to be "next", but it is trivial to make it configurable via +$GIT_DIR/config mechanism. + +With this workflow, you would want to know: + +(1) ... if a topic branch has ever been merged to "next". Young + topic branches can have stupid mistakes you would rather + clean up before publishing, and things that have not been + merged into other branches can be easily rebased without + affecting other people. But once it is published, you would + not want to rewind it. + +(2) ... if a topic branch has been fully merged to "master". + Then you can delete it. More importantly, you should not + build on top of it -- other people may already want to + change things related to the topic as patches against your + "master", so if you need further changes, it is better to + fork the topic (perhaps with the same name) afresh from the + tip of "master". + +Let's look at this example: + + o---o---o---o---o---o---o---o---o---o "next" + / / / / + / a---a---b A / / + / / / / + / / c---c---c---c B / + / / / \ / + / / / b---b C \ / + / / / / \ / + ---o---o---o---o---o---o---o---o---o---o---o "master" + + +A, B and C are topic branches. + + * A has one fix since it was merged up to "next". + + * B has finished. It has been fully merged up to "master" and "next", + and is ready to be deleted. + + * C has not merged to "next" at all. + +We would want to allow C to be rebased, refuse A, and encourage +B to be deleted. + +To compute (1): + + git rev-list ^master ^topic next + git rev-list ^master next + + if these match, topic has not merged in next at all. + +To compute (2): + + git rev-list master..topic + + if this is empty, it is fully merged to "master". 
+ +DOC_END diff --git a/kubespray/old.git/hooks/pre-receive.sample b/kubespray/old.git/hooks/pre-receive.sample new file mode 100755 index 0000000..a1fd29e --- /dev/null +++ b/kubespray/old.git/hooks/pre-receive.sample @@ -0,0 +1,24 @@ +#!/bin/sh +# +# An example hook script to make use of push options. +# The example simply echoes all push options that start with 'echoback=' +# and rejects all pushes when the "reject" push option is used. +# +# To enable this hook, rename this file to "pre-receive". + +if test -n "$GIT_PUSH_OPTION_COUNT" +then + i=0 + while test "$i" -lt "$GIT_PUSH_OPTION_COUNT" + do + eval "value=\$GIT_PUSH_OPTION_$i" + case "$value" in + echoback=*) + echo "echo from the pre-receive-hook: ${value#*=}" >&2 + ;; + reject) + exit 1 + esac + i=$((i + 1)) + done +fi diff --git a/kubespray/old.git/hooks/prepare-commit-msg.sample b/kubespray/old.git/hooks/prepare-commit-msg.sample new file mode 100755 index 0000000..10fa14c --- /dev/null +++ b/kubespray/old.git/hooks/prepare-commit-msg.sample @@ -0,0 +1,42 @@ +#!/bin/sh +# +# An example hook script to prepare the commit log message. +# Called by "git commit" with the name of the file that has the +# commit message, followed by the description of the commit +# message's source. The hook's purpose is to edit the commit +# message file. If the hook fails with a non-zero status, +# the commit is aborted. +# +# To enable this hook, rename this file to "prepare-commit-msg". + +# This hook includes three examples. The first one removes the +# "# Please enter the commit message..." help message. +# +# The second includes the output of "git diff --name-status -r" +# into the message, just before the "git status" output. It is +# commented because it doesn't cope with --amend or with squashed +# commits. +# +# The third example adds a Signed-off-by line to the message, that can +# still be edited. This is rarely a good idea. + +COMMIT_MSG_FILE=$1 +COMMIT_SOURCE=$2 +SHA1=$3 + +/usr/bin/perl -i.bak -ne 'print unless(m/^. Please enter the commit message/..m/^#$/)' "$COMMIT_MSG_FILE" + +# case "$COMMIT_SOURCE,$SHA1" in +# ,|template,) +# /usr/bin/perl -i.bak -pe ' +# print "\n" . `git diff --cached --name-status -r` +# if /^#/ && $first++ == 0' "$COMMIT_MSG_FILE" ;; +# *) ;; +# esac + +# SOB=$(git var GIT_COMMITTER_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') +# git interpret-trailers --in-place --trailer "$SOB" "$COMMIT_MSG_FILE" +# if test -z "$COMMIT_SOURCE" +# then +# /usr/bin/perl -i.bak -pe 'print "\n" if !$first_line++' "$COMMIT_MSG_FILE" +# fi diff --git a/kubespray/old.git/hooks/update.sample b/kubespray/old.git/hooks/update.sample new file mode 100755 index 0000000..80ba941 --- /dev/null +++ b/kubespray/old.git/hooks/update.sample @@ -0,0 +1,128 @@ +#!/bin/sh +# +# An example hook script to block unannotated tags from entering. +# Called by "git receive-pack" with arguments: refname sha1-old sha1-new +# +# To enable this hook, rename this file to "update". +# +# Config +# ------ +# hooks.allowunannotated +# This boolean sets whether unannotated tags will be allowed into the +# repository. By default they won't be. +# hooks.allowdeletetag +# This boolean sets whether deleting tags will be allowed in the +# repository. By default they won't be. +# hooks.allowmodifytag +# This boolean sets whether a tag may be modified after creation. By default +# it won't be. +# hooks.allowdeletebranch +# This boolean sets whether deleting branches will be allowed in the +# repository. By default they won't be. 
+# hooks.denycreatebranch +# This boolean sets whether remotely creating branches will be denied +# in the repository. By default this is allowed. +# + +# --- Command line +refname="$1" +oldrev="$2" +newrev="$3" + +# --- Safety check +if [ -z "$GIT_DIR" ]; then + echo "Don't run this script from the command line." >&2 + echo " (if you want, you could supply GIT_DIR then run" >&2 + echo " $0 )" >&2 + exit 1 +fi + +if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then + echo "usage: $0 " >&2 + exit 1 +fi + +# --- Config +allowunannotated=$(git config --bool hooks.allowunannotated) +allowdeletebranch=$(git config --bool hooks.allowdeletebranch) +denycreatebranch=$(git config --bool hooks.denycreatebranch) +allowdeletetag=$(git config --bool hooks.allowdeletetag) +allowmodifytag=$(git config --bool hooks.allowmodifytag) + +# check for no description +projectdesc=$(sed -e '1q' "$GIT_DIR/description") +case "$projectdesc" in +"Unnamed repository"* | "") + echo "*** Project description file hasn't been set" >&2 + exit 1 + ;; +esac + +# --- Check types +# if $newrev is 0000...0000, it's a commit to delete a ref. +zero="0000000000000000000000000000000000000000" +if [ "$newrev" = "$zero" ]; then + newrev_type=delete +else + newrev_type=$(git cat-file -t $newrev) +fi + +case "$refname","$newrev_type" in + refs/tags/*,commit) + # un-annotated tag + short_refname=${refname##refs/tags/} + if [ "$allowunannotated" != "true" ]; then + echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2 + echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate." >&2 + exit 1 + fi + ;; + refs/tags/*,delete) + # delete tag + if [ "$allowdeletetag" != "true" ]; then + echo "*** Deleting a tag is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/tags/*,tag) + # annotated tag + if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1 + then + echo "*** Tag '$refname' already exists." >&2 + echo "*** Modifying a tag is not allowed in this repository." >&2 + exit 1 + fi + ;; + refs/heads/*,commit) + # branch + if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then + echo "*** Creating a branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/heads/*,delete) + # delete branch + if [ "$allowdeletebranch" != "true" ]; then + echo "*** Deleting a branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/remotes/*,commit) + # tracking branch + ;; + refs/remotes/*,delete) + # delete tracking branch + if [ "$allowdeletebranch" != "true" ]; then + echo "*** Deleting a tracking branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + *) + # Anything else (is there anything else?) + echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2 + exit 1 + ;; +esac + +# --- Finished +exit 0 diff --git a/kubespray/old.git/index b/kubespray/old.git/index new file mode 100644 index 0000000..313862a Binary files /dev/null and b/kubespray/old.git/index differ diff --git a/kubespray/old.git/info/exclude b/kubespray/old.git/info/exclude new file mode 100644 index 0000000..a5196d1 --- /dev/null +++ b/kubespray/old.git/info/exclude @@ -0,0 +1,6 @@ +# git ls-files --others --exclude-from=.git/info/exclude +# Lines that start with '#' are comments. 
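The sample hooks above (pre-receive, prepare-commit-msg, update) stay inert until the
".sample" suffix is dropped, and the update hook is driven by the hooks.* booleans it
reads with git config plus a non-default project description. A hedged sketch of wiring
the update and pre-receive samples up on a server-side repository (the repository path
is a placeholder, not something taken from this diff):

    cd /srv/git/project.git
    echo "project description" > description   # the update hook refuses to run with the default description
    mv hooks/update.sample hooks/update
    mv hooks/pre-receive.sample hooks/pre-receive
    git config --bool hooks.allowunannotated true
    git config --bool hooks.allowdeletebranch true
    git config --bool receive.advertisePushOptions true

A client can then exercise the push-option handling with

    git push -o echoback=hello origin HEAD

which the pre-receive hook echoes back on stderr, while pushing with -o reject makes it
refuse the update.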
+# For a project mostly in C, the following would be a good set of +# exclude patterns (uncomment them if you want to use them): +# *.[oa] +# *~ diff --git a/kubespray/old.git/logs/HEAD b/kubespray/old.git/logs/HEAD new file mode 100644 index 0000000..9196c05 --- /dev/null +++ b/kubespray/old.git/logs/HEAD @@ -0,0 +1 @@ +0000000000000000000000000000000000000000 14699f5e989788e2087d22b533fab9ec63a15fe5 root 1672127857 +0000 clone: from https://github.com/kubernetes-sigs/kubespray.git diff --git a/kubespray/old.git/logs/refs/heads/master b/kubespray/old.git/logs/refs/heads/master new file mode 100644 index 0000000..9196c05 --- /dev/null +++ b/kubespray/old.git/logs/refs/heads/master @@ -0,0 +1 @@ +0000000000000000000000000000000000000000 14699f5e989788e2087d22b533fab9ec63a15fe5 root 1672127857 +0000 clone: from https://github.com/kubernetes-sigs/kubespray.git diff --git a/kubespray/old.git/logs/refs/remotes/origin/HEAD b/kubespray/old.git/logs/refs/remotes/origin/HEAD new file mode 100644 index 0000000..9196c05 --- /dev/null +++ b/kubespray/old.git/logs/refs/remotes/origin/HEAD @@ -0,0 +1 @@ +0000000000000000000000000000000000000000 14699f5e989788e2087d22b533fab9ec63a15fe5 root 1672127857 +0000 clone: from https://github.com/kubernetes-sigs/kubespray.git diff --git a/kubespray/old.git/objects/pack/pack-b9c570c8befd5adff71d4e4590f91c483e9655d8.idx b/kubespray/old.git/objects/pack/pack-b9c570c8befd5adff71d4e4590f91c483e9655d8.idx new file mode 100644 index 0000000..15e120d Binary files /dev/null and b/kubespray/old.git/objects/pack/pack-b9c570c8befd5adff71d4e4590f91c483e9655d8.idx differ diff --git a/kubespray/old.git/objects/pack/pack-b9c570c8befd5adff71d4e4590f91c483e9655d8.pack b/kubespray/old.git/objects/pack/pack-b9c570c8befd5adff71d4e4590f91c483e9655d8.pack new file mode 100644 index 0000000..a52654f Binary files /dev/null and b/kubespray/old.git/objects/pack/pack-b9c570c8befd5adff71d4e4590f91c483e9655d8.pack differ diff --git a/kubespray/old.git/packed-refs b/kubespray/old.git/packed-refs new file mode 100644 index 0000000..6b8fe68 --- /dev/null +++ b/kubespray/old.git/packed-refs @@ -0,0 +1,86 @@ +# pack-refs with: peeled fully-peeled sorted +3314f95be4832cd8b165ae9f009faa9fcf5fe95e refs/remotes/origin/floryut-patch-1 +14699f5e989788e2087d22b533fab9ec63a15fe5 refs/remotes/origin/master +2187882ee0d30036fd8d3df1e655e625ee13acb4 refs/remotes/origin/pre-commit-hook +ce0d111d7cc19d6c726740de57aca89464a00c1e refs/remotes/origin/release-2.10 +67167bd8d292b67e9c8bceccbc8fb230d05bd2ba refs/remotes/origin/release-2.11 +093d75f04e10560ea1862c145f745489784bfc44 refs/remotes/origin/release-2.12 +cd832eadea13c7d8bc2e628509c2808d0ce3d023 refs/remotes/origin/release-2.13 +c3814bb258217d38f3a0da2b501fdeeae92c1c58 refs/remotes/origin/release-2.14 +82e90091472b56f84e36e04b96c6de36e3250aa5 refs/remotes/origin/release-2.15 +c91a05f33064e180b0f512e41814e06e842089a9 refs/remotes/origin/release-2.16 +6ff35d0c6772d4dcb5a83dbf597e03e54b7dfecc refs/remotes/origin/release-2.17 +70d4f70c3cca7d2b32ee1d16ce55cb611a8756a9 refs/remotes/origin/release-2.18 +b75ee0b1118c234ddf4d488cb1d36a8a66c2a0f6 refs/remotes/origin/release-2.19 +c553912f939c9425c3c12e78f200ef721cf6738a refs/remotes/origin/release-2.20 +05dabb7e7b5eb7cd9a075064868bafe4dc1cf51f refs/remotes/origin/release-2.7 +d3f60799913539f247f8ba71297607ab15f13d4a refs/remotes/origin/release-2.8 +fc1edbe79dc533d6d58232d6147da4f48d5bfd56 refs/remotes/origin/release-2.9 +af8f39471498c9e30398a424d7a0f12ba4a350b8 refs/tags/1.3.0 
+af8f39471498c9e30398a424d7a0f12ba4a350b8 refs/tags/1.3.0_k1.1.3 +cf472a6b4c3c011f1bc02c595c239906f617db9c refs/tags/1.4.0 +f49926413a0c031a027a5e128380a6295e0deba1 refs/tags/1.5.0 +a222be7fae1c87939e526c3bde5b08201590741a refs/tags/test-tag-1 +78e67aea8f9040a38aaa048059bd8071844d0398 refs/tags/v1.0 +0e48ce51cef8bc420d1037f3a8d04e1437df1561 refs/tags/v1.0.0 +62a192566409de023895ad6f715956871b50cca9 refs/tags/v1.0.1 +563be707286d2ce60a1b7fac2f14578f07cde6b1 refs/tags/v1.1 +ec77f046fb81c65bbb7dbaa2b18a244142cf61d8 refs/tags/v1.1.0 +b81a06424286480cd535d3788be8bd6fad12888d refs/tags/v1.1.3 +c9769965b8df88423545411911e9928d9932a3a6 refs/tags/v2.0.0 +9e1cd0df576284601ad3457a7708b96d68fc9c3e refs/tags/v2.0.1 +^031cf565ec3ccd3ebbe80eeef3454c3780e5c598 +41e41055f6a63c6a24f2789dc54dfcbd132e8479 refs/tags/v2.1.0 +^5fd2b151b973c28a6d0875bd453ccb8602c3aa77 +acae0fe4a36bd1d3cd267e72ad01126a72d1458a refs/tags/v2.1.1 +72ae7638bcc94c66afa8620dfa4ad9a9249327ea refs/tags/v2.1.2 +dcd9c9509bca2c2ad8ab6fb8b12d4fcb6ea242a6 refs/tags/v2.10.0 +d53782a7f1791d46c0e3779cbc9a258ad492c2b9 refs/tags/v2.10.3 +7d8da8348e095a5f0b160c1e05c4c399d201d1f0 refs/tags/v2.10.4 +86cc703c75768207e1943ddf8f6a8082d756cb83 refs/tags/v2.11.0 +b0ccda8a423b0cd26e620e554e1f4710f0774089 refs/tags/v2.11.1 +abe9b40602ce416fa229c0b3540b1b12f4d6d922 refs/tags/v2.11.2 +370a0635fae8b4904bb6bf7936884195d1ab327f refs/tags/v2.12.0 +34e883e6e2aee218756dae2636ae8eb585c7dcb3 refs/tags/v2.12.1 +093d75f04e10560ea1862c145f745489784bfc44 refs/tags/v2.12.10 +366fb084ef03a72ed99dfbce8f18f51b8ec0d7ac refs/tags/v2.12.2 +e1815303339da8c6241f7e61ae0b61a7e8912849 refs/tags/v2.12.3 +173314d9f14208065ab8f3ce7859f34f2db738a8 refs/tags/v2.12.4 +51d9e2f9b1db5264e00509de3848ea6b586600e6 refs/tags/v2.12.5 +860bafa62d8bc01d3a3531b246b2ffeaf4ecb557 refs/tags/v2.12.6 +29cfe2b8ebea86bd22a42d281a4296e984dd5a9b refs/tags/v2.12.7 +31526c0b7ae209fd160f728da38874de9745fd56 refs/tags/v2.12.8 +2acc5a75acbe2081420a00fe9a5f4f19efd5ccc9 refs/tags/v2.12.9 +01dbc909be34c9c8b34cb9d5e88a4f0e74affcbc refs/tags/v2.13.0 +31094b1768f26dd3078f1aa01c24d29e959723d2 refs/tags/v2.13.1 +3d6b9d6c15a89c44fd00ae227d893e36c118fe08 refs/tags/v2.13.2 +28ee071bd6b0813b5c67d3c3698ad2115d17aced refs/tags/v2.13.3 +d28a6d68f958018b72e0f2e270ec86ca1e6f7acf refs/tags/v2.13.4 +a1f04e986987d86485a541ac4ab9d8febe04c717 refs/tags/v2.14.0 +b39a196cfbfc63c368fd064b00137bc666340958 refs/tags/v2.14.1 +75d648cae53eb6b83acb9b75b868ea29eec480d3 refs/tags/v2.14.2 +a923f4e7c0692229c442b07a531bfb5fc41a23f9 refs/tags/v2.15.0 +4661e7db011a59af1f69d5fdc1435459d86e386a refs/tags/v2.15.1 +bcf695913f5332c0acf08b206cc055c9482664d9 refs/tags/v2.16.0 +b83e8b020a0da6befa852139fde01774f5d56a6b refs/tags/v2.17.0 +eeeca4a1d0334efebcf732d08bffc7e10240fc9c refs/tags/v2.17.1 +92f25bf267ffd3393f6caffa588169d3a44a799c refs/tags/v2.18.0 +e7508d7d215ebcfb1831d4aa0a19aae1f89de514 refs/tags/v2.18.1 +56f9af866816f449368ee83d6a228f27537edc3d refs/tags/v2.18.2 +1f65e6d3b5752f9a64d3038e45d705f272acae58 refs/tags/v2.19.0 +453dbcef1d10dd62481a836d26130f48073078d2 refs/tags/v2.19.1 +72a0d78b3cc828d8122d5f6d3270649d0d349bb9 refs/tags/v2.2.0 +3ff5f40bdb6955f05ee1e9e3e977cb4e16a30678 refs/tags/v2.2.1 +18efdc2c51c5881c8647c06d02f8b505c5712876 refs/tags/v2.20.0 +ba0a03a8ba2d97a73d06242ec4bb3c7e2012e58c refs/tags/v2.3.0 +f7d52564aad2ff8e337634951beb4a881c0e8aa6 refs/tags/v2.4.0 +02cd5418c22d51e40261775908d55bc562206023 refs/tags/v2.5.0 +8b3ce6e418ccf48171eb5b3888ee1af84f8d71ba refs/tags/v2.6.0 +05dabb7e7b5eb7cd9a075064868bafe4dc1cf51f refs/tags/v2.7.0 
+9051aa5296ef76fcff69a2e3827cef28752aa475 refs/tags/v2.8.0 +2ac1c7562f46bba6be210f5aaa80f8510ffd850c refs/tags/v2.8.1 +4167807f17c5451f19e955be2630ab0caac30cf1 refs/tags/v2.8.2 +ea41fc5e742daf525bf4f23f0709b2008eeb49fb refs/tags/v2.8.3 +3901480bc17d6e59164b5c5642deb77ec4552e41 refs/tags/v2.8.4 +6f97687d19baa8685a99e0e2cd0d61d9e63bfd0c refs/tags/v2.8.5 +a4e65c7ceb9dddb55120c44e44ff69a0ec97473f refs/tags/v2.9.0 diff --git a/kubespray/old.git/refs/heads/master b/kubespray/old.git/refs/heads/master new file mode 100644 index 0000000..c8b554f --- /dev/null +++ b/kubespray/old.git/refs/heads/master @@ -0,0 +1 @@ +14699f5e989788e2087d22b533fab9ec63a15fe5 diff --git a/kubespray/old.git/refs/remotes/origin/HEAD b/kubespray/old.git/refs/remotes/origin/HEAD new file mode 100644 index 0000000..6efe28f --- /dev/null +++ b/kubespray/old.git/refs/remotes/origin/HEAD @@ -0,0 +1 @@ +ref: refs/remotes/origin/master diff --git a/kubespray/recover-control-plane.yml b/kubespray/recover-control-plane.yml new file mode 100644 index 0000000..a8a2d0b --- /dev/null +++ b/kubespray/recover-control-plane.yml @@ -0,0 +1,33 @@ +--- +- name: Check ansible version + import_playbook: ansible_version.yml + +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml + +- hosts: bastion[0] + gather_facts: False + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults} + - { role: bastion-ssh-config, tags: ["localhost", "bastion"]} + +- hosts: etcd[0] + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults} + - { role: recover_control_plane/etcd, when: "not etcd_kubeadm_enabled|default(false)" } + +- hosts: kube_control_plane[0] + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults} + - { role: recover_control_plane/control-plane } + +- import_playbook: cluster.yml + +- hosts: kube_control_plane + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults} + - { role: recover_control_plane/post-recover } diff --git a/kubespray/remove-node.yml b/kubespray/remove-node.yml new file mode 100644 index 0000000..b9fdb93 --- /dev/null +++ b/kubespray/remove-node.yml @@ -0,0 +1,50 @@ +--- +- name: Check ansible version + import_playbook: ansible_version.yml + +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml + +- hosts: bastion[0] + gather_facts: False + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: bastion-ssh-config, tags: ["localhost", "bastion"] } + +- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}" + gather_facts: no + tasks: + - name: Confirm Execution + pause: + prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes." 
+ register: pause_result + run_once: True + when: + - not (skip_confirmation | default(false) | bool) + + - name: Fail if user does not confirm deletion + fail: + msg: "Delete nodes confirmation failed" + when: pause_result.user_input | default('yes') != 'yes' + +- name: Gather facts + import_playbook: facts.yml + when: reset_nodes|default(True)|bool + +- hosts: "{{ node | default('kube_node') }}" + gather_facts: no + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults, when: reset_nodes|default(True)|bool } + - { role: remove-node/pre-remove, tags: pre-remove } + - { role: remove-node/remove-etcd-node } + - { role: reset, tags: reset, when: reset_nodes|default(True)|bool } + +# Currently cannot remove first master or etcd +- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}" + gather_facts: no + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults, when: reset_nodes|default(True)|bool } + - { role: remove-node/post-remove, tags: post-remove } diff --git a/kubespray/requirements-2.11.txt b/kubespray/requirements-2.11.txt new file mode 100644 index 0000000..ef2e023 --- /dev/null +++ b/kubespray/requirements-2.11.txt @@ -0,0 +1,10 @@ +ansible==4.10.0 +ansible-core==2.11.11 +cryptography==3.4.8 +jinja2==2.11.3 +netaddr==0.7.19 +pbr==5.4.4 +jmespath==0.9.5 +ruamel.yaml==0.16.10 +ruamel.yaml.clib==0.2.7 +MarkupSafe==1.1.1 diff --git a/kubespray/requirements-2.12.txt b/kubespray/requirements-2.12.txt new file mode 100644 index 0000000..722cc99 --- /dev/null +++ b/kubespray/requirements-2.12.txt @@ -0,0 +1,10 @@ +ansible==5.7.1 +ansible-core==2.12.5 +cryptography==3.4.8 +jinja2==2.11.3 +netaddr==0.7.19 +pbr==5.4.4 +jmespath==0.9.5 +ruamel.yaml==0.16.10 +ruamel.yaml.clib==0.2.7 +MarkupSafe==1.1.1 diff --git a/kubespray/requirements.txt b/kubespray/requirements.txt new file mode 100644 index 0000000..722cc99 --- /dev/null +++ b/kubespray/requirements.txt @@ -0,0 +1,10 @@ +ansible==5.7.1 +ansible-core==2.12.5 +cryptography==3.4.8 +jinja2==2.11.3 +netaddr==0.7.19 +pbr==5.4.4 +jmespath==0.9.5 +ruamel.yaml==0.16.10 +ruamel.yaml.clib==0.2.7 +MarkupSafe==1.1.1 diff --git a/kubespray/reset.yml b/kubespray/reset.yml new file mode 100644 index 0000000..2001570 --- /dev/null +++ b/kubespray/reset.yml @@ -0,0 +1,36 @@ +--- +- name: Check ansible version + import_playbook: ansible_version.yml + +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml + +- hosts: bastion[0] + gather_facts: False + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults} + - { role: bastion-ssh-config, tags: ["localhost", "bastion"]} + +- name: Gather facts + import_playbook: facts.yml + +- hosts: etcd:k8s_cluster:calico_rr + gather_facts: False + vars_prompt: + name: "reset_confirmation" + prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster." 
+ default: "no" + private: no + + pre_tasks: + - name: check confirmation + fail: + msg: "Reset confirmation failed" + when: reset_confirmation != "yes" + + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults} + - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_early: true } + - { role: reset, tags: reset } diff --git a/kubespray/roles/adduser/defaults/main.yml b/kubespray/roles/adduser/defaults/main.yml new file mode 100644 index 0000000..faf258d --- /dev/null +++ b/kubespray/roles/adduser/defaults/main.yml @@ -0,0 +1,27 @@ +--- +kube_owner: kube +kube_cert_group: kube-cert +etcd_data_dir: "/var/lib/etcd" + +addusers: + etcd: + name: etcd + comment: "Etcd user" + create_home: no + system: yes + shell: /sbin/nologin + kube: + name: kube + comment: "Kubernetes user" + create_home: no + system: yes + shell: /sbin/nologin + group: "{{ kube_cert_group }}" + +adduser: + name: "{{ user.name }}" + group: "{{ user.name|default(None) }}" + comment: "{{ user.comment|default(None) }}" + shell: "{{ user.shell|default(None) }}" + system: "{{ user.system|default(None) }}" + create_home: "{{ user.create_home|default(None) }}" diff --git a/kubespray/roles/adduser/molecule/default/converge.yml b/kubespray/roles/adduser/molecule/default/converge.yml new file mode 100644 index 0000000..47ff6c7 --- /dev/null +++ b/kubespray/roles/adduser/molecule/default/converge.yml @@ -0,0 +1,10 @@ +--- +- name: Converge + hosts: all + become: true + gather_facts: false + roles: + - role: adduser + vars: + user: + name: foo diff --git a/kubespray/roles/adduser/molecule/default/molecule.yml b/kubespray/roles/adduser/molecule/default/molecule.yml new file mode 100644 index 0000000..617677e --- /dev/null +++ b/kubespray/roles/adduser/molecule/default/molecule.yml @@ -0,0 +1,27 @@ +--- +dependency: + name: galaxy +lint: | + set -e + yamllint -c ../../.yamllint . 
+driver: + name: vagrant + provider: + name: libvirt +platforms: + - name: adduser-01 + box: generic/ubuntu2004 + cpus: 1 + memory: 512 +provisioner: + name: ansible + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/roles/adduser/molecule/default/tests/test_default.py b/kubespray/roles/adduser/molecule/default/tests/test_default.py new file mode 100644 index 0000000..4c81047 --- /dev/null +++ b/kubespray/roles/adduser/molecule/default/tests/test_default.py @@ -0,0 +1,37 @@ +import os +import yaml +import glob +import testinfra.utils.ansible_runner +from ansible.playbook import Playbook +from ansible.cli.playbook import PlaybookCLI + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + +def read_playbook(playbook): + cli_args = [os.path.realpath(playbook), testinfra_hosts] + cli = PlaybookCLI(cli_args) + cli.parse() + loader, inventory, variable_manager = cli._play_prereqs() + + pb = Playbook.load(cli.args[0], variable_manager, loader) + + for play in pb.get_plays(): + yield variable_manager.get_vars(play) + +def get_playbook(): + with open(os.path.realpath(' '.join(map(str,glob.glob('molecule.*')))), 'r') as yamlfile: + data = yaml.load(yamlfile, Loader=yaml.FullLoader) + if 'playbooks' in data['provisioner'].keys(): + if 'converge' in data['provisioner']['playbooks'].keys(): + return data['provisioner']['playbooks']['converge'] + else: + return ' '.join(map(str,glob.glob('converge.*'))) + +def test_user(host): + for vars in read_playbook(get_playbook()): + assert host.user(vars['user']['name']).exists + if 'group' in vars['user'].keys(): + assert host.group(vars['user']['group']).exists + else: + assert host.group(vars['user']['name']).exists diff --git a/kubespray/roles/adduser/tasks/main.yml b/kubespray/roles/adduser/tasks/main.yml new file mode 100644 index 0000000..51dd5bb --- /dev/null +++ b/kubespray/roles/adduser/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: User | Create User Group + group: + name: "{{ user.group|default(user.name) }}" + system: "{{ user.system|default(omit) }}" + +- name: User | Create User + user: + comment: "{{ user.comment|default(omit) }}" + create_home: "{{ user.create_home|default(omit) }}" + group: "{{ user.group|default(user.name) }}" + home: "{{ user.home|default(omit) }}" + shell: "{{ user.shell|default(omit) }}" + name: "{{ user.name }}" + system: "{{ user.system|default(omit) }}" + when: user.name != "root" diff --git a/kubespray/roles/adduser/vars/coreos.yml b/kubespray/roles/adduser/vars/coreos.yml new file mode 100644 index 0000000..5c258df --- /dev/null +++ b/kubespray/roles/adduser/vars/coreos.yml @@ -0,0 +1,8 @@ +--- +addusers: + - name: kube + comment: "Kubernetes user" + shell: /sbin/nologin + system: yes + group: "{{ kube_cert_group }}" + create_home: no diff --git a/kubespray/roles/adduser/vars/debian.yml b/kubespray/roles/adduser/vars/debian.yml new file mode 100644 index 0000000..99e5b38 --- /dev/null +++ b/kubespray/roles/adduser/vars/debian.yml @@ -0,0 +1,15 @@ +--- +addusers: + - name: etcd + comment: "Etcd user" + create_home: yes + home: "{{ etcd_data_dir }}" + system: yes + shell: /sbin/nologin + + - name: kube + comment: "Kubernetes user" + create_home: no + system: yes + shell: /sbin/nologin + group: "{{ kube_cert_group }}" diff --git a/kubespray/roles/adduser/vars/redhat.yml b/kubespray/roles/adduser/vars/redhat.yml new file 
mode 100644 index 0000000..99e5b38 --- /dev/null +++ b/kubespray/roles/adduser/vars/redhat.yml @@ -0,0 +1,15 @@ +--- +addusers: + - name: etcd + comment: "Etcd user" + create_home: yes + home: "{{ etcd_data_dir }}" + system: yes + shell: /sbin/nologin + + - name: kube + comment: "Kubernetes user" + create_home: no + system: yes + shell: /sbin/nologin + group: "{{ kube_cert_group }}" diff --git a/kubespray/roles/bastion-ssh-config/defaults/main.yml b/kubespray/roles/bastion-ssh-config/defaults/main.yml new file mode 100644 index 0000000..d322814 --- /dev/null +++ b/kubespray/roles/bastion-ssh-config/defaults/main.yml @@ -0,0 +1,2 @@ +--- +ssh_bastion_confing__name: ssh-bastion.conf \ No newline at end of file diff --git a/kubespray/roles/bastion-ssh-config/molecule/default/converge.yml b/kubespray/roles/bastion-ssh-config/molecule/default/converge.yml new file mode 100644 index 0000000..54a6247 --- /dev/null +++ b/kubespray/roles/bastion-ssh-config/molecule/default/converge.yml @@ -0,0 +1,15 @@ +--- +- name: Converge + hosts: all + become: true + gather_facts: false + roles: + - role: bastion-ssh-config + tasks: + - name: Copy config to remote host + copy: + src: "{{ playbook_dir }}/{{ ssh_bastion_confing__name }}" + dest: "{{ ssh_bastion_confing__name }}" + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: 0644 diff --git a/kubespray/roles/bastion-ssh-config/molecule/default/molecule.yml b/kubespray/roles/bastion-ssh-config/molecule/default/molecule.yml new file mode 100644 index 0000000..5cadd6e --- /dev/null +++ b/kubespray/roles/bastion-ssh-config/molecule/default/molecule.yml @@ -0,0 +1,35 @@ +--- +dependency: + name: galaxy +lint: | + set -e + yamllint -c ../../.yamllint . +driver: + name: vagrant + provider: + name: libvirt +platforms: + - name: bastion-01 + box: generic/ubuntu2004 + cpus: 1 + memory: 512 +provisioner: + name: ansible + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + inventory: + hosts: + all: + hosts: + children: + bastion: + hosts: + bastion-01: +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/roles/bastion-ssh-config/molecule/default/tests/test_default.py b/kubespray/roles/bastion-ssh-config/molecule/default/tests/test_default.py new file mode 100644 index 0000000..f98faa4 --- /dev/null +++ b/kubespray/roles/bastion-ssh-config/molecule/default/tests/test_default.py @@ -0,0 +1,34 @@ +import os +import yaml +import glob +import testinfra.utils.ansible_runner +from ansible.playbook import Playbook +from ansible.cli.playbook import PlaybookCLI + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + +def read_playbook(playbook): + cli_args = [os.path.realpath(playbook), testinfra_hosts] + cli = PlaybookCLI(cli_args) + cli.parse() + loader, inventory, variable_manager = cli._play_prereqs() + + pb = Playbook.load(cli.args[0], variable_manager, loader) + + for play in pb.get_plays(): + yield variable_manager.get_vars(play) + +def get_playbook(): + with open(os.path.realpath(' '.join(map(str,glob.glob('molecule.*')))), 'r') as yamlfile: + data = yaml.load(yamlfile, Loader=yaml.FullLoader) + if 'playbooks' in data['provisioner'].keys(): + if 'converge' in data['provisioner']['playbooks'].keys(): + return data['provisioner']['playbooks']['converge'] + else: + return ' '.join(map(str,glob.glob('converge.*'))) + +def test_ssh_config(host): + for vars in read_playbook(get_playbook()): + assert 
host.file(vars['ssh_bastion_confing__name']).exists + assert host.file(vars['ssh_bastion_confing__name']).is_file diff --git a/kubespray/roles/bastion-ssh-config/tasks/main.yml b/kubespray/roles/bastion-ssh-config/tasks/main.yml new file mode 100644 index 0000000..a18291b --- /dev/null +++ b/kubespray/roles/bastion-ssh-config/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: set bastion host IP and port + set_fact: + bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}" + bastion_port: "{{ hostvars[groups['bastion'][0]]['ansible_port'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_port']) | d(22) }}" + delegate_to: localhost + connection: local + +# As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly +# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_user in real_user +- name: Store the current ansible_user in the real_user fact + set_fact: + real_user: "{{ ansible_user }}" + +- name: create ssh bastion conf + become: false + delegate_to: localhost + connection: local + template: + src: "{{ ssh_bastion_confing__name }}.j2" + dest: "{{ playbook_dir }}/{{ ssh_bastion_confing__name }}" + mode: 0640 diff --git a/kubespray/roles/bastion-ssh-config/templates/ssh-bastion.conf.j2 b/kubespray/roles/bastion-ssh-config/templates/ssh-bastion.conf.j2 new file mode 100644 index 0000000..bd5f49c --- /dev/null +++ b/kubespray/roles/bastion-ssh-config/templates/ssh-bastion.conf.j2 @@ -0,0 +1,18 @@ +{% set vars={'hosts': ''} %} +{% set user='' %} + +{% for h in groups['all'] %} +{% if h not in groups['bastion'] %} +{% if vars.update({'hosts': vars['hosts'] + ' ' + (hostvars[h].get('ansible_ssh_host') or hostvars[h]['ansible_host'])}) %}{% endif %} +{% endif %} +{% endfor %} + +Host {{ bastion_ip }} + Hostname {{ bastion_ip }} + StrictHostKeyChecking no + ControlMaster auto + ControlPath ~/.ssh/ansible-%r@%h:%p + ControlPersist 5m + +Host {{ vars['hosts'] }} + ProxyCommand ssh -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -p {{ bastion_port }} {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} diff --git a/kubespray/roles/bootstrap-os/defaults/main.yml b/kubespray/roles/bootstrap-os/defaults/main.yml new file mode 100644 index 0000000..9b31456 --- /dev/null +++ b/kubespray/roles/bootstrap-os/defaults/main.yml @@ -0,0 +1,32 @@ +--- +## CentOS/RHEL/AlmaLinux specific variables +# Use the fastestmirror yum plugin +centos_fastestmirror_enabled: false + +## Flatcar Container Linux specific variables +# Disable locksmithd or leave it in its current state +coreos_locksmithd_disable: false + +## Oracle Linux specific variables +# Install public repo on Oracle Linux +use_oracle_public_repo: true + +fedora_coreos_packages: + - python + - python3-libselinux + - ethtool # required in kubeadm preflight phase for verifying the environment + - ipset # required in kubeadm preflight phase for verifying the environment + - conntrack-tools # required by kube-proxy + +## General +# Set the hostname to inventory_hostname +override_system_hostname: true + +is_fedora_coreos: false + +skip_http_proxy_on_os_packages: false + +# If this is true, debug information will be displayed but +# may contain some private data, so it is recommended to set it to false +# in the production environment. 
+unsafe_show_logs: false diff --git a/kubespray/roles/bootstrap-os/files/bootstrap.sh b/kubespray/roles/bootstrap-os/files/bootstrap.sh new file mode 100755 index 0000000..69b7b75 --- /dev/null +++ b/kubespray/roles/bootstrap-os/files/bootstrap.sh @@ -0,0 +1,42 @@ +#!/bin/bash +set -e + +BINDIR="/opt/bin" +if [[ -e $BINDIR/.bootstrapped ]]; then + exit 0 +fi + +ARCH=$(uname -m) +case $ARCH in + "x86_64") + PYPY_ARCH=linux64 + PYPI_HASH=46818cb3d74b96b34787548343d266e2562b531ddbaf330383ba930ff1930ed5 + ;; + "aarch64") + PYPY_ARCH=aarch64 + PYPI_HASH=2e1ae193d98bc51439642a7618d521ea019f45b8fb226940f7e334c548d2b4b9 + ;; + *) + echo "Unsupported Architecture: ${ARCH}" + exit 1 +esac + +PYTHON_VERSION=3.9 +PYPY_VERSION=7.3.9 +PYPY_FILENAME="pypy${PYTHON_VERSION}-v${PYPY_VERSION}-${PYPY_ARCH}" +PYPI_URL="https://downloads.python.org/pypy/${PYPY_FILENAME}.tar.bz2" + +mkdir -p $BINDIR + +cd $BINDIR + +TAR_FILE=pyp.tar.bz2 +wget -O "${TAR_FILE}" "${PYPI_URL}" +echo "${PYPI_HASH} ${TAR_FILE}" | sha256sum -c - +tar -xjf "${TAR_FILE}" && rm "${TAR_FILE}" +mv -n "${PYPY_FILENAME}" pypy3 + +ln -s ./pypy3/bin/pypy3 python +$BINDIR/python --version + +touch $BINDIR/.bootstrapped diff --git a/kubespray/roles/bootstrap-os/handlers/main.yml b/kubespray/roles/bootstrap-os/handlers/main.yml new file mode 100644 index 0000000..7c8c4fe --- /dev/null +++ b/kubespray/roles/bootstrap-os/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: RHEL auto-attach subscription + command: /sbin/subscription-manager attach --auto + become: true diff --git a/kubespray/roles/bootstrap-os/molecule/default/converge.yml b/kubespray/roles/bootstrap-os/molecule/default/converge.yml new file mode 100644 index 0000000..1f44ec9 --- /dev/null +++ b/kubespray/roles/bootstrap-os/molecule/default/converge.yml @@ -0,0 +1,6 @@ +--- +- name: Converge + hosts: all + gather_facts: no + roles: + - role: bootstrap-os diff --git a/kubespray/roles/bootstrap-os/molecule/default/molecule.yml b/kubespray/roles/bootstrap-os/molecule/default/molecule.yml new file mode 100644 index 0000000..8413baa --- /dev/null +++ b/kubespray/roles/bootstrap-os/molecule/default/molecule.yml @@ -0,0 +1,57 @@ +--- +dependency: + name: galaxy +lint: | + set -e + yamllint -c ../../.yamllint . 
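Several of the roles added here (adduser, bastion-ssh-config, bootstrap-os, and the
container-engine roles further below) ship a molecule/default scenario like the one
above, so each role can be exercised in isolation. A sketch assuming molecule with the
vagrant/libvirt driver and testinfra are installed on the workstation; nothing below is
specific to this repository beyond the role path:

    cd kubespray/roles/bootstrap-os
    molecule converge      # create the Vagrant boxes and apply the role
    molecule verify        # run the testinfra checks in molecule/default/tests/
    molecule destroy       # tear the test instances down again
    # or simply: molecule test, which runs the full cycle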
+driver: + name: vagrant + provider: + name: libvirt +platforms: + - name: ubuntu16 + box: generic/ubuntu1604 + cpus: 1 + memory: 512 + - name: ubuntu18 + box: generic/ubuntu1804 + cpus: 1 + memory: 512 + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 512 + - name: centos7 + box: centos/7 + cpus: 1 + memory: 512 + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 512 + - name: debian9 + box: generic/debian9 + cpus: 1 + memory: 512 + - name: debian10 + box: generic/debian10 + cpus: 1 + memory: 512 +provisioner: + name: ansible + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + inventory: + group_vars: + all: + user: + name: foo + comment: My test comment +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/roles/bootstrap-os/molecule/default/tests/test_default.py b/kubespray/roles/bootstrap-os/molecule/default/tests/test_default.py new file mode 100644 index 0000000..64c59dd --- /dev/null +++ b/kubespray/roles/bootstrap-os/molecule/default/tests/test_default.py @@ -0,0 +1,11 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE'] +).get_hosts('all') + + +def test_python(host): + assert host.exists('python3') or host.exists('python') diff --git a/kubespray/roles/bootstrap-os/tasks/bootstrap-amazon.yml b/kubespray/roles/bootstrap-os/tasks/bootstrap-amazon.yml new file mode 100644 index 0000000..2b4d665 --- /dev/null +++ b/kubespray/roles/bootstrap-os/tasks/bootstrap-amazon.yml @@ -0,0 +1,13 @@ +--- +- name: Enable EPEL repo for Amazon Linux + yum_repository: + name: epel + file: epel + description: Extra Packages for Enterprise Linux 7 - $basearch + baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch + gpgcheck: yes + gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 + skip_if_unavailable: yes + enabled: yes + repo_gpgcheck: no + when: epel_enabled diff --git a/kubespray/roles/bootstrap-os/tasks/bootstrap-centos.yml b/kubespray/roles/bootstrap-os/tasks/bootstrap-centos.yml new file mode 100644 index 0000000..007fdce --- /dev/null +++ b/kubespray/roles/bootstrap-os/tasks/bootstrap-centos.yml @@ -0,0 +1,117 @@ +--- +- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version + setup: + gather_subset: '!all' + filter: ansible_distribution_*version + +- name: Add proxy to yum.conf or dnf.conf if http_proxy is defined + ini_file: + path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}" + section: main + option: proxy + value: "{{ http_proxy | default(omit) }}" + state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" + no_extra_spaces: true + mode: 0644 + become: true + when: not skip_http_proxy_on_os_packages + +# For Oracle Linux install public repo +- name: Download Oracle Linux public yum repo + get_url: + url: https://yum.oracle.com/public-yum-ol7.repo + dest: /etc/yum.repos.d/public-yum-ol7.repo + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) < 7.6 + environment: "{{ proxy_env }}" + +- name: Enable Oracle Linux repo + ini_file: + dest: /etc/yum.repos.d/public-yum-ol7.repo + section: "{{ item }}" + option: enabled + value: "1" + mode: 0644 + with_items: + - ol7_latest + - ol7_addons + - ol7_developer_EPEL + when: + - use_oracle_public_repo|default(true) + - 
'''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) < 7.6 + +- name: Install EPEL for Oracle Linux repo package + package: + name: "oracle-epel-release-el{{ ansible_distribution_major_version }}" + state: present + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) >= 7.6 + +- name: Enable Oracle Linux repo + ini_file: + dest: "/etc/yum.repos.d/oracle-linux-ol{{ ansible_distribution_major_version }}.repo" + section: "ol{{ ansible_distribution_major_version }}_addons" + option: "{{ item.option }}" + value: "{{ item.value }}" + mode: 0644 + with_items: + - { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" } + - { option: "enabled", value: "1" } + - { option: "baseurl", value: "http://yum.oracle.com/repo/OracleLinux/OL{{ ansible_distribution_major_version }}/addons/$basearch/" } + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) >= 7.6 + +- name: Enable Centos extra repo for Oracle Linux + ini_file: + dest: "/etc/yum.repos.d/centos-extras.repo" + section: "extras" + option: "{{ item.option }}" + value: "{{ item.value }}" + mode: 0644 + with_items: + - { option: "name", value: "CentOS-{{ ansible_distribution_major_version }} - Extras" } + - { option: "enabled", value: "1" } + - { option: "gpgcheck", value: "0" } + - { option: "baseurl", value: "http://mirror.centos.org/centos/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version|int > 7 %}os/{% endif %}" } + when: + - use_oracle_public_repo|default(true) + - '''ID="ol"'' in os_release.stdout_lines' + - (ansible_distribution_version | float) >= 7.6 + - (ansible_distribution_version | float) < 9 + +# CentOS ships with python installed + +- name: Check presence of fastestmirror.conf + stat: + path: /etc/yum/pluginconf.d/fastestmirror.conf + get_attributes: no + get_checksum: no + get_mime: no + register: fastestmirror + +# the fastestmirror plugin can actually slow down Ansible deployments +- name: Disable fastestmirror plugin if requested + lineinfile: + dest: /etc/yum/pluginconf.d/fastestmirror.conf + regexp: "^enabled=.*" + line: "enabled=0" + state: present + become: true + when: + - fastestmirror.stat.exists + - not centos_fastestmirror_enabled + +# libselinux-python is required on SELinux enabled hosts +# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements +- name: Install libselinux python package + package: + name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + state: present + become: true diff --git a/kubespray/roles/bootstrap-os/tasks/bootstrap-clearlinux.yml b/kubespray/roles/bootstrap-os/tasks/bootstrap-clearlinux.yml new file mode 100644 index 0000000..de42e3c --- /dev/null +++ b/kubespray/roles/bootstrap-os/tasks/bootstrap-clearlinux.yml @@ -0,0 +1,16 @@ +--- +# ClearLinux ships with Python installed + +- name: Install basic package to run containers + package: + name: containers-basic + state: present + +- name: Make sure docker service is enabled + systemd: + name: docker + masked: false + enabled: true + daemon_reload: true + state: started + become: true diff --git a/kubespray/roles/bootstrap-os/tasks/bootstrap-coreos.yml b/kubespray/roles/bootstrap-os/tasks/bootstrap-coreos.yml new file mode 100644 index 0000000..737a7ec --- /dev/null 
+++ b/kubespray/roles/bootstrap-os/tasks/bootstrap-coreos.yml @@ -0,0 +1,37 @@ +--- +# CoreOS ships without Python installed + +- name: Check if bootstrap is needed + raw: stat /opt/bin/.bootstrapped + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Force binaries directory for Container Linux by CoreOS and Flatcar + set_fact: + bin_dir: "/opt/bin" + tags: + - facts + +- name: Run bootstrap.sh + script: bootstrap.sh + become: true + environment: "{{ proxy_env }}" + when: + - need_bootstrap.rc != 0 + +- name: Set the ansible_python_interpreter fact + set_fact: + ansible_python_interpreter: "{{ bin_dir }}/python" + tags: + - facts + +- name: Disable auto-upgrade + systemd: + name: locksmithd.service + masked: true + state: stopped + when: + - coreos_locksmithd_disable diff --git a/kubespray/roles/bootstrap-os/tasks/bootstrap-debian.yml b/kubespray/roles/bootstrap-os/tasks/bootstrap-debian.yml new file mode 100644 index 0000000..47bad20 --- /dev/null +++ b/kubespray/roles/bootstrap-os/tasks/bootstrap-debian.yml @@ -0,0 +1,76 @@ +--- +# Some Debian based distros ship without Python installed + +- name: Check if bootstrap is needed + raw: which python3 + register: need_bootstrap + failed_when: false + changed_when: false + # This command should always run, even in check mode + check_mode: false + tags: + - facts + +- name: Check http::proxy in apt configuration files + raw: apt-config dump | grep -qsi 'Acquire::http::proxy' + register: need_http_proxy + failed_when: false + changed_when: false + # This command should always run, even in check mode + check_mode: false + +- name: Add http_proxy to /etc/apt/apt.conf if http_proxy is defined + raw: echo 'Acquire::http::proxy "{{ http_proxy }}";' >> /etc/apt/apt.conf + become: true + when: + - http_proxy is defined + - need_http_proxy.rc != 0 + - not skip_http_proxy_on_os_packages + +- name: Check https::proxy in apt configuration files + raw: apt-config dump | grep -qsi 'Acquire::https::proxy' + register: need_https_proxy + failed_when: false + changed_when: false + # This command should always run, even in check mode + check_mode: false + +- name: Add https_proxy to /etc/apt/apt.conf if https_proxy is defined + raw: echo 'Acquire::https::proxy "{{ https_proxy }}";' >> /etc/apt/apt.conf + become: true + when: + - https_proxy is defined + - need_https_proxy.rc != 0 + - not skip_http_proxy_on_os_packages + +- name: Install python3 + raw: + apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y python3-minimal + become: true + when: + - need_bootstrap.rc != 0 + +- name: Update Apt cache + raw: apt-get update --allow-releaseinfo-change + become: true + when: + - '''ID=debian'' in os_release.stdout_lines' + - '''VERSION_ID="10"'' in os_release.stdout_lines or ''VERSION_ID="11"'' in os_release.stdout_lines' + register: bootstrap_update_apt_result + changed_when: + - '"changed its" in bootstrap_update_apt_result.stdout' + - '"value from" in bootstrap_update_apt_result.stdout' + ignore_errors: true + +- name: Set the ansible_python_interpreter fact + set_fact: + ansible_python_interpreter: "/usr/bin/python3" + +# Workaround for https://github.com/ansible/ansible/issues/25543 +- name: Install dbus for the hostname module + package: + name: dbus + state: present + use: apt + become: true diff --git a/kubespray/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml b/kubespray/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml new file mode 100644 index 0000000..d3fd1c9 --- /dev/null +++ 
b/kubespray/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml @@ -0,0 +1,46 @@ +--- + +- name: Check if bootstrap is needed + raw: which python + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Remove podman network cni + raw: "podman network rm podman" + become: true + ignore_errors: true # noqa ignore-errors + when: need_bootstrap.rc != 0 + +- name: Clean up possible pending packages on fedora coreos + raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree cleanup -p }}" + become: true + when: need_bootstrap.rc != 0 + +- name: Install required packages on fedora coreos + raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree install --allow-inactive {{ fedora_coreos_packages|join(' ') }}" + become: true + when: need_bootstrap.rc != 0 + +- name: Reboot immediately for updated ostree + raw: "nohup bash -c 'sleep 5s && shutdown -r now'" + become: true + ignore_errors: true # noqa ignore-errors + ignore_unreachable: yes + when: need_bootstrap.rc != 0 + +- name: Wait for the reboot to complete + wait_for_connection: + timeout: 240 + connect_timeout: 20 + delay: 5 + sleep: 5 + when: need_bootstrap.rc != 0 + +- name: Store the fact if this is an fedora core os host + set_fact: + is_fedora_coreos: True + tags: + - facts diff --git a/kubespray/roles/bootstrap-os/tasks/bootstrap-fedora.yml b/kubespray/roles/bootstrap-os/tasks/bootstrap-fedora.yml new file mode 100644 index 0000000..1613173 --- /dev/null +++ b/kubespray/roles/bootstrap-os/tasks/bootstrap-fedora.yml @@ -0,0 +1,36 @@ +--- +# Some Fedora based distros ship without Python installed + +- name: Check if bootstrap is needed + raw: which python + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Add proxy to dnf.conf if http_proxy is defined + ini_file: + path: "/etc/dnf/dnf.conf" + section: main + option: proxy + value: "{{ http_proxy | default(omit) }}" + state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" + no_extra_spaces: true + mode: 0644 + become: true + when: not skip_http_proxy_on_os_packages + +- name: Install python3 on fedora + raw: "dnf install --assumeyes --quiet python3" + become: true + when: + - need_bootstrap.rc != 0 + +# libselinux-python3 is required on SELinux enabled hosts +# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements +- name: Install libselinux-python3 + package: + name: libselinux-python3 + state: present + become: true diff --git a/kubespray/roles/bootstrap-os/tasks/bootstrap-flatcar.yml b/kubespray/roles/bootstrap-os/tasks/bootstrap-flatcar.yml new file mode 100644 index 0000000..b0f3a9e --- /dev/null +++ b/kubespray/roles/bootstrap-os/tasks/bootstrap-flatcar.yml @@ -0,0 +1,37 @@ +--- +# Flatcar Container Linux ships without Python installed + +- name: Check if bootstrap is needed + raw: stat /opt/bin/.bootstrapped + register: need_bootstrap + failed_when: false + changed_when: false + tags: + - facts + +- name: Force binaries directory for Flatcar Container Linux by Kinvolk + set_fact: + bin_dir: "/opt/bin" + tags: + - facts + +- name: Run bootstrap.sh + script: bootstrap.sh + become: true + environment: "{{ proxy_env }}" + when: + - need_bootstrap.rc != 0 + +- name: Set the ansible_python_interpreter fact + set_fact: + ansible_python_interpreter: "{{ bin_dir }}/python" + tags: + - facts + +- name: Disable auto-upgrade + systemd: + name: locksmithd.service + masked: true + state: stopped + when: + - 
coreos_locksmithd_disable diff --git a/kubespray/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/kubespray/roles/bootstrap-os/tasks/bootstrap-opensuse.yml new file mode 100644 index 0000000..c833bfd --- /dev/null +++ b/kubespray/roles/bootstrap-os/tasks/bootstrap-opensuse.yml @@ -0,0 +1,85 @@ +--- +# OpenSUSE ships with Python installed +- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version + setup: + gather_subset: '!all' + filter: ansible_distribution_*version + +- name: Check that /etc/sysconfig/proxy file exists + stat: + path: /etc/sysconfig/proxy + get_attributes: no + get_checksum: no + get_mime: no + register: stat_result + +- name: Create the /etc/sysconfig/proxy empty file + file: # noqa risky-file-permissions + path: /etc/sysconfig/proxy + state: touch + when: + - http_proxy is defined or https_proxy is defined + - not stat_result.stat.exists + +- name: Set the http_proxy in /etc/sysconfig/proxy + lineinfile: + path: /etc/sysconfig/proxy + regexp: '^HTTP_PROXY=' + line: 'HTTP_PROXY="{{ http_proxy }}"' + become: true + when: + - http_proxy is defined + +- name: Set the https_proxy in /etc/sysconfig/proxy + lineinfile: + path: /etc/sysconfig/proxy + regexp: '^HTTPS_PROXY=' + line: 'HTTPS_PROXY="{{ https_proxy }}"' + become: true + when: + - https_proxy is defined + +- name: Enable proxies + lineinfile: + path: /etc/sysconfig/proxy + regexp: '^PROXY_ENABLED=' + line: 'PROXY_ENABLED="yes"' + become: true + when: + - http_proxy is defined or https_proxy is defined + +# Required for zypper module +- name: Install python-xml + shell: zypper refresh && zypper --non-interactive install python-xml + changed_when: false + become: true + tags: + - facts + +# Without this package, the get_url module fails when trying to handle https +- name: Install python-cryptography + zypper: + name: python-cryptography + state: present + update_cache: true + become: true + when: + - ansible_distribution_version is version('15.4', '<') + +- name: Install python3-cryptography + zypper: + name: python3-cryptography + state: present + update_cache: true + become: true + when: + - ansible_distribution_version is version('15.4', '>=') + +# Nerdctl needs some basic packages to get an environment up +- name: Install basic dependencies + zypper: + name: + - iptables + - apparmor-parser + state: present + become: true diff --git a/kubespray/roles/bootstrap-os/tasks/bootstrap-redhat.yml b/kubespray/roles/bootstrap-os/tasks/bootstrap-redhat.yml new file mode 100644 index 0000000..8f32388 --- /dev/null +++ b/kubespray/roles/bootstrap-os/tasks/bootstrap-redhat.yml @@ -0,0 +1,121 @@ +--- +- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version + setup: + gather_subset: '!all' + filter: ansible_distribution_*version + +- name: Add proxy to yum.conf or dnf.conf if http_proxy is defined + ini_file: + path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}" + section: main + option: proxy + value: "{{ http_proxy | default(omit) }}" + state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" + no_extra_spaces: true + mode: 0644 + become: true + when: not skip_http_proxy_on_os_packages + +- name: Add proxy to RHEL subscription-manager if http_proxy is defined + command: /sbin/subscription-manager config --server.proxy_hostname={{ http_proxy | regex_replace(':\d+$') }} --server.proxy_port={{ http_proxy | regex_replace('^.*:') }} + become: true + when: + - not 
skip_http_proxy_on_os_packages + - http_proxy is defined + +- name: Check RHEL subscription-manager status + command: /sbin/subscription-manager status + register: rh_subscription_status + changed_when: "rh_subscription_status != 0" + ignore_errors: true # noqa ignore-errors + become: true + +- name: RHEL subscription Organization ID/Activation Key registration + redhat_subscription: + state: present + org_id: "{{ rh_subscription_org_id }}" + activationkey: "{{ rh_subscription_activation_key }}" + auto_attach: true + force_register: true + syspurpose: + usage: "{{ rh_subscription_usage }}" + role: "{{ rh_subscription_role }}" + service_level_agreement: "{{ rh_subscription_sla }}" + sync: true + notify: RHEL auto-attach subscription + ignore_errors: true # noqa ignore-errors + become: true + when: + - rh_subscription_org_id is defined + - rh_subscription_status.changed + +# this task has no_log set to prevent logging security sensitive information such as subscription passwords +- name: RHEL subscription Username/Password registration + redhat_subscription: + state: present + username: "{{ rh_subscription_username }}" + password: "{{ rh_subscription_password }}" + auto_attach: true + force_register: true + syspurpose: + usage: "{{ rh_subscription_usage }}" + role: "{{ rh_subscription_role }}" + service_level_agreement: "{{ rh_subscription_sla }}" + sync: true + notify: RHEL auto-attach subscription + ignore_errors: true # noqa ignore-errors + become: true + no_log: "{{ not (unsafe_show_logs|bool) }}" + when: + - rh_subscription_username is defined + - rh_subscription_status.changed + +# container-selinux is in extras repo +- name: Enable RHEL 7 repos + rhsm_repository: + name: + - "rhel-7-server-rpms" + - "rhel-7-server-extras-rpms" + state: enabled + when: + - rhel_enable_repos | default(True) | bool + - ansible_distribution_major_version == "7" + +# container-selinux is in appstream repo +- name: Enable RHEL 8 repos + rhsm_repository: + name: + - "rhel-8-for-*-baseos-rpms" + - "rhel-8-for-*-appstream-rpms" + state: enabled + when: + - rhel_enable_repos | default(True) | bool + - ansible_distribution_major_version == "8" + +- name: Check presence of fastestmirror.conf + stat: + path: /etc/yum/pluginconf.d/fastestmirror.conf + get_attributes: no + get_checksum: no + get_mime: no + register: fastestmirror + +# the fastestmirror plugin can actually slow down Ansible deployments +- name: Disable fastestmirror plugin if requested + lineinfile: + dest: /etc/yum/pluginconf.d/fastestmirror.conf + regexp: "^enabled=.*" + line: "enabled=0" + state: present + become: true + when: + - fastestmirror.stat.exists + - not centos_fastestmirror_enabled + +# libselinux-python is required on SELinux enabled hosts +# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements +- name: Install libselinux python package + package: + name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + state: present + become: true diff --git a/kubespray/roles/bootstrap-os/tasks/main.yml b/kubespray/roles/bootstrap-os/tasks/main.yml new file mode 100644 index 0000000..853ce09 --- /dev/null +++ b/kubespray/roles/bootstrap-os/tasks/main.yml @@ -0,0 +1,100 @@ +--- +- name: Fetch /etc/os-release + raw: cat /etc/os-release + register: os_release + changed_when: false + # This command should always run, even in check mode + check_mode: false + +- include_tasks: bootstrap-centos.yml + when: '''ID="centos"'' in 
os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines or ''ID="uos"'' in os_release.stdout_lines or ''ID="openEuler"'' in os_release.stdout_lines' + +- include_tasks: bootstrap-amazon.yml + when: '''ID="amzn"'' in os_release.stdout_lines' + +- include_tasks: bootstrap-redhat.yml + when: '''ID="rhel"'' in os_release.stdout_lines' + +- include_tasks: bootstrap-clearlinux.yml + when: '''ID=clear-linux-os'' in os_release.stdout_lines' + +# Fedora CoreOS +- include_tasks: bootstrap-fedora-coreos.yml + when: + - '''ID=fedora'' in os_release.stdout_lines' + - '''VARIANT_ID=coreos'' in os_release.stdout_lines' + +- include_tasks: bootstrap-flatcar.yml + when: '''ID=flatcar'' in os_release.stdout_lines' + +- include_tasks: bootstrap-debian.yml + when: '''ID=debian'' in os_release.stdout_lines or ''ID=ubuntu'' in os_release.stdout_lines' + +# Fedora "classic" +- include_tasks: bootstrap-fedora.yml + when: + - '''ID=fedora'' in os_release.stdout_lines' + - '''VARIANT_ID=coreos'' not in os_release.stdout_lines' + +- include_tasks: bootstrap-opensuse.yml + when: '''ID="opensuse-leap"'' in os_release.stdout_lines or ''ID="opensuse-tumbleweed"'' in os_release.stdout_lines' + +- name: Create remote_tmp for it is used by another module + file: + path: "{{ ansible_remote_tmp | default('~/.ansible/tmp') }}" + state: directory + mode: 0700 + +# Workaround for https://github.com/ansible/ansible/issues/42726 +# (1/3) +- name: Gather host facts to get ansible_os_family + setup: + gather_subset: '!all' + filter: ansible_* + +- name: Assign inventory name to unconfigured hostnames (non-CoreOS, non-Flatcar, Suse and ClearLinux, non-Fedora) + hostname: + name: "{{ inventory_hostname }}" + when: + - override_system_hostname + - ansible_os_family not in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux'] + - not ansible_distribution == "Fedora" + - not is_fedora_coreos + +# (2/3) +- name: Assign inventory name to unconfigured hostnames (CoreOS, Flatcar, Suse, ClearLinux and Fedora only) + command: "hostnamectl set-hostname {{ inventory_hostname }}" + register: hostname_changed + become: true + changed_when: false + when: > + override_system_hostname + and (ansible_os_family in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux'] + or is_fedora_coreos + or ansible_distribution == "Fedora") + +# (3/3) +- name: Update hostname fact (CoreOS, Flatcar, Suse, ClearLinux and Fedora only) + setup: + gather_subset: '!all' + filter: ansible_hostname + when: > + override_system_hostname + and (ansible_os_family in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux'] + or is_fedora_coreos + or ansible_distribution == "Fedora") + +- name: Install ceph-commmon package + package: + name: + - ceph-common + state: present + when: rbd_provisioner_enabled|default(false) + +- name: Ensure bash_completion.d folder exists + file: + name: /etc/bash_completion.d/ + state: directory + owner: root + group: root + mode: 0755 diff --git a/kubespray/roles/container-engine/containerd-common/defaults/main.yml b/kubespray/roles/container-engine/containerd-common/defaults/main.yml new file mode 100644 index 0000000..ae1c6e0 --- /dev/null +++ b/kubespray/roles/container-engine/containerd-common/defaults/main.yml @@ -0,0 +1,17 @@ +--- +# We keep these variables around to allow migration from package +# manager controlled installs to direct download 
ones. +containerd_package: 'containerd.io' +yum_repo_dir: /etc/yum.repos.d + +# Keep minimal repo information around for cleanup +containerd_repo_info: + repos: + +# Ubuntu docker-ce repo +containerd_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu" +containerd_ubuntu_repo_component: "stable" + +# Debian docker-ce repo +containerd_debian_repo_base_url: "https://download.docker.com/linux/debian" +containerd_debian_repo_component: "stable" diff --git a/kubespray/roles/container-engine/containerd-common/meta/main.yml b/kubespray/roles/container-engine/containerd-common/meta/main.yml new file mode 100644 index 0000000..a4159c5 --- /dev/null +++ b/kubespray/roles/container-engine/containerd-common/meta/main.yml @@ -0,0 +1,2 @@ +--- +allow_duplicates: true \ No newline at end of file diff --git a/kubespray/roles/container-engine/containerd-common/tasks/main.yml b/kubespray/roles/container-engine/containerd-common/tasks/main.yml new file mode 100644 index 0000000..cfd78f3 --- /dev/null +++ b/kubespray/roles/container-engine/containerd-common/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: containerd-common | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: containerd-common | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: containerd-common | gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + skip: true + tags: + - facts diff --git a/kubespray/roles/container-engine/containerd-common/vars/amazon.yml b/kubespray/roles/container-engine/containerd-common/vars/amazon.yml new file mode 100644 index 0000000..0568169 --- /dev/null +++ b/kubespray/roles/container-engine/containerd-common/vars/amazon.yml @@ -0,0 +1,2 @@ +--- +containerd_package: containerd diff --git a/kubespray/roles/container-engine/containerd-common/vars/suse.yml b/kubespray/roles/container-engine/containerd-common/vars/suse.yml new file mode 100644 index 0000000..0568169 --- /dev/null +++ b/kubespray/roles/container-engine/containerd-common/vars/suse.yml @@ -0,0 +1,2 @@ +--- +containerd_package: containerd diff --git a/kubespray/roles/container-engine/containerd/defaults/main.yml b/kubespray/roles/container-engine/containerd/defaults/main.yml new file mode 100644 index 0000000..83115c4 --- /dev/null +++ b/kubespray/roles/container-engine/containerd/defaults/main.yml @@ -0,0 +1,75 @@ +--- +containerd_storage_dir: "/var/lib/containerd" +containerd_state_dir: "/run/containerd" +containerd_systemd_dir: "/etc/systemd/system/containerd.service.d" +# The default value is not -999 here because containerd's oom_score_adj has been +# set to the -999 even if containerd_oom_score is 0. 
+# Ref: https://github.com/kubernetes-sigs/kubespray/pull/9275#issuecomment-1246499242 +containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +containerd_runc_runtime: + name: runc + type: "io.containerd.runc.v2" + engine: "" + root: "" + base_runtime_spec: cri-base.json + options: + systemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}" + +containerd_additional_runtimes: [] +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +containerd_base_runtime_spec_rlimit_nofile: 65535 + +containerd_default_base_runtime_spec_patch: + process: + rlimits: + - type: RLIMIT_NOFILE + hard: "{{ containerd_base_runtime_spec_rlimit_nofile }}" + soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}" + +containerd_base_runtime_specs: + cri-base.json: "{{ containerd_default_base_runtime_spec | combine(containerd_default_base_runtime_spec_patch,recursive=1) }}" + +containerd_grpc_max_recv_message_size: 16777216 +containerd_grpc_max_send_message_size: 16777216 + +containerd_debug_level: "info" + +containerd_metrics_address: "" + +containerd_metrics_grpc_histogram: false + +containerd_registries: + "docker.io": "https://registry-1.docker.io" + +containerd_max_container_log_line_size: -1 + +# If enabled it will allow non root users to use port numbers <1024 +containerd_enable_unprivileged_ports: false +# If enabled it will allow non root users to use icmp sockets +containerd_enable_unprivileged_icmp: false + +containerd_cfg_dir: /etc/containerd + +# Extra config to be put in {{ containerd_cfg_dir }}/config.toml literally +containerd_extra_args: '' + +# Configure registry auth (if applicable to secure/insecure registries) +containerd_registry_auth: [] +# - registry: 10.0.0.2:5000 +# username: user +# password: pass + +# Configure containerd service +containerd_limit_proc_num: "infinity" +containerd_limit_core: "infinity" +containerd_limit_open_file_num: "infinity" +containerd_limit_mem_lock: "infinity" diff --git a/kubespray/roles/container-engine/containerd/handlers/main.yml b/kubespray/roles/container-engine/containerd/handlers/main.yml new file mode 100644 index 0000000..d2f1265 --- /dev/null +++ b/kubespray/roles/container-engine/containerd/handlers/main.yml @@ -0,0 +1,21 @@ +--- +- name: restart containerd + command: /bin/true + notify: + - Containerd | restart containerd + - Containerd | wait for containerd + +- name: Containerd | restart containerd + systemd: + name: containerd + state: restarted + enabled: yes + daemon-reload: yes + masked: no + +- name: Containerd | wait for containerd + command: "{{ containerd_bin_dir }}/ctr images ls -q" + register: containerd_ready + retries: 8 + delay: 4 + until: containerd_ready.rc == 0 diff --git a/kubespray/roles/container-engine/containerd/handlers/reset.yml b/kubespray/roles/container-engine/containerd/handlers/reset.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/kubespray/roles/container-engine/containerd/handlers/reset.yml @@ -0,0 +1 @@ +--- diff --git a/kubespray/roles/container-engine/containerd/meta/main.yml b/kubespray/roles/container-engine/containerd/meta/main.yml new file mode 100644 index 0000000..41c5b6a --- /dev/null +++ b/kubespray/roles/container-engine/containerd/meta/main.yml @@ -0,0 +1,6 @@ +--- +dependencies: + - role: container-engine/containerd-common + - role: container-engine/runc + - role: container-engine/crictl + - role: container-engine/nerdctl diff --git 
a/kubespray/roles/container-engine/containerd/molecule/default/converge.yml b/kubespray/roles/container-engine/containerd/molecule/default/converge.yml new file mode 100644 index 0000000..7847871 --- /dev/null +++ b/kubespray/roles/container-engine/containerd/molecule/default/converge.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + container_manager: containerd + roles: + - role: kubespray-defaults + - role: container-engine/containerd diff --git a/kubespray/roles/container-engine/containerd/molecule/default/molecule.yml b/kubespray/roles/container-engine/containerd/molecule/default/molecule.yml new file mode 100644 index 0000000..009b5aa --- /dev/null +++ b/kubespray/roles/container-engine/containerd/molecule/default/molecule.yml @@ -0,0 +1,49 @@ +--- +driver: + name: vagrant + provider: + name: libvirt +lint: | + set -e + yamllint -c ../../../.yamllint . +platforms: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster + - name: debian11 + box: generic/debian11 + cpus: 1 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/roles/container-engine/containerd/molecule/default/prepare.yml b/kubespray/roles/container-engine/containerd/molecule/default/prepare.yml new file mode 100644 index 0000000..100673c --- /dev/null +++ b/kubespray/roles/container-engine/containerd/molecule/default/prepare.yml @@ -0,0 +1,28 @@ +--- +- name: Prepare + hosts: all + gather_facts: False + become: true + vars: + ignore_assert_errors: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: kubernetes/preinstall + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare CNI + hosts: all + gather_facts: False + become: true + vars: + ignore_assert_errors: true + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni diff --git a/kubespray/roles/container-engine/containerd/molecule/default/tests/test_default.py b/kubespray/roles/container-engine/containerd/molecule/default/tests/test_default.py new file mode 100644 index 0000000..e1d9151 --- /dev/null +++ b/kubespray/roles/container-engine/containerd/molecule/default/tests/test_default.py @@ -0,0 +1,55 @@ +import os +import pytest + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_service(host): + svc = host.service("containerd") + assert svc.is_running + assert svc.is_enabled + + +def test_version(host): + crictl = "/usr/local/bin/crictl" + path = "unix:///var/run/containerd/containerd.sock" + with host.sudo(): + cmd = host.command(crictl + " --runtime-endpoint " + path + " version") + assert cmd.rc == 0 + assert "RuntimeName: containerd" in cmd.stdout + + +@pytest.mark.parametrize('image, dest', [ + ('quay.io/kubespray/hello-world:latest', '/tmp/hello-world.tar') +]) +def 
test_image_pull_save_load(host, image, dest): + nerdctl = "/usr/local/bin/nerdctl" + dest_file = host.file(dest) + + with host.sudo(): + pull_cmd = host.command(nerdctl + " pull " + image) + assert pull_cmd.rc ==0 + + with host.sudo(): + save_cmd = host.command(nerdctl + " save -o " + dest + " " + image) + assert save_cmd.rc == 0 + assert dest_file.exists + + with host.sudo(): + load_cmd = host.command(nerdctl + " load < " + dest) + assert load_cmd.rc == 0 + + +@pytest.mark.parametrize('image', [ + ('quay.io/kubespray/hello-world:latest') +]) +def test_run(host, image): + nerdctl = "/usr/local/bin/nerdctl" + + with host.sudo(): + cmd = host.command(nerdctl + " -n k8s.io run " + image) + assert cmd.rc == 0 + assert "Hello from Docker" in cmd.stdout diff --git a/kubespray/roles/container-engine/containerd/tasks/main.yml b/kubespray/roles/container-engine/containerd/tasks/main.yml new file mode 100644 index 0000000..03b9668 --- /dev/null +++ b/kubespray/roles/container-engine/containerd/tasks/main.yml @@ -0,0 +1,124 @@ +--- +- name: Fail containerd setup if distribution is not supported + fail: + msg: "{{ ansible_distribution }} is not supported by containerd." + when: + - ansible_distribution not in ["CentOS", "OracleLinux", "RedHat", "Ubuntu", "Debian", "Fedora", "AlmaLinux", "Rocky", "Amazon", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse", "openSUSE Leap", "openSUSE Tumbleweed", "Kylin Linux Advanced Server", "UnionTech", "openEuler"] + +- name: containerd | Remove any package manager controlled containerd package + package: + name: "{{ containerd_package }}" + state: absent + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + +- name: containerd | Remove containerd repository + file: + path: "{{ yum_repo_dir }}/containerd.repo" + state: absent + when: + - ansible_os_family in ['RedHat'] + +- name: containerd | Remove containerd repository + apt_repository: + repo: "{{ item }}" + state: absent + with_items: "{{ containerd_repo_info.repos }}" + when: ansible_pkg_mgr == 'apt' + +- name: containerd | Download containerd + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.containerd) }}" + +- name: containerd | Unpack containerd archive + unarchive: + src: "{{ downloads.containerd.dest }}" + dest: "{{ containerd_bin_dir }}" + mode: 0755 + remote_src: yes + extra_opts: + - --strip-components=1 + notify: restart containerd + +- name: containerd | Remove orphaned binary + file: + path: "/usr/bin/{{ item }}" + state: absent + when: + - containerd_bin_dir != "/usr/bin" + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + ignore_errors: true # noqa ignore-errors + with_items: + - containerd + - containerd-shim + - containerd-shim-runc-v1 + - containerd-shim-runc-v2 + - ctr + +- name: containerd | Generate systemd service for containerd + template: + src: containerd.service.j2 + dest: /etc/systemd/system/containerd.service + mode: 0644 + notify: restart containerd + +- name: containerd | Ensure containerd directories exist + file: + dest: "{{ item }}" + state: directory + mode: 0755 + owner: root + group: root + with_items: + - "{{ containerd_systemd_dir }}" + - "{{ containerd_cfg_dir }}" + - "{{ containerd_storage_dir }}" + - "{{ containerd_state_dir }}" + +- name: containerd | Write containerd proxy drop-in + template: + src: http-proxy.conf.j2 + dest: "{{ 
containerd_systemd_dir }}/http-proxy.conf" + mode: 0644 + notify: restart containerd + when: http_proxy is defined or https_proxy is defined + +- name: containerd | Generate default base_runtime_spec + register: ctr_oci_spec + command: "{{ containerd_bin_dir }}/ctr oci spec" + check_mode: false + changed_when: false + +- name: containerd | Store generated default base_runtime_spec + set_fact: + containerd_default_base_runtime_spec: "{{ ctr_oci_spec.stdout | from_json }}" + +- name: containerd | Write base_runtime_specs + copy: + content: "{{ item.value }}" + dest: "{{ containerd_cfg_dir }}/{{ item.key }}" + owner: "root" + mode: 0644 + with_dict: "{{ containerd_base_runtime_specs | default({}) }}" + notify: restart containerd + +- name: containerd | Copy containerd config file + template: + src: config.toml.j2 + dest: "{{ containerd_cfg_dir }}/config.toml" + owner: "root" + mode: 0640 + notify: restart containerd + +# you can sometimes end up in a state where everything is installed +# but containerd was not started / enabled +- name: containerd | Flush handlers + meta: flush_handlers + +- name: containerd | Ensure containerd is started and enabled + systemd: + name: containerd + daemon_reload: yes + enabled: yes + state: started diff --git a/kubespray/roles/container-engine/containerd/tasks/reset.yml b/kubespray/roles/container-engine/containerd/tasks/reset.yml new file mode 100644 index 0000000..5c551b6 --- /dev/null +++ b/kubespray/roles/container-engine/containerd/tasks/reset.yml @@ -0,0 +1,41 @@ +--- +- name: containerd | Remove containerd repository for RedHat os family + file: + path: "{{ yum_repo_dir }}/containerd.repo" + state: absent + when: + - ansible_os_family in ['RedHat'] + tags: + - reset_containerd + +- name: containerd | Remove containerd repository for Debian os family + apt_repository: + repo: "{{ item }}" + state: absent + with_items: "{{ containerd_repo_info.repos }}" + when: ansible_pkg_mgr == 'apt' + tags: + - reset_containerd + +- name: containerd | Stop containerd service + service: + name: containerd + daemon_reload: true + enabled: false + masked: true + state: stopped + tags: + - reset_containerd + +- name: containerd | Remove configuration files + file: + path: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/containerd.service + - "{{ containerd_systemd_dir }}" + - "{{ containerd_cfg_dir }}" + - "{{ containerd_storage_dir }}" + - "{{ containerd_state_dir }}" + tags: + - reset_containerd diff --git a/kubespray/roles/container-engine/containerd/templates/config.toml.j2 b/kubespray/roles/container-engine/containerd/templates/config.toml.j2 new file mode 100644 index 0000000..c1bda12 --- /dev/null +++ b/kubespray/roles/container-engine/containerd/templates/config.toml.j2 @@ -0,0 +1,79 @@ +version = 2 +root = "{{ containerd_storage_dir }}" +state = "{{ containerd_state_dir }}" +oom_score = {{ containerd_oom_score }} + +[grpc] + max_recv_message_size = {{ containerd_grpc_max_recv_message_size | default(16777216) }} + max_send_message_size = {{ containerd_grpc_max_send_message_size | default(16777216) }} + +[debug] + level = "{{ containerd_debug_level | default('info') }}" + +[metrics] + address = "{{ containerd_metrics_address | default('') }}" + grpc_histogram = {{ containerd_metrics_grpc_histogram | default(false) | lower }} + +[plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}" + max_container_log_line_size = {{ containerd_max_container_log_line_size }} + enable_unprivileged_ports 
= {{ containerd_enable_unprivileged_ports | default(false) | lower }} + enable_unprivileged_icmp = {{ containerd_enable_unprivileged_icmp | default(false) | lower }} + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "{{ containerd_default_runtime | default('runc') }}" + snapshotter = "{{ containerd_snapshotter | default('overlayfs') }}" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] +{% for runtime in [containerd_runc_runtime] + containerd_additional_runtimes %} + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}] + runtime_type = "{{ runtime.type }}" + runtime_engine = "{{ runtime.engine }}" + runtime_root = "{{ runtime.root }}" +{% if runtime.base_runtime_spec is defined %} + base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}" +{% endif %} + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}.options] +{% for key, value in runtime.options.items() %} + {{ key }} = {{ value }} +{% endfor %} +{% endfor %} +{% if kata_containers_enabled %} + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-qemu] + runtime_type = "io.containerd.kata-qemu.v2" +{% endif %} +{% if gvisor_enabled %} + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc] + runtime_type = "io.containerd.runsc.v1" +{% endif %} + [plugins."io.containerd.grpc.v1.cri".registry] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] +{% for registry, addr in containerd_registries.items() %} + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry }}"] + endpoint = ["{{ ([ addr ] | flatten ) | join('","') }}"] +{% endfor %} +{% if containerd_insecure_registries is defined and containerd_insecure_registries|length>0 %} +{% for registry, addr in containerd_insecure_registries.items() %} + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry }}"] + endpoint = ["{{ ([ addr ] | flatten ) | join('","') }}"] +{% endfor %} +{% for addr in containerd_insecure_registries.values() | flatten | unique %} + [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ addr }}".tls] + insecure_skip_verify = true +{% endfor %} +{% endif %} +{% for registry in containerd_registry_auth if registry['registry'] is defined %} +{% if (registry['username'] is defined and registry['password'] is defined) or registry['auth'] is defined %} + [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ registry['registry'] }}".auth] +{% if registry['username'] is defined and registry['password'] is defined %} + password = "{{ registry['password'] }}" + username = "{{ registry['username'] }}" +{% else %} + auth = "{{ registry['auth'] }}" +{% endif %} +{% endif %} +{% endfor %} + +{% if containerd_extra_args is defined %} +{{ containerd_extra_args }} +{% endif %} diff --git a/kubespray/roles/container-engine/containerd/templates/containerd.service.j2 b/kubespray/roles/container-engine/containerd/templates/containerd.service.j2 new file mode 100644 index 0000000..adebcf2 --- /dev/null +++ b/kubespray/roles/container-engine/containerd/templates/containerd.service.j2 @@ -0,0 +1,41 @@ +# Copyright The containerd Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[Unit] +Description=containerd container runtime +Documentation=https://containerd.io +After=network.target local-fs.target + +[Service] +ExecStartPre=-/sbin/modprobe overlay +ExecStart={{ containerd_bin_dir }}/containerd + +Type=notify +Delegate=yes +KillMode=process +Restart=always +RestartSec=5 +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNPROC={{ containerd_limit_proc_num }} +LimitCORE={{ containerd_limit_core }} +LimitNOFILE={{ containerd_limit_open_file_num }} +LimitMEMLOCK={{ containerd_limit_mem_lock }} +# Comment TasksMax if your systemd version does not support it. +# Only systemd 226 and above support this option. +TasksMax=infinity +OOMScoreAdjust=-999 + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/roles/container-engine/containerd/templates/http-proxy.conf.j2 b/kubespray/roles/container-engine/containerd/templates/http-proxy.conf.j2 new file mode 100644 index 0000000..212f30f --- /dev/null +++ b/kubespray/roles/container-engine/containerd/templates/http-proxy.conf.j2 @@ -0,0 +1,2 @@ +[Service] +Environment={% if http_proxy is defined %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy is defined %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy is defined %}"NO_PROXY={{ no_proxy }}"{% endif %} diff --git a/kubespray/roles/container-engine/containerd/vars/debian.yml b/kubespray/roles/container-engine/containerd/vars/debian.yml new file mode 100644 index 0000000..99dc4a5 --- /dev/null +++ b/kubespray/roles/container-engine/containerd/vars/debian.yml @@ -0,0 +1,7 @@ +--- +containerd_repo_info: + repos: + - > + deb {{ containerd_debian_repo_base_url }} + {{ ansible_distribution_release|lower }} + {{ containerd_debian_repo_component }} diff --git a/kubespray/roles/container-engine/containerd/vars/ubuntu.yml b/kubespray/roles/container-engine/containerd/vars/ubuntu.yml new file mode 100644 index 0000000..ccce96d --- /dev/null +++ b/kubespray/roles/container-engine/containerd/vars/ubuntu.yml @@ -0,0 +1,7 @@ +--- +containerd_repo_info: + repos: + - > + deb {{ containerd_ubuntu_repo_base_url }} + {{ ansible_distribution_release|lower }} + {{ containerd_ubuntu_repo_component }} diff --git a/kubespray/roles/container-engine/cri-dockerd/handlers/main.yml b/kubespray/roles/container-engine/cri-dockerd/handlers/main.yml new file mode 100644 index 0000000..9d9d8c6 --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/handlers/main.yml @@ -0,0 +1,35 @@ +--- +- name: restart and enable cri-dockerd + command: /bin/true + notify: + - cri-dockerd | reload systemd + - cri-dockerd | restart docker.service + - cri-dockerd | reload cri-dockerd.socket + - cri-dockerd | reload cri-dockerd.service + - cri-dockerd | enable cri-dockerd service + +- name: cri-dockerd | reload systemd + systemd: + name: cri-dockerd + daemon_reload: true + masked: no + +- name: cri-dockerd | restart docker.service + service: + name: docker.service + state: restarted + +- name: cri-dockerd | reload cri-dockerd.socket + service: + name:
cri-dockerd.socket + state: restarted + +- name: cri-dockerd | reload cri-dockerd.service + service: + name: cri-dockerd.service + state: restarted + +- name: cri-dockerd | enable cri-dockerd service + service: + name: cri-dockerd.service + enabled: yes diff --git a/kubespray/roles/container-engine/cri-dockerd/meta/main.yml b/kubespray/roles/container-engine/cri-dockerd/meta/main.yml new file mode 100644 index 0000000..4923f3b --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - role: container-engine/docker + - role: container-engine/crictl diff --git a/kubespray/roles/container-engine/cri-dockerd/molecule/default/converge.yml b/kubespray/roles/container-engine/cri-dockerd/molecule/default/converge.yml new file mode 100644 index 0000000..be6fa38 --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/molecule/default/converge.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + container_manager: docker + roles: + - role: kubespray-defaults + - role: container-engine/cri-dockerd diff --git a/kubespray/roles/container-engine/cri-dockerd/molecule/default/files/10-mynet.conf b/kubespray/roles/container-engine/cri-dockerd/molecule/default/files/10-mynet.conf new file mode 100644 index 0000000..f10935b --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.2.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff --git a/kubespray/roles/container-engine/cri-dockerd/molecule/default/files/container.json b/kubespray/roles/container-engine/cri-dockerd/molecule/default/files/container.json new file mode 100644 index 0000000..1d839e6 --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/molecule/default/files/container.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "cri-dockerd1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "cri-dockerd1.0.log", + "linux": {} +} diff --git a/kubespray/roles/container-engine/cri-dockerd/molecule/default/files/sandbox.json b/kubespray/roles/container-engine/cri-dockerd/molecule/default/files/sandbox.json new file mode 100644 index 0000000..f451e9e --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "cri-dockerd1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/kubespray/roles/container-engine/cri-dockerd/molecule/default/molecule.yml b/kubespray/roles/container-engine/cri-dockerd/molecule/default/molecule.yml new file mode 100644 index 0000000..c82ddba --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/molecule/default/molecule.yml @@ -0,0 +1,45 @@ +--- +driver: + name: vagrant + provider: + name: libvirt + options: + driver: kvm +lint: | + set -e + yamllint -c ../../../.yamllint . 
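# Illustrative usage (assumes molecule, Vagrant and the vagrant-libvirt plugin are
# installed locally; not part of the upstream scenario): from the cri-dockerd role
# directory this scenario would typically be exercised with
#   molecule test -s default
# or, while iterating, with `molecule converge -s default` followed by `molecule verify`.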
+platforms: + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint + inventory: + group_vars: + all: + become: true +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/roles/container-engine/cri-dockerd/molecule/default/prepare.yml b/kubespray/roles/container-engine/cri-dockerd/molecule/default/prepare.yml new file mode 100644 index 0000000..c54feac --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/molecule/default/prepare.yml @@ -0,0 +1,47 @@ +--- +- name: Prepare + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: containerd + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: 0644 + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: "{{ kube_owner }}" + mode: 0755 + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: 0644 + with_items: + - 10-mynet.conf diff --git a/kubespray/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py b/kubespray/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py new file mode 100644 index 0000000..dc99b34 --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py @@ -0,0 +1,19 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run_pod(host): + run_command = "/usr/local/bin/crictl run --with-pull /tmp/container.json /tmp/sandbox.json" + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/cri-dockerd1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/kubespray/roles/container-engine/cri-dockerd/tasks/main.yml b/kubespray/roles/container-engine/cri-dockerd/tasks/main.yml new file mode 100644 index 0000000..9ce3ec6 --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- name: runc | Download cri-dockerd binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cri_dockerd) }}" + +- name: Copy cri-dockerd binary from download dir + copy: + src: "{{ local_release_dir }}/cri-dockerd" + dest: "{{ bin_dir }}/cri-dockerd" + mode: 0755 + remote_src: true + notify: + - restart and enable cri-dockerd + +- name: Generate cri-dockerd systemd unit files + template: + src: "{{ item }}.j2" + dest: "/etc/systemd/system/{{ item }}" + mode: 0644 + with_items: + - 
cri-dockerd.service + - cri-dockerd.socket + notify: + - restart and enable cri-dockerd + +- name: Flush handlers + meta: flush_handlers diff --git a/kubespray/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 b/kubespray/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 new file mode 100644 index 0000000..078f666 --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/templates/cri-dockerd.service.j2 @@ -0,0 +1,40 @@ +[Unit] +Description=CRI Interface for Docker Application Container Engine +Documentation=https://docs.mirantis.com +After=network-online.target firewalld.service docker.service +Wants=network-online.target docker.service +Requires=cri-dockerd.socket + +[Service] +Type=notify +ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnet }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} {% if enable_dual_stack_networks %}--ipv6-dual-stack=True{% endif %} + +ExecReload=/bin/kill -s HUP $MAINPID +TimeoutSec=0 +RestartSec=2 +Restart=always + +# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. +# Both the old, and new location are accepted by systemd 229 and up, so using the old location +# to make them work for either version of systemd. +StartLimitBurst=3 + +# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. +# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make +# this option work for either version of systemd. +StartLimitInterval=60s + +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity + +# Comment TasksMax if your systemd version does not support it. +# Only systemd 226 and above support this option. +TasksMax=infinity +Delegate=yes +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/roles/container-engine/cri-dockerd/templates/cri-dockerd.socket.j2 b/kubespray/roles/container-engine/cri-dockerd/templates/cri-dockerd.socket.j2 new file mode 100644 index 0000000..8dfa27d --- /dev/null +++ b/kubespray/roles/container-engine/cri-dockerd/templates/cri-dockerd.socket.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=CRI Docker Socket for the API +PartOf=cri-dockerd.service + +[Socket] +ListenStream=%t/cri-dockerd.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff --git a/kubespray/roles/container-engine/cri-o/defaults/main.yml b/kubespray/roles/container-engine/cri-o/defaults/main.yml new file mode 100644 index 0000000..6b757fe --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/defaults/main.yml @@ -0,0 +1,103 @@ +--- + +crio_cgroup_manager: "{{ kubelet_cgroup_driver | default('systemd') }}" +crio_conmon: "{{ bin_dir }}/conmon" +crio_enable_metrics: false +crio_log_level: "info" +crio_metrics_port: "9090" +crio_pause_image: "{{ pod_infra_image_repo }}:{{ pod_infra_version }}" + +# Registries defined within cri-o. 
+# By default unqualified images are not allowed for security reasons +crio_registries: [] +# - prefix: docker.io +# insecure: false +# blocked: false +# location: registry-1.docker.io ## REQUIRED +# unqualified: false +# mirrors: +# - location: 172.20.100.52:5000 +# insecure: true +# - location: mirror.gcr.io +# insecure: false + +crio_registry_auth: [] +# - registry: 10.0.0.2:5000 +# username: user +# password: pass + +crio_seccomp_profile: "" +crio_selinux: "{{ (preinstall_selinux_state == 'enforcing')|lower }}" +crio_signature_policy: "{% if ansible_os_family == 'ClearLinux' %}/usr/share/defaults/crio/policy.json{% endif %}" + +# Override system default for storage driver +# crio_storage_driver: "overlay" + +crio_stream_port: "10010" + +crio_required_version: "{{ kube_version | regex_replace('^v(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}" + +# The crio_runtimes variable defines a list of OCI compatible runtimes. +crio_runtimes: + - name: runc + path: "{{ bin_dir }}/runc" + type: oci + root: /run/runc + +# Kata Containers is an OCI runtime, where containers are run inside lightweight +# VMs. Kata provides additional isolation towards the host, minimizing the host attack +# surface and mitigating the consequences of a container breakout. +kata_runtimes: + # Kata Containers with the default configured VMM + - name: kata-qemu + path: /usr/local/bin/containerd-shim-kata-qemu-v2 + type: vm + root: /run/kata-containers + privileged_without_host_devices: true + +# crun is a fast and low-memory footprint OCI Container Runtime fully written in C. +crun_runtime: + name: crun + path: "{{ bin_dir }}/crun" + type: oci + root: /run/crun + +# youki is an implementation of the OCI runtime-spec in Rust, similar to runc. +youki_runtime: + name: youki + path: "{{ youki_bin_dir }}/youki" + type: oci + root: /run/youki + +# TODO(cristicalin): remove this after 2.21 +crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" + +# Configure the cri-o pids limit, increase this for heavily multi-threaded workloads +# see https://github.com/cri-o/cri-o/issues/1921 +crio_pids_limit: 1024 + +# Reserve 16M uids and gids for user namespaces (256 pods * 65536 uids/gids) +# at the end of the uid/gid space +crio_remap_enable: false +crio_remap_user: containers +crio_subuid_start: 2130706432 +crio_subuid_length: 16777216 +crio_subgid_start: 2130706432 +crio_subgid_length: 16777216 + +# cri-o binary files +crio_bin_files: + - conmon + - crio + - crio-status + - pinns + +# cri-o manual files +crio_man_files: + 5: + - crio.conf + - crio.conf.d + 8: + - crio + - crio-status diff --git a/kubespray/roles/container-engine/cri-o/files/mounts.conf b/kubespray/roles/container-engine/cri-o/files/mounts.conf new file mode 100644 index 0000000..b7cde9d --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/files/mounts.conf @@ -0,0 +1 @@ +/usr/share/rhel/secrets:/run/secrets diff --git a/kubespray/roles/container-engine/cri-o/handlers/main.yml b/kubespray/roles/container-engine/cri-o/handlers/main.yml new file mode 100644 index 0000000..8bc936b --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/handlers/main.yml @@ -0,0 +1,16 @@ +--- +- name: restart crio + command: /bin/true + notify: + - CRI-O | reload systemd + - CRI-O | reload crio + +- name: CRI-O | reload systemd + systemd: + daemon_reload: true + +- name: CRI-O | reload crio + service: + name: crio + state: restarted + enabled: yes diff --git
a/kubespray/roles/container-engine/cri-o/meta/main.yml b/kubespray/roles/container-engine/cri-o/meta/main.yml new file mode 100644 index 0000000..3304f70 --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - role: container-engine/crictl + - role: container-engine/runc + - role: container-engine/skopeo diff --git a/kubespray/roles/container-engine/cri-o/molecule/default/converge.yml b/kubespray/roles/container-engine/cri-o/molecule/default/converge.yml new file mode 100644 index 0000000..376f07c --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/molecule/default/converge.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + container_manager: crio + roles: + - role: kubespray-defaults + - role: container-engine/cri-o diff --git a/kubespray/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf b/kubespray/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf new file mode 100644 index 0000000..f10935b --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.2.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff --git a/kubespray/roles/container-engine/cri-o/molecule/default/files/container.json b/kubespray/roles/container-engine/cri-o/molecule/default/files/container.json new file mode 100644 index 0000000..bcd71e7 --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/molecule/default/files/container.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "runc1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "runc1.0.log", + "linux": {} +} diff --git a/kubespray/roles/container-engine/cri-o/molecule/default/files/sandbox.json b/kubespray/roles/container-engine/cri-o/molecule/default/files/sandbox.json new file mode 100644 index 0000000..eb9dcb9 --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "runc1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/kubespray/roles/container-engine/cri-o/molecule/default/molecule.yml b/kubespray/roles/container-engine/cri-o/molecule/default/molecule.yml new file mode 100644 index 0000000..163eb8e --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/molecule/default/molecule.yml @@ -0,0 +1,57 @@ +--- +driver: + name: vagrant + provider: + name: libvirt +lint: | + set -e + yamllint -c ../../../.yamllint . 
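# Illustrative manual check (an assumption mirroring the testinfra verifier further
# below, not part of the upstream scenario): after a successful converge, CRI-O can
# be inspected on a platform host with, for example:
#   sudo /usr/local/bin/crictl --runtime-endpoint unix:///var/run/crio/crio.sock version
# which is expected to report "RuntimeName: cri-o".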
+platforms: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 2 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster + - name: almalinux8 + box: almalinux/8 + cpus: 2 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster + - name: fedora + box: fedora/36-cloud-base + cpus: 2 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster + - name: debian10 + box: generic/debian10 + cpus: 2 + memory: 1024 + groups: + - kube_control_plane + - kube_node + - k8s_cluster +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/roles/container-engine/cri-o/molecule/default/prepare.yml b/kubespray/roles/container-engine/cri-o/molecule/default/prepare.yml new file mode 100644 index 0000000..ec47a1e --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/molecule/default/prepare.yml @@ -0,0 +1,52 @@ +--- +- name: Prepare + hosts: all + gather_facts: False + become: true + vars: + ignore_assert_errors: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: kubernetes/preinstall + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare CNI + hosts: all + gather_facts: False + become: true + vars: + ignore_assert_errors: true + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: 0644 + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: "{{ kube_owner }}" + mode: 0755 + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: 0644 + with_items: + - 10-mynet.conf diff --git a/kubespray/roles/container-engine/cri-o/molecule/default/tests/test_default.py b/kubespray/roles/container-engine/cri-o/molecule/default/tests/test_default.py new file mode 100644 index 0000000..358a1b7 --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/molecule/default/tests/test_default.py @@ -0,0 +1,35 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_service(host): + svc = host.service("crio") + assert svc.is_running + assert svc.is_enabled + + +def test_run(host): + crictl = "/usr/local/bin/crictl" + path = "unix:///var/run/crio/crio.sock" + with host.sudo(): + cmd = host.command(crictl + " --runtime-endpoint " + path + " version") + assert cmd.rc == 0 + assert "RuntimeName: cri-o" in cmd.stdout + +def test_run_pod(host): + runtime = "runc" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/runc1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/kubespray/roles/container-engine/cri-o/tasks/cleanup.yaml b/kubespray/roles/container-engine/cri-o/tasks/cleanup.yaml new file 
mode 100644 index 0000000..28c0c3a --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/tasks/cleanup.yaml @@ -0,0 +1,119 @@ +--- +# TODO(cristicalin): drop this file after 2.21 +- name: CRI-O kubic repo name for debian os family + set_fact: + crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}" + when: ansible_os_family == "Debian" + +- name: Remove legacy CRI-O kubic apt repo key + apt_key: + url: "https://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/Release.key" + state: absent + when: crio_kubic_debian_repo_name is defined + +- name: Remove legacy CRI-O kubic apt repo + apt_repository: + repo: "deb http://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/ /" + state: absent + filename: devel-kubic-libcontainers-stable + when: crio_kubic_debian_repo_name is defined + +- name: Remove legacy CRI-O kubic cri-o apt repo + apt_repository: + repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /" + state: absent + filename: devel-kubic-libcontainers-stable-cri-o + when: crio_kubic_debian_repo_name is defined + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: devel_kubic_libcontainers_stable + description: Stable Releases of Upstream github.com/containers packages (CentOS_$releasever) + baseurl: http://{{ crio_download_base }}/CentOS_{{ ansible_distribution_major_version }}/ + state: absent + when: + - ansible_os_family == "RedHat" + - ansible_distribution not in ["Amazon", "Fedora"] + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}" + description: "CRI-O {{ crio_version }} (CentOS_$releasever)" + baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_{{ ansible_distribution_major_version }}/" + state: absent + when: + - ansible_os_family == "RedHat" + - ansible_distribution not in ["Amazon", "Fedora"] + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: devel_kubic_libcontainers_stable + description: Stable Releases of Upstream github.com/containers packages + baseurl: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/ + state: absent + when: + - ansible_distribution in ["Fedora"] + - not is_ostree + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}" + description: "CRI-O {{ crio_version }}" + baseurl: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/" + state: absent + when: + - ansible_distribution in ["Fedora"] + - not is_ostree + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: devel_kubic_libcontainers_stable + description: Stable Releases of Upstream github.com/containers packages + baseurl: http://{{ crio_download_base }}/CentOS_7/ + state: absent + when: ansible_distribution in ["Amazon"] + +- name: Remove legacy CRI-O kubic yum repo + yum_repository: + name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}" + description: "CRI-O {{ crio_version }}" + baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_7/" + state: absent + when: ansible_distribution in ["Amazon"] + +- name: Disable modular repos for CRI-O + ini_file: + path: "/etc/yum.repos.d/{{ item.repo }}.repo" + section: "{{ item.section }}" + option: enabled + value: 0 + mode: 0644 + become: true + when: is_ostree + loop: + - repo: "fedora-updates-modular" 
+ section: "updates-modular" + - repo: "fedora-modular" + section: "fedora-modular" + +# Disable any older module version if we enabled them before +- name: Disable CRI-O ex module + command: "rpm-ostree ex module disable cri-o:{{ item }}" + become: true + when: + - is_ostree + - ostree_version is defined and ostree_version.stdout is version('2021.9', '>=') + with_items: + - 1.22 + - 1.23 + - 1.24 + +- name: cri-o | remove installed packages + package: + name: "{{ item }}" + state: absent + when: not is_ostree + with_items: + - cri-o + - cri-o-runc + - oci-systemd-hook diff --git a/kubespray/roles/container-engine/cri-o/tasks/main.yaml b/kubespray/roles/container-engine/cri-o/tasks/main.yaml new file mode 100644 index 0000000..89aab56 --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/tasks/main.yaml @@ -0,0 +1,206 @@ +--- +- name: cri-o | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: cri-o | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: cri-o | get ostree version + shell: "set -o pipefail && rpm-ostree --version | awk -F\\' '/Version/{print $2}'" + args: + executable: /bin/bash + register: ostree_version + when: is_ostree + +- name: cri-o | Download cri-o + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.crio) }}" + +- name: cri-o | special handling for amazon linux + import_tasks: "setup-amazon.yaml" + when: ansible_distribution in ["Amazon"] + +- name: cri-o | clean up reglacy repos + import_tasks: "cleanup.yaml" + +- name: cri-o | build a list of crio runtimes with Katacontainers runtimes + set_fact: + crio_runtimes: "{{ crio_runtimes + kata_runtimes }}" + when: + - kata_containers_enabled + +- name: cri-o | build a list of crio runtimes with crun runtime + set_fact: + crio_runtimes: "{{ crio_runtimes + [crun_runtime] }}" + when: + - crun_enabled + +- name: cri-o | build a list of crio runtimes with youki runtime + set_fact: + crio_runtimes: "{{ crio_runtimes + [youki_runtime] }}" + when: + - youki_enabled + +- name: cri-o | make sure needed folders exist in the system + with_items: + - /etc/crio + - /etc/containers + - /etc/systemd/system/crio.service.d + file: + path: "{{ item }}" + state: directory + mode: 0755 + +- name: cri-o | install cri-o config + template: + src: crio.conf.j2 + dest: /etc/crio/crio.conf + mode: 0644 + register: config_install + +- name: cri-o | install config.json + template: + src: config.json.j2 + dest: /etc/crio/config.json + mode: 0644 + register: reg_auth_install + +- name: cri-o | copy binaries + copy: + src: "{{ local_release_dir }}/cri-o/bin/{{ item }}" + dest: "{{ bin_dir }}/{{ item }}" + mode: 0755 + remote_src: true + with_items: + - "{{ crio_bin_files }}" + notify: restart crio + +- name: cri-o | copy service file + copy: + src: "{{ local_release_dir }}/cri-o/contrib/crio.service" + dest: /etc/systemd/system/crio.service + mode: 0755 + remote_src: true + notify: restart crio + +- name: cri-o | copy default policy + copy: + src: "{{ local_release_dir }}/cri-o/contrib/policy.json" + dest: /etc/containers/policy.json + mode: 0755 + remote_src: true + notify: restart crio + +- name: cri-o | copy mounts.conf + copy: + src: mounts.conf + dest: /etc/containers/mounts.conf + mode: 0644 + when: + - ansible_os_family == 'RedHat' + notify: restart crio + +- name: cri-o | create directory for oci hooks + file: + path: /etc/containers/oci/hooks.d + state: 
directory + owner: root + mode: 0755 + +- name: cri-o | set overlay driver + ini_file: + dest: /etc/containers/storage.conf + section: storage + option: "{{ item.option }}" + value: "{{ item.value }}" + mode: 0644 + with_items: + - option: driver + value: '"overlay"' + - option: graphroot + value: '"/var/lib/containers/storage"' + +# metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel +- name: cri-o | set metacopy mount options correctly + ini_file: + dest: /etc/containers/storage.conf + section: storage.options.overlay + option: mountopt + value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}' + mode: 0644 + +- name: cri-o | create directory registries configs + file: + path: /etc/containers/registries.conf.d + state: directory + owner: root + mode: 0755 + +- name: cri-o | write registries configs + template: + src: registry.conf.j2 + dest: "/etc/containers/registries.conf.d/10-{{ item.prefix | default(item.location) | regex_replace(':', '_') }}.conf" + mode: 0644 + loop: "{{ crio_registries }}" + notify: restart crio + +- name: cri-o | configure unqualified registry settings + template: + src: unqualified.conf.j2 + dest: "/etc/containers/registries.conf.d/01-unqualified.conf" + mode: 0644 + notify: restart crio + +- name: cri-o | write cri-o proxy drop-in + template: + src: http-proxy.conf.j2 + dest: /etc/systemd/system/crio.service.d/http-proxy.conf + mode: 0644 + notify: restart crio + when: http_proxy is defined or https_proxy is defined + +- name: cri-o | configure the uid/gid space for user namespaces + lineinfile: + path: '{{ item.path }}' + line: '{{ item.entry }}' + regex: '^\s*{{ crio_remap_user }}:' + state: '{{ "present" if crio_remap_enable | bool else "absent" }}' + loop: + - path: /etc/subuid + entry: '{{ crio_remap_user }}:{{ crio_subuid_start }}:{{ crio_subuid_length }}' + - path: /etc/subgid + entry: '{{ crio_remap_user }}:{{ crio_subgid_start }}:{{ crio_subgid_length }}' + loop_control: + label: '{{ item.path }}' + +- name: cri-o | ensure crio service is started and enabled + service: + name: crio + daemon_reload: true + enabled: true + state: started + register: service_start + +- name: cri-o | trigger service restart only when needed + service: # noqa 503 + name: crio + state: restarted + when: + - config_install.changed + - reg_auth_install.changed + - not service_start.changed + +- name: cri-o | verify that crio is running + command: "{{ bin_dir }}/crio-status info" + register: get_crio_info + until: get_crio_info is succeeded + changed_when: false + retries: 5 + delay: "{{ retry_stagger | random + 3 }}" diff --git a/kubespray/roles/container-engine/cri-o/tasks/reset.yml b/kubespray/roles/container-engine/cri-o/tasks/reset.yml new file mode 100644 index 0000000..f5e0e54 --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/tasks/reset.yml @@ -0,0 +1,101 @@ +--- +- name: CRI-O | Kubic repo name for debian os family + set_fact: + crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}" + when: ansible_os_family == "Debian" + tags: + - reset_crio + +- name: CRI-O | Remove kubic apt repo + apt_repository: + repo: "deb http://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/ /" + state: absent + when: crio_kubic_debian_repo_name is defined + tags: + - reset_crio + +- name: CRI-O | Remove cri-o apt repo + apt_repository: + repo: "deb {{ 
crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /" + state: absent + filename: devel-kubic-libcontainers-stable-cri-o + when: crio_kubic_debian_repo_name is defined + tags: + - reset_crio + +- name: CRI-O | Remove CRI-O kubic yum repo + yum_repository: + name: devel_kubic_libcontainers_stable + state: absent + when: ansible_distribution in ["Amazon"] + tags: + - reset_crio + +- name: CRI-O | Remove CRI-O kubic yum repo + yum_repository: + name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}" + state: absent + when: + - ansible_os_family == "RedHat" + - ansible_distribution not in ["Amazon", "Fedora"] + tags: + - reset_crio + +- name: CRI-O | Run yum-clean-metadata + command: yum clean metadata + args: + warn: no + when: + - ansible_os_family == "RedHat" + tags: + - reset_crio + +- name: CRI-O | Remove crictl + file: + name: "{{ item }}" + state: absent + loop: + - /etc/crictl.yaml + - "{{ bin_dir }}/crictl" + tags: + - reset_crio + +- name: CRI-O | Stop crio service + service: + name: crio + daemon_reload: true + enabled: false + masked: true + state: stopped + tags: + - reset_crio + +- name: CRI-O | Remove CRI-O configuration files + file: + name: "{{ item }}" + state: absent + loop: + - /etc/crio + - /etc/containers + - /etc/systemd/system/crio.service.d + tags: + - reset_crio + +- name: CRI-O | Remove dpkg hold + dpkg_selections: + name: "{{ item }}" + selection: install + when: ansible_pkg_mgr == 'apt' + changed_when: false + with_items: "{{ crio_packages }}" + tags: + - reset_crio + +- name: CRI-O | Uninstall CRI-O package + package: + name: "{{ item }}" + state: absent + when: not is_ostree + with_items: "{{ crio_packages }}" + tags: + - reset_crio diff --git a/kubespray/roles/container-engine/cri-o/tasks/setup-amazon.yaml b/kubespray/roles/container-engine/cri-o/tasks/setup-amazon.yaml new file mode 100644 index 0000000..3690367 --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/tasks/setup-amazon.yaml @@ -0,0 +1,38 @@ +--- +- name: Check that amzn2-extras.repo exists + stat: + path: /etc/yum.repos.d/amzn2-extras.repo + register: amzn2_extras_file_stat + +- name: Find docker repo in amzn2-extras.repo file + lineinfile: + dest: /etc/yum.repos.d/amzn2-extras.repo + line: "[amzn2extra-docker]" + check_mode: yes + register: amzn2_extras_docker_repo + when: + - amzn2_extras_file_stat.stat.exists + +- name: Remove docker repository + ini_file: + dest: /etc/yum.repos.d/amzn2-extras.repo + section: amzn2extra-docker + option: enabled + value: "0" + backup: yes + mode: 0644 + when: + - amzn2_extras_file_stat.stat.exists + - not amzn2_extras_docker_repo.changed + +- name: Add container-selinux yum repo + yum_repository: + name: copr:copr.fedorainfracloud.org:lsm5:container-selinux + file: _copr_lsm5-container-selinux.repo + description: Copr repo for container-selinux owned by lsm5 + baseurl: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/epel-7-$basearch/ + gpgcheck: yes + gpgkey: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/pubkey.gpg + skip_if_unavailable: yes + enabled: yes + repo_gpgcheck: no diff --git a/kubespray/roles/container-engine/cri-o/templates/config.json.j2 b/kubespray/roles/container-engine/cri-o/templates/config.json.j2 new file mode 100644 index 0000000..522ade7 --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/templates/config.json.j2 @@ -0,0 +1,17 @@ +{% if crio_registry_auth is defined and crio_registry_auth|length %} +{ +{% for reg in crio_registry_auth %} +
"auths": { + "{{ reg.registry }}": { + "auth": "{{ (reg.username + ':' + reg.password) | string | b64encode }}" + } +{% if not loop.last %} + }, +{% else %} + } +{% endif %} +{% endfor %} +} +{% else %} +{} +{% endif %} diff --git a/kubespray/roles/container-engine/cri-o/templates/crio.conf.j2 b/kubespray/roles/container-engine/cri-o/templates/crio.conf.j2 new file mode 100644 index 0000000..1a25e09 --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/templates/crio.conf.j2 @@ -0,0 +1,369 @@ +# The CRI-O configuration file specifies all of the available configuration +# options and command-line flags for the crio(8) OCI Kubernetes Container Runtime +# daemon, but in a TOML format that can be more easily modified and versioned. +# +# Please refer to crio.conf(5) for details of all configuration options. + +# CRI-O supports partial configuration reload during runtime, which can be +# done by sending SIGHUP to the running process. Currently supported options +# are explicitly mentioned with: 'This option supports live configuration +# reload'. + +# CRI-O reads its storage defaults from the containers-storage.conf(5) file +# located at /etc/containers/storage.conf. Modify this storage configuration if +# you want to change the system's defaults. If you want to modify storage just +# for CRI-O, you can change the storage configuration options here. +[crio] + +# Path to the "root directory". CRI-O stores all of its data, including +# containers images, in this directory. +root = "/var/lib/containers/storage" + +# Path to the "run directory". CRI-O stores all of its state in this directory. +runroot = "/var/run/containers/storage" + +# Storage driver used to manage the storage of images and containers. Please +# refer to containers-storage.conf(5) to see all available storage drivers. +{% if crio_storage_driver is defined %} +storage_driver = "{{ crio_storage_driver }}" +{% endif %} + +# List to pass options to the storage driver. Please refer to +# containers-storage.conf(5) to see all available storage options. +#storage_option = [ +#] + +# The default log directory where all logs will go unless directly specified by +# the kubelet. The log directory specified must be an absolute directory. +log_dir = "/var/log/crio/pods" + +# Location for CRI-O to lay down the temporary version file. +# It is used to check if crio wipe should wipe containers, which should +# always happen on a node reboot +version_file = "/var/run/crio/version" + +# Location for CRI-O to lay down the persistent version file. +# It is used to check if crio wipe should wipe images, which should +# only happen when CRI-O has been upgraded +version_file_persist = "/var/lib/crio/version" + +# The crio.api table contains settings for the kubelet/gRPC interface. +[crio.api] + +# Path to AF_LOCAL socket on which CRI-O will listen. +listen = "/var/run/crio/crio.sock" + +# IP address on which the stream server will listen. +stream_address = "127.0.0.1" + +# The port on which the stream server will listen. If the port is set to "0", then +# CRI-O will allocate a random free port number. +stream_port = "{{ crio_stream_port }}" + +# Enable encrypted TLS transport of the stream server. +stream_enable_tls = false + +# Path to the x509 certificate file used to serve the encrypted stream. This +# file can change, and CRI-O will automatically pick up the changes within 5 +# minutes. +stream_tls_cert = "" + +# Path to the key file used to serve the encrypted stream. 
This file can +# change and CRI-O will automatically pick up the changes within 5 minutes. +stream_tls_key = "" + +# Path to the x509 CA(s) file used to verify and authenticate client +# communication with the encrypted stream. This file can change and CRI-O will +# automatically pick up the changes within 5 minutes. +stream_tls_ca = "" + +# Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 16 * 1024 * 1024. +grpc_max_send_msg_size = 16777216 + +# Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 16 * 1024 * 1024. +grpc_max_recv_msg_size = 16777216 + +# The crio.runtime table contains settings pertaining to the OCI runtime used +# and options for how to set up and manage the OCI runtime. +[crio.runtime] + +# A list of ulimits to be set in containers by default, specified as +# "<name>=<soft limit>:<hard limit>", for example: +# "nofile=1024:2048" +# If nothing is set here, settings will be inherited from the CRI-O daemon +#default_ulimits = [ +#] + +# default_runtime is the _name_ of the OCI runtime to be used as the default. +# The name is matched against the runtimes map below. +default_runtime = "runc" + +# If true, the runtime will not use pivot_root, but instead use MS_MOVE. +no_pivot = false + +# decryption_keys_path is the path where the keys required for +# image decryption are stored. This option supports live configuration reload. +decryption_keys_path = "/etc/crio/keys/" + +# Path to the conmon binary, used for monitoring the OCI runtime. +# Will be searched for using $PATH if empty. +conmon = "{{ crio_conmon }}" + +# Cgroup setting for conmon +{% if crio_cgroup_manager == "cgroupfs" %} +conmon_cgroup = "pod" +{% else %} +conmon_cgroup = "system.slice" +{% endif %} + +# Environment variable list for the conmon process, used for passing necessary +# environment variables to conmon or the runtime. +conmon_env = [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", +] + +# Additional environment variables to set for all the +# containers. These are overridden if set in the +# container image spec or in the container runtime configuration. +default_env = [ +] + +# If true, SELinux will be used for pod separation on the host. +selinux = {{ crio_selinux }} + +# Path to the seccomp.json profile which is used as the default seccomp profile +# for the runtime. If not specified, then the internal default seccomp profile +# will be used. This option supports live configuration reload. +seccomp_profile = "{{ crio_seccomp_profile }}" + +# Used to change the name of the default AppArmor profile of CRI-O. The default +# profile name is "crio-default". This profile only takes effect if the user +# does not specify a profile via the Kubernetes Pod's metadata annotation. If +# the profile is set to "unconfined", then this is equivalent to disabling AppArmor. +# This option supports live configuration reload. +# apparmor_profile = "crio-default" + +# Cgroup management implementation used for the runtime. +cgroup_manager = "{{ crio_cgroup_manager }}" + +# List of default capabilities for containers. If it is empty or commented out, +# only the capabilities defined in the containers json file by the user/kube +# will be added. +default_capabilities = [ + "CHOWN", + "DAC_OVERRIDE", + "FSETID", + "FOWNER", + "NET_RAW", + "SETGID", + "SETUID", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL", +] + +# List of default sysctls.
If it is empty or commented out, only the sysctls +# defined in the container json file by the user/kube will be added. +default_sysctls = [ +] + +# List of additional devices, specified as +# "<device on host>:<device in container>:<permissions>", for example: "--device=/dev/sdc:/dev/xvdc:rwm". +# If it is empty or commented out, only the devices +# defined in the container json file by the user/kube will be added. +additional_devices = [ +] + +# Path to OCI hooks directories for automatically executed hooks. If one of the +# directories does not exist, then CRI-O will automatically skip them. +hooks_dir = [ + "/usr/share/containers/oci/hooks.d", +] + +# List of default mounts for each container. **Deprecated:** this option will +# be removed in future versions in favor of default_mounts_file. +default_mounts = [ +] + +# Path to the file specifying the default mounts for each container. The +# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads +# its default mounts from the following two files: +# +# 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the +# override file, where users can either add in their own default mounts, or +# override the default mounts shipped with the package. +# +# 2) /usr/share/containers/mounts.conf: This is the default file read for +# mounts. If you want CRI-O to read from a different, specific mounts file, +# you can change the default_mounts_file. Note, if this is done, CRI-O will +# only add mounts it finds in this file. +# +#default_mounts_file = "" + +# Maximum number of processes allowed in a container. +pids_limit = {{ crio_pids_limit }} + +# Maximum size allowed for the container log file. Negative numbers indicate +# that no size limit is imposed. If it is positive, it must be >= 8192 to +# match/exceed conmon's read buffer. The file is truncated and re-opened so the +# limit is never exceeded. +log_size_max = -1 + +# Whether container output should be logged to journald in addition to the kubernetes log file +log_to_journald = false + +# Path to directory in which container exit files are written by conmon. +container_exits_dir = "/var/run/crio/exits" + +# Path to directory for container attach sockets. +container_attach_socket_dir = "/var/run/crio" + +# The prefix to use for the source of the bind mounts. +bind_mount_prefix = "" + +# If set to true, all containers will run in read-only mode. +read_only = false + +# Changes the verbosity of the logs based on the level it is set to. Options +# are fatal, panic, error, warn, info, debug and trace. This option supports +# live configuration reload. +log_level = "{{ crio_log_level }}" + +# Filter the log messages by the provided regular expression. +# This option supports live configuration reload. +log_filter = "" + +# The UID mappings for the user namespace of each container. A range is +# specified in the form containerUID:HostUID:Size. Multiple ranges must be +# separated by comma. +uid_mappings = "" + +# The GID mappings for the user namespace of each container. A range is +# specified in the form containerGID:HostGID:Size. Multiple ranges must be +# separated by comma. +gid_mappings = "" + +# The minimal amount of time in seconds to wait before issuing a timeout +# regarding the proper termination of the container. The lowest possible +# value is 30s, whereas lower values are not considered by CRI-O. +ctr_stop_timeout = 30 + +# **DEPRECATED** this option is being replaced by manage_ns_lifecycle, which is described below.
+# manage_network_ns_lifecycle = false + +# manage_ns_lifecycle determines whether we pin and remove namespaces +# and manage their lifecycle +{% if kata_containers_enabled %} +manage_ns_lifecycle = true +{% else %} +manage_ns_lifecycle = false +{% endif %} + +# The directory where the state of the managed namespaces gets tracked. +# Only used when manage_ns_lifecycle is true. +namespaces_dir = "/var/run" + +# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle +pinns_path = "" + +# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. +# The runtime to use is picked based on the runtime_handler provided by the CRI. +# If no runtime_handler is provided, the runtime will be picked based on the level +# of trust of the workload. Each entry in the table should follow the format: +# +#[crio.runtime.runtimes.runtime-handler] +# runtime_path = "/path/to/the/executable" +# runtime_type = "oci" +# runtime_root = "/path/to/the/root" +# +# Where: +# - runtime-handler: name used to identify the runtime +# - runtime_path (optional, string): absolute path to the runtime executable in +# the host filesystem. If omitted, the runtime-handler identifier should match +# the runtime executable name, and the runtime executable should be placed +# in $PATH. +# - runtime_type (optional, string): type of runtime, one of: "oci", "vm". If +# omitted, an "oci" runtime is assumed. +# - runtime_root (optional, string): root directory for storage of containers +# state. + +{% for runtime in crio_runtimes %} +[crio.runtime.runtimes.{{ runtime.name }}] +runtime_path = "{{ runtime.path }}" +runtime_type = "{{ runtime.type }}" +runtime_root = "{{ runtime.root }}" +privileged_without_host_devices = {{ runtime.privileged_without_host_devices|default(false)|lower }} +allowed_annotations = {{ runtime.allowed_annotations|default([])|to_json }} +{% endfor %} + +# Kata Containers with the Firecracker VMM +#[crio.runtime.runtimes.kata-fc] + +# The crio.image table contains settings pertaining to the management of OCI images. +# +# CRI-O reads its configured registries defaults from the system wide +# containers-registries.conf(5) located in /etc/containers/registries.conf. If +# you want to modify just CRI-O, you can change the registries configuration in +# this file. Otherwise, leave insecure_registries and registries commented out to +# use the system's defaults from /etc/containers/registries.conf. +[crio.image] + +# Default transport for pulling images from a remote container storage. +default_transport = "docker://" + +# The path to a file containing credentials necessary for pulling images from +# secure registries. The file is similar to that of /var/lib/kubelet/config.json +global_auth_file = "/etc/crio/config.json" + +# The image used to instantiate infra containers. +# This option supports live configuration reload. +pause_image = "{{ crio_pause_image }}" + +# The path to a file containing credentials specific for pulling the pause_image from +# above. The file is similar to that of /var/lib/kubelet/config.json +# This option supports live configuration reload. +pause_image_auth_file = "" + +# The command to run to have a container stay in the paused state. +# When explicitly set to "", it will fallback to the entrypoint and command +# specified in the pause image. When commented out, it will fallback to the +# default: "/pause". This option supports live configuration reload. 
+pause_command = "/pause" + +# Path to the file which decides what sort of policy we use when deciding +# whether or not to trust an image that we've pulled. It is not recommended that +# this option be used, as the default behavior of using the system-wide default +# policy (i.e., /etc/containers/policy.json) is most often preferred. Please +# refer to containers-policy.json(5) for more details. +signature_policy = "{{ crio_signature_policy }}" + +# Controls how image volumes are handled. The valid values are mkdir, bind and +# ignore; the latter will ignore volumes entirely. +image_volumes = "mkdir" + +# The crio.network table contains settings pertaining to the management of +# CNI plugins. +[crio.network] + +# The default CNI network name to be selected. If not set or "", then +# CRI-O will pick up the first one found in network_dir. +# cni_default_network = "" + +# Path to the directory where CNI configuration files are located. +network_dir = "/etc/cni/net.d/" + +# Paths to directories where CNI plugin binaries are located. +plugin_dirs = [ + "/opt/cni/bin", + "/usr/libexec/cni", +] + +# A necessary configuration for Prometheus-based metrics retrieval +[crio.metrics] + +# Globally enable or disable metrics support. +enable_metrics = {{ crio_enable_metrics | bool | lower }} + +# The port on which the metrics server will listen. +metrics_port = {{ crio_metrics_port }} diff --git a/kubespray/roles/container-engine/cri-o/templates/http-proxy.conf.j2 b/kubespray/roles/container-engine/cri-o/templates/http-proxy.conf.j2 new file mode 100644 index 0000000..212f30f --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/templates/http-proxy.conf.j2 @@ -0,0 +1,2 @@ +[Service] +Environment={% if http_proxy is defined %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy is defined %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy is defined %}"NO_PROXY={{ no_proxy }}"{% endif %} diff --git a/kubespray/roles/container-engine/cri-o/templates/registry.conf.j2 b/kubespray/roles/container-engine/cri-o/templates/registry.conf.j2 new file mode 100644 index 0000000..38368f9 --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/templates/registry.conf.j2 @@ -0,0 +1,13 @@ +[[registry]] +prefix = "{{ item.prefix | default(item.location) }}" +insecure = {{ item.insecure | default('false') | string | lower }} +blocked = {{ item.blocked | default('false') | string | lower }} +location = "{{ item.location }}" +{% if item.mirrors is defined %} +{% for mirror in item.mirrors %} + +[[registry.mirror]] +location = "{{ mirror.location }}" +insecure = {{ mirror.insecure | default('false') | string | lower }} +{% endfor %} +{% endif %} diff --git a/kubespray/roles/container-engine/cri-o/templates/unqualified.conf.j2 b/kubespray/roles/container-engine/cri-o/templates/unqualified.conf.j2 new file mode 100644 index 0000000..fc91f8b --- /dev/null +++ b/kubespray/roles/container-engine/cri-o/templates/unqualified.conf.j2 @@ -0,0 +1,10 @@ +{%- set _unqualified_registries = [] -%} +{% for _registry in crio_registries if _registry.unqualified -%} +{% if _registry.prefix is defined -%} +{{ _unqualified_registries.append(_registry.prefix) }} +{% else %} +{{ _unqualified_registries.append(_registry.location) }} +{%- endif %} +{%- endfor %} + +unqualified-search-registries = {{ _unqualified_registries | string }} diff --git a/kubespray/roles/container-engine/crictl/handlers/main.yml b/kubespray/roles/container-engine/crictl/handlers/main.yml new file mode 100644 index 0000000..5319586 --- /dev/null
+++ b/kubespray/roles/container-engine/crictl/handlers/main.yml @@ -0,0 +1,12 @@ +--- +- name: Get crictl completion + command: "{{ bin_dir }}/crictl completion" + changed_when: False + register: cri_completion + check_mode: false + +- name: Install crictl completion + copy: + dest: /etc/bash_completion.d/crictl + content: "{{ cri_completion.stdout }}" + mode: 0644 diff --git a/kubespray/roles/container-engine/crictl/tasks/crictl.yml b/kubespray/roles/container-engine/crictl/tasks/crictl.yml new file mode 100644 index 0000000..36e09e4 --- /dev/null +++ b/kubespray/roles/container-engine/crictl/tasks/crictl.yml @@ -0,0 +1,22 @@ +--- +- name: crictl | Download crictl + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.crictl) }}" + +- name: Install crictl config + template: + src: crictl.yaml.j2 + dest: /etc/crictl.yaml + owner: root + mode: 0644 + +- name: Copy crictl binary from download dir + copy: + src: "{{ local_release_dir }}/crictl" + dest: "{{ bin_dir }}/crictl" + mode: 0755 + remote_src: true + notify: + - Get crictl completion + - Install crictl completion diff --git a/kubespray/roles/container-engine/crictl/tasks/main.yml b/kubespray/roles/container-engine/crictl/tasks/main.yml new file mode 100644 index 0000000..99ed216 --- /dev/null +++ b/kubespray/roles/container-engine/crictl/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: install crictl + include_tasks: crictl.yml diff --git a/kubespray/roles/container-engine/crictl/templates/crictl.yaml.j2 b/kubespray/roles/container-engine/crictl/templates/crictl.yaml.j2 new file mode 100644 index 0000000..b97dbef --- /dev/null +++ b/kubespray/roles/container-engine/crictl/templates/crictl.yaml.j2 @@ -0,0 +1,4 @@ +runtime-endpoint: {{ cri_socket }} +image-endpoint: {{ cri_socket }} +timeout: 30 +debug: false diff --git a/kubespray/roles/container-engine/crun/defaults/main.yml b/kubespray/roles/container-engine/crun/defaults/main.yml new file mode 100644 index 0000000..65e08d7 --- /dev/null +++ b/kubespray/roles/container-engine/crun/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +crun_bin_dir: /usr/bin/ diff --git a/kubespray/roles/container-engine/crun/tasks/main.yml b/kubespray/roles/container-engine/crun/tasks/main.yml new file mode 100644 index 0000000..d541a49 --- /dev/null +++ b/kubespray/roles/container-engine/crun/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: crun | Download crun binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.crun) }}" + +- name: Copy crun binary from download dir + copy: + src: "{{ local_release_dir }}/crun" + dest: "{{ crun_bin_dir }}/crun" + mode: 0755 + remote_src: true diff --git a/kubespray/roles/container-engine/docker-storage/defaults/main.yml b/kubespray/roles/container-engine/docker-storage/defaults/main.yml new file mode 100644 index 0000000..6a69556 --- /dev/null +++ b/kubespray/roles/container-engine/docker-storage/defaults/main.yml @@ -0,0 +1,19 @@ +--- +docker_container_storage_setup_repository: https://github.com/projectatomic/container-storage-setup.git +docker_container_storage_setup_version: v0.6.0 +docker_container_storage_setup_profile_name: kubespray +docker_container_storage_setup_storage_driver: devicemapper +docker_container_storage_setup_container_thinpool: docker-pool +# A disk path must be defined via docker_container_storage_setup_devs. +# Otherwise docker-storage-setup will run incorrectly.
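# For example (editorial illustration only; the device below is an assumed spare block device and would normally be set in group_vars rather than in these role defaults):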
+# docker_container_storage_setup_devs: /dev/vdb +docker_container_storage_setup_data_size: 40%FREE +docker_container_storage_setup_min_data_size: 2G +docker_container_storage_setup_chunk_size: 512K +docker_container_storage_setup_growpart: "false" +docker_container_storage_setup_auto_extend_pool: "yes" +docker_container_storage_setup_pool_autoextend_threshold: 60 +docker_container_storage_setup_pool_autoextend_percent: 20 +docker_container_storage_setup_device_wait_timeout: 60 +docker_container_storage_setup_wipe_signatures: "false" +docker_container_storage_setup_container_root_lv_size: 40%FREE diff --git a/kubespray/roles/container-engine/docker-storage/files/install_container_storage_setup.sh b/kubespray/roles/container-engine/docker-storage/files/install_container_storage_setup.sh new file mode 100644 index 0000000..604c843 --- /dev/null +++ b/kubespray/roles/container-engine/docker-storage/files/install_container_storage_setup.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +set -e + +repository=${1:-https://github.com/projectatomic/container-storage-setup.git} +version=${2:-master} +profile_name=${3:-kubespray} +dir=`mktemp -d` +export GIT_DIR=$dir/.git +export GIT_WORK_TREE=$dir + +git init +git fetch --depth 1 $repository $version +git merge FETCH_HEAD +make -C $dir install +rm -rf /var/lib/container-storage-setup/$profile_name $dir + +set +e + +/usr/bin/container-storage-setup create $profile_name /etc/sysconfig/docker-storage-setup && /usr/bin/container-storage-setup activate $profile_name +# FIXME: exit status can be 1 for both fatal and non fatal errors in current release, +# could be improved by matching error strings +exit 0 diff --git a/kubespray/roles/container-engine/docker-storage/tasks/main.yml b/kubespray/roles/container-engine/docker-storage/tasks/main.yml new file mode 100644 index 0000000..4629381 --- /dev/null +++ b/kubespray/roles/container-engine/docker-storage/tasks/main.yml @@ -0,0 +1,48 @@ +--- + +- name: docker-storage-setup | install git and make + with_items: [git, make] + package: + pkg: "{{ item }}" + state: present + +- name: docker-storage-setup | docker-storage-setup sysconfig template + template: + src: docker-storage-setup.j2 + dest: /etc/sysconfig/docker-storage-setup + mode: 0644 + +- name: docker-storage-override-directory | docker service storage-setup override dir + file: + dest: /etc/systemd/system/docker.service.d + mode: 0755 + owner: root + group: root + state: directory + +- name: docker-storage-override | docker service storage-setup override file + copy: + dest: /etc/systemd/system/docker.service.d/override.conf + content: |- + ### This file is managed by Ansible + [Service] + EnvironmentFile=-/etc/sysconfig/docker-storage + + owner: root + group: root + mode: 0644 + +# https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository +- name: docker-storage-setup | install lvm2 + package: + name: lvm2 + state: present + +- name: docker-storage-setup | install and run container-storage-setup + become: yes + script: | + install_container_storage_setup.sh \ + {{ docker_container_storage_setup_repository }} \ + {{ docker_container_storage_setup_version }} \ + {{ docker_container_storage_setup_profile_name }} + notify: Docker | reload systemd diff --git a/kubespray/roles/container-engine/docker-storage/templates/docker-storage-setup.j2 b/kubespray/roles/container-engine/docker-storage/templates/docker-storage-setup.j2 new file mode 100644 index 0000000..1a502b2 --- /dev/null +++ 
b/kubespray/roles/container-engine/docker-storage/templates/docker-storage-setup.j2 @@ -0,0 +1,35 @@ +{%if docker_container_storage_setup_storage_driver is defined%}STORAGE_DRIVER={{docker_container_storage_setup_storage_driver}}{%endif%} + +{%if docker_container_storage_setup_extra_storage_options is defined%}EXTRA_STORAGE_OPTIONS={{docker_container_storage_setup_extra_storage_options}}{%endif%} + +{%if docker_container_storage_setup_devs is defined%}DEVS={{docker_container_storage_setup_devs}}{%endif%} + +{%if docker_container_storage_setup_container_thinpool is defined%}CONTAINER_THINPOOL={{docker_container_storage_setup_container_thinpool}}{%endif%} + +{%if docker_container_storage_setup_vg is defined%}VG={{docker_container_storage_setup_vg}}{%endif%} + +{%if docker_container_storage_setup_root_size is defined%}ROOT_SIZE={{docker_container_storage_setup_root_size}}{%endif%} + +{%if docker_container_storage_setup_data_size is defined%}DATA_SIZE={{docker_container_storage_setup_data_size}}{%endif%} + +{%if docker_container_storage_setup_min_data_size is defined%}MIN_DATA_SIZE={{docker_container_storage_setup_min_data_size}}{%endif%} + +{%if docker_container_storage_setup_chunk_size is defined%}CHUNK_SIZE={{docker_container_storage_setup_chunk_size}}{%endif%} + +{%if docker_container_storage_setup_growpart is defined%}GROWPART={{docker_container_storage_setup_growpart}}{%endif%} + +{%if docker_container_storage_setup_auto_extend_pool is defined%}AUTO_EXTEND_POOL={{docker_container_storage_setup_auto_extend_pool}}{%endif%} + +{%if docker_container_storage_setup_pool_autoextend_threshold is defined%}POOL_AUTOEXTEND_THRESHOLD={{docker_container_storage_setup_pool_autoextend_threshold}}{%endif%} + +{%if docker_container_storage_setup_pool_autoextend_percent is defined%}POOL_AUTOEXTEND_PERCENT={{docker_container_storage_setup_pool_autoextend_percent}}{%endif%} + +{%if docker_container_storage_setup_device_wait_timeout is defined%}DEVICE_WAIT_TIMEOUT={{docker_container_storage_setup_device_wait_timeout}}{%endif%} + +{%if docker_container_storage_setup_wipe_signatures is defined%}WIPE_SIGNATURES={{docker_container_storage_setup_wipe_signatures}}{%endif%} + +{%if docker_container_storage_setup_container_root_lv_name is defined%}CONTAINER_ROOT_LV_NAME={{docker_container_storage_setup_container_root_lv_name}}{%endif%} + +{%if docker_container_storage_setup_container_root_lv_size is defined%}CONTAINER_ROOT_LV_SIZE={{docker_container_storage_setup_container_root_lv_size}}{%endif%} + +{%if docker_container_storage_setup_container_root_lv_mount_path is defined%}CONTAINER_ROOT_LV_MOUNT_PATH={{docker_container_storage_setup_container_root_lv_mount_path}}{%endif%} diff --git a/kubespray/roles/container-engine/docker/defaults/main.yml b/kubespray/roles/container-engine/docker/defaults/main.yml new file mode 100644 index 0000000..91227f9 --- /dev/null +++ b/kubespray/roles/container-engine/docker/defaults/main.yml @@ -0,0 +1,64 @@ +--- +docker_version: '20.10' +docker_cli_version: "{{ docker_version }}" + +docker_package_info: + pkgs: + +docker_repo_key_info: + repo_keys: + +docker_repo_info: + repos: + +docker_cgroup_driver: systemd + +docker_bin_dir: "/usr/bin" + +# flag to enable/disable docker cleanup +docker_orphan_clean_up: false + +# old docker package names to be removed +docker_remove_packages_yum: + - docker + - docker-common + - docker-engine + - docker-selinux.noarch + - docker-client + - docker-client-latest + - docker-latest + - docker-latest-logrotate + - docker-logrotate + - 
docker-engine-selinux.noarch + +# remove podman to avoid a containerd.io conflict +podman_remove_packages_yum: + - podman + +docker_remove_packages_apt: + - docker + - docker-engine + - docker.io + +# Docker-specific repos should be part of the docker role, not containerd-common anymore +# Optional values for containerd apt repo +containerd_package_info: + pkgs: + +# Fedora docker-ce repo +docker_fedora_repo_base_url: 'https://download.docker.com/linux/fedora/{{ ansible_distribution_major_version }}/$basearch/stable' +docker_fedora_repo_gpgkey: 'https://download.docker.com/linux/fedora/gpg' + +# CentOS/RedHat docker-ce repo +docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/{{ ansible_distribution_major_version }}/$basearch/stable' +docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg' + +# Ubuntu docker-ce repo +docker_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu" +docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg' +docker_ubuntu_repo_repokey: '9DC858229FC7DD38854AE2D88D81803C0EBFCD88' + +# Debian docker-ce repo +docker_debian_repo_base_url: "https://download.docker.com/linux/debian" +docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg' +docker_debian_repo_repokey: '9DC858229FC7DD38854AE2D88D81803C0EBFCD88' diff --git a/kubespray/roles/container-engine/docker/files/cleanup-docker-orphans.sh b/kubespray/roles/container-engine/docker/files/cleanup-docker-orphans.sh new file mode 100644 index 0000000..d7a9a8f --- /dev/null +++ b/kubespray/roles/container-engine/docker/files/cleanup-docker-orphans.sh @@ -0,0 +1,38 @@ +#!/bin/bash +list_descendants () +{ + local children=$(ps -o pid= --ppid "$1") + for pid in $children + do + list_descendants "$pid" + done + [[ -n "$children" ]] && echo "$children" +} + +shim_search="^docker-containerd-shim|^containerd-shim" +count_shim_processes=$(pgrep -f $shim_search | wc -l) + +if [ ${count_shim_processes} -gt 0 ]; then + # Find all container pids from shims + orphans=$(pgrep -P $(pgrep -d ',' -f $shim_search) |\ + # Filter out valid docker pids, leaving the orphans + egrep -v $(docker ps -q | xargs docker inspect --format '{{.State.Pid}}' | awk '{printf "%s%s",sep,$1; sep="|"}')) + + if [[ -n "$orphans" && -n "$(ps -o ppid= $orphans)" ]] + then + # Get shim pids of orphans + orphan_shim_pids=$(ps -o pid= $(ps -o ppid= $orphans)) + + # Find all orphaned container PIDs + orphan_container_pids=$(for pid in $orphan_shim_pids; do list_descendants $pid; done) + + # Recursively kill all child PIDs of orphan shims + echo -e "Killing orphan container PIDs and descendants: \n$(ps -O ppid= $orphan_container_pids)" + kill -9 $orphan_container_pids || true + + else + echo "No orphaned containers found" + fi +else + echo "The node doesn't have any shim processes."
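# (Editorial note, not part of the committed script: elsewhere in this change the docker-orphan-cleanup.conf.j2 drop-in wires this script into the docker unit via ExecStartPost, so it runs after each docker start when docker_orphan_clean_up is enabled.)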
+fi diff --git a/kubespray/roles/container-engine/docker/handlers/main.yml b/kubespray/roles/container-engine/docker/handlers/main.yml new file mode 100644 index 0000000..8c26de2 --- /dev/null +++ b/kubespray/roles/container-engine/docker/handlers/main.yml @@ -0,0 +1,32 @@ +--- +- name: restart docker + command: /bin/true + notify: + - Docker | reload systemd + - Docker | reload docker.socket + - Docker | reload docker + - Docker | wait for docker + +- name: Docker | reload systemd + systemd: + name: docker + daemon_reload: true + masked: no + +- name: Docker | reload docker.socket + service: + name: docker.socket + state: restarted + when: ansible_os_family in ['Flatcar', 'Flatcar Container Linux by Kinvolk'] or is_fedora_coreos + +- name: Docker | reload docker + service: + name: docker + state: restarted + +- name: Docker | wait for docker + command: "{{ docker_bin_dir }}/docker images" + register: docker_ready + retries: 20 + delay: 1 + until: docker_ready.rc == 0 diff --git a/kubespray/roles/container-engine/docker/meta/main.yml b/kubespray/roles/container-engine/docker/meta/main.yml new file mode 100644 index 0000000..d7e4751 --- /dev/null +++ b/kubespray/roles/container-engine/docker/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - role: container-engine/containerd-common + - role: container-engine/docker-storage + when: docker_container_storage_setup and ansible_os_family == "RedHat" diff --git a/kubespray/roles/container-engine/docker/tasks/docker_plugin.yml b/kubespray/roles/container-engine/docker/tasks/docker_plugin.yml new file mode 100644 index 0000000..8ee530e --- /dev/null +++ b/kubespray/roles/container-engine/docker/tasks/docker_plugin.yml @@ -0,0 +1,8 @@ +--- +- name: Install Docker plugin + command: docker plugin install --grant-all-permissions {{ docker_plugin | quote }} + when: docker_plugin is defined + register: docker_plugin_status + failed_when: + - docker_plugin_status.failed + - '"already exists" not in docker_plugin_status.stderr' diff --git a/kubespray/roles/container-engine/docker/tasks/main.yml b/kubespray/roles/container-engine/docker/tasks/main.yml new file mode 100644 index 0000000..ae7b574 --- /dev/null +++ b/kubespray/roles/container-engine/docker/tasks/main.yml @@ -0,0 +1,177 @@ +--- +- name: check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: set docker_version for openEuler + set_fact: + docker_version: '19.03' + when: ansible_distribution == "openEuler" + tags: + - facts + +- name: gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_distribution.split(' ')[0]|lower }}.yml" + - "{{ ansible_os_family|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + skip: 
true + tags: + - facts + +- name: Warn about Docker version on SUSE + debug: + msg: "SUSE distributions always install Docker from the distro repos" + when: ansible_pkg_mgr == 'zypper' + +- include_tasks: set_facts_dns.yml + when: dns_mode != 'none' and resolvconf_mode == 'docker_dns' + tags: + - facts + +- import_tasks: pre-upgrade.yml + +- name: ensure docker-ce repository public key is installed + apt_key: + id: "{{ item }}" + url: "{{ docker_repo_key_info.url }}" + state: present + register: keyserver_task_result + until: keyserver_task_result is succeeded + retries: 4 + delay: "{{ retry_stagger | d(3) }}" + with_items: "{{ docker_repo_key_info.repo_keys }}" + environment: "{{ proxy_env }}" + when: ansible_pkg_mgr == 'apt' + +- name: ensure docker-ce repository is enabled + apt_repository: + repo: "{{ item }}" + state: present + with_items: "{{ docker_repo_info.repos }}" + when: ansible_pkg_mgr == 'apt' + +- name: Configure docker repository on Fedora + template: + src: "fedora_docker.repo.j2" + dest: "{{ yum_repo_dir }}/docker.repo" + mode: 0644 + when: ansible_distribution == "Fedora" and not is_ostree + +- name: Configure docker repository on RedHat/CentOS/OracleLinux/AlmaLinux/KylinLinux + template: + src: "rh_docker.repo.j2" + dest: "{{ yum_repo_dir }}/docker-ce.repo" + mode: 0644 + when: + - ansible_os_family == "RedHat" + - ansible_distribution != "Fedora" + - not is_ostree + +- name: Remove dpkg hold + dpkg_selections: + name: "{{ item }}" + selection: install + when: ansible_pkg_mgr == 'apt' + changed_when: false + with_items: + - "{{ containerd_package }}" + - docker-ce + - docker-ce-cli + +- name: ensure docker packages are installed + package: + name: "{{ docker_package_info.pkgs }}" + state: "{{ docker_package_info.state | default('present') }}" + module_defaults: + apt: + update_cache: true + dnf: + enablerepo: "{{ docker_package_info.enablerepo | default(omit) }}" + disablerepo: "{{ docker_package_info.disablerepo | default(omit) }}" + yum: + enablerepo: "{{ docker_package_info.enablerepo | default(omit) }}" + zypper: + update_cache: true + register: docker_task_result + until: docker_task_result is succeeded + retries: 4 + delay: "{{ retry_stagger | d(3) }}" + notify: restart docker + when: + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + - not is_ostree + - docker_package_info.pkgs|length > 0 + +# This is required to ensure any apt upgrade will not break kubernetes +- name: Tell Debian hosts not to change the docker version with apt upgrade + dpkg_selections: + name: "{{ item }}" + selection: hold + when: ansible_pkg_mgr == 'apt' + changed_when: false + with_items: + - "{{ containerd_package }}" + - docker-ce + - docker-ce-cli + +- name: ensure docker started, remove our config if docker start failed and try again + block: + - name: ensure service is started if docker packages are already present + service: + name: docker + state: started + when: docker_task_result is not changed + rescue: + - debug: # noqa unnamed-task + msg: "Docker start failed. 
Try to remove our config" + - name: remove kubespray generated config + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/systemd/system/docker.service.d/http-proxy.conf + - /etc/systemd/system/docker.service.d/docker-options.conf + - /etc/systemd/system/docker.service.d/docker-dns.conf + - /etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf + notify: restart docker + +- name: flush handlers so we can wait for docker to come up + meta: flush_handlers + +# Install each plugin using a looped include to make error handling in the included task simpler. +- include_tasks: docker_plugin.yml + loop: "{{ docker_plugins }}" + loop_control: + loop_var: docker_plugin + +- name: Set docker systemd config + import_tasks: systemd.yml + +- name: ensure docker service is started and enabled + service: + name: "{{ item }}" + enabled: yes + state: started + with_items: + - docker diff --git a/kubespray/roles/container-engine/docker/tasks/pre-upgrade.yml b/kubespray/roles/container-engine/docker/tasks/pre-upgrade.yml new file mode 100644 index 0000000..f346b46 --- /dev/null +++ b/kubespray/roles/container-engine/docker/tasks/pre-upgrade.yml @@ -0,0 +1,36 @@ +--- +- name: Remove legacy docker repo file + file: + path: "{{ yum_repo_dir }}/docker.repo" + state: absent + when: + - ansible_os_family == 'RedHat' + - not is_ostree + +- name: Ensure old versions of Docker are not installed. | Debian + apt: + name: '{{ docker_remove_packages_apt }}' + state: absent + when: + - ansible_os_family == 'Debian' + - (docker_versioned_pkg[docker_version | string] is search('docker-ce')) + + +- name: Ensure podman not installed. | RedHat + package: + name: '{{ podman_remove_packages_yum }}' + state: absent + when: + - ansible_os_family == 'RedHat' + - (docker_versioned_pkg[docker_version | string] is search('docker-ce')) + - not is_ostree + + +- name: Ensure old versions of Docker are not installed. 
| RedHat + package: + name: '{{ docker_remove_packages_yum }}' + state: absent + when: + - ansible_os_family == 'RedHat' + - (docker_versioned_pkg[docker_version | string] is search('docker-ce')) + - not is_ostree diff --git a/kubespray/roles/container-engine/docker/tasks/reset.yml b/kubespray/roles/container-engine/docker/tasks/reset.yml new file mode 100644 index 0000000..76d125b --- /dev/null +++ b/kubespray/roles/container-engine/docker/tasks/reset.yml @@ -0,0 +1,106 @@ +--- + +- name: Docker | Get package facts + package_facts: + manager: auto + +- name: Docker | Find docker packages + set_fact: + docker_packages_list: "{{ ansible_facts.packages.keys() | select('search', '^docker*') }}" + containerd_package: "{{ ansible_facts.packages.keys() | select('search', '^containerd*') }}" + +- name: Docker | Stop all running container + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -q | xargs -r {{ docker_bin_dir }}/docker kill" + args: + executable: /bin/bash + register: stop_all_containers + retries: 5 + until: stop_all_containers.rc == 0 + changed_when: true + delay: 5 + ignore_errors: true # noqa ignore-errors + when: docker_packages_list|length>0 + +- name: reset | remove all containers + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv" + args: + executable: /bin/bash + register: remove_all_containers + retries: 4 + until: remove_all_containers.rc == 0 + delay: 5 + when: docker_packages_list|length>0 + +- name: Docker | Stop docker service + service: + name: "{{ item }}" + enabled: false + state: stopped + loop: + - docker + - docker.socket + - containerd + when: docker_packages_list|length>0 + +- name: Docker | Remove dpkg hold + dpkg_selections: + name: "{{ item }}" + selection: install + when: ansible_pkg_mgr == 'apt' + changed_when: false + with_items: + - "{{ docker_packages_list }}" + - "{{ containerd_package }}" + +- name: Docker | Remove docker package + package: + name: "{{ item }}" + state: absent + changed_when: false + with_items: + - "{{ docker_packages_list }}" + - "{{ containerd_package }}" + when: + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + - not is_ostree + - docker_packages_list|length > 0 + +- name: Docker | ensure docker-ce repository is removed + apt_repository: + repo: "{{ item }}" + state: absent + with_items: "{{ docker_repo_info.repos }}" + when: ansible_pkg_mgr == 'apt' + +- name: Docker | Remove docker repository on Fedora + file: + name: "{{ yum_repo_dir }}/docker.repo" + state: absent + when: ansible_distribution == "Fedora" and not is_ostree + +- name: Docker | Remove docker repository on RedHat/CentOS/Oracle/AlmaLinux Linux + file: + name: "{{ yum_repo_dir }}/docker-ce.repo" + state: absent + when: + - ansible_os_family == "RedHat" + - ansible_distribution != "Fedora" + - not is_ostree + +- name: Docker | Remove docker configuration files + file: + name: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/docker.service.d/ + - /etc/systemd/system/docker.socket + - /etc/systemd/system/docker.service + - /etc/systemd/system/containerd.service + - /etc/systemd/system/containerd.service.d + - /var/lib/docker + - /etc/docker + ignore_errors: true # noqa ignore-errors + +- name: Docker | systemctl daemon-reload # noqa 503 + systemd: + daemon_reload: true diff --git a/kubespray/roles/container-engine/docker/tasks/set_facts_dns.yml b/kubespray/roles/container-engine/docker/tasks/set_facts_dns.yml new file mode 100644 index 0000000..d800373 --- /dev/null +++ 
b/kubespray/roles/container-engine/docker/tasks/set_facts_dns.yml @@ -0,0 +1,66 @@ +--- + +- name: set dns server for docker + set_fact: + docker_dns_servers: "{{ dns_servers }}" + +- name: show docker_dns_servers + debug: + msg: "{{ docker_dns_servers }}" + +- name: add upstream dns servers + set_fact: + docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}" + when: dns_mode in ['coredns', 'coredns_dual'] + +- name: add global searchdomains + set_fact: + docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}" + +- name: check system nameservers + shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/' + args: + executable: /bin/bash + changed_when: False + register: system_nameservers + check_mode: no + +- name: check system search domains + # noqa 306 - if resolv.conf has no search domain, grep will exit 1 which would force us to add failed_when: false + # Therefore -o pipefail is not applicable in this specific instance + shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/' + args: + executable: /bin/bash + changed_when: False + register: system_search_domains + check_mode: no + +- name: add system nameservers to docker options + set_fact: + docker_dns_servers: "{{ docker_dns_servers | union(system_nameservers.stdout_lines) | unique }}" + when: system_nameservers.stdout + +- name: add system search domains to docker options + set_fact: + docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split()|default([])) | unique }}" + when: system_search_domains.stdout + +- name: check number of nameservers + fail: + msg: "Too many nameservers. You can relax this check by setting docker_dns_servers_strict=false in docker.yml; only the first 3 will then be used."
+ when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool + +- name: rtrim number of nameservers to 3 + set_fact: + docker_dns_servers: "{{ docker_dns_servers[0:3] }}" + when: docker_dns_servers|length > 3 and not docker_dns_servers_strict|bool + +- name: check number of search domains + fail: + msg: "Too many search domains" + when: docker_dns_search_domains|length > 6 + +- name: check length of search domains + fail: + msg: "Search domains exceeded limit of 256 characters" + when: docker_dns_search_domains|join(' ')|length > 256 diff --git a/kubespray/roles/container-engine/docker/tasks/systemd.yml b/kubespray/roles/container-engine/docker/tasks/systemd.yml new file mode 100644 index 0000000..0c040fe --- /dev/null +++ b/kubespray/roles/container-engine/docker/tasks/systemd.yml @@ -0,0 +1,68 @@ +--- +- name: Create docker service systemd directory if it doesn't exist + file: + path: /etc/systemd/system/docker.service.d + state: directory + mode: 0755 + +- name: Write docker proxy drop-in + template: + src: http-proxy.conf.j2 + dest: /etc/systemd/system/docker.service.d/http-proxy.conf + mode: 0644 + notify: restart docker + when: http_proxy is defined or https_proxy is defined + +- name: get systemd version + # noqa 303 - systemctl is called intentionally here + shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2 + args: + executable: /bin/bash + register: systemd_version + when: not is_ostree + changed_when: false + check_mode: false + +- name: Write docker.service systemd file + template: + src: docker.service.j2 + dest: /etc/systemd/system/docker.service + mode: 0644 + register: docker_service_file + notify: restart docker + when: + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + - not is_fedora_coreos + +- name: Write docker options systemd drop-in + template: + src: docker-options.conf.j2 + dest: "/etc/systemd/system/docker.service.d/docker-options.conf" + mode: 0644 + notify: restart docker + +- name: Write docker dns systemd drop-in + template: + src: docker-dns.conf.j2 + dest: "/etc/systemd/system/docker.service.d/docker-dns.conf" + mode: 0644 + notify: restart docker + when: dns_mode != 'none' and resolvconf_mode == 'docker_dns' + +- name: Copy docker orphan clean up script to the node + copy: + src: cleanup-docker-orphans.sh + dest: "{{ bin_dir }}/cleanup-docker-orphans.sh" + mode: 0755 + when: docker_orphan_clean_up | bool + +- name: Write docker orphan clean up systemd drop-in + template: + src: docker-orphan-cleanup.conf.j2 + dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf" + mode: 0644 + notify: restart docker + when: docker_orphan_clean_up | bool + +- name: Flush handlers + meta: flush_handlers diff --git a/kubespray/roles/container-engine/docker/templates/docker-dns.conf.j2 b/kubespray/roles/container-engine/docker/templates/docker-dns.conf.j2 new file mode 100644 index 0000000..d501a19 --- /dev/null +++ b/kubespray/roles/container-engine/docker/templates/docker-dns.conf.j2 @@ -0,0 +1,6 @@ +[Service] +Environment="DOCKER_DNS_OPTIONS=\ + {% for d in docker_dns_servers %}--dns {{ d }} {% endfor %} \ + {% for d in docker_dns_search_domains %}--dns-search {{ d }} {% endfor %} \ + {% for o in docker_dns_options %}--dns-opt {{ o }} {% endfor %} \ +" \ No newline at end of file diff --git a/kubespray/roles/container-engine/docker/templates/docker-options.conf.j2 b/kubespray/roles/container-engine/docker/templates/docker-options.conf.j2 new file mode 100644 index 0000000..ae661ad --- 
/dev/null +++ b/kubespray/roles/container-engine/docker/templates/docker-options.conf.j2 @@ -0,0 +1,11 @@ +[Service] +Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }} \ +--exec-opt native.cgroupdriver={{ docker_cgroup_driver }} \ +{% for i in docker_insecure_registries %}--insecure-registry={{ i }} {% endfor %} \ +{% for i in docker_registry_mirrors %}--registry-mirror={{ i }} {% endfor %} \ +--data-root={{ docker_daemon_graph }} \ +{% if ansible_os_family not in ["openSUSE Leap", "openSUSE Tumbleweed", "Suse"] %}{{ docker_log_opts }}{% endif %}" + +{% if docker_mount_flags is defined and docker_mount_flags != "" %} +MountFlags={{ docker_mount_flags }} +{% endif %} diff --git a/kubespray/roles/container-engine/docker/templates/docker-orphan-cleanup.conf.j2 b/kubespray/roles/container-engine/docker/templates/docker-orphan-cleanup.conf.j2 new file mode 100644 index 0000000..787a941 --- /dev/null +++ b/kubespray/roles/container-engine/docker/templates/docker-orphan-cleanup.conf.j2 @@ -0,0 +1,2 @@ +[Service] +ExecStartPost=-{{ bin_dir }}/cleanup-docker-orphans.sh \ No newline at end of file diff --git a/kubespray/roles/container-engine/docker/templates/docker.service.j2 b/kubespray/roles/container-engine/docker/templates/docker.service.j2 new file mode 100644 index 0000000..fd1d061 --- /dev/null +++ b/kubespray/roles/container-engine/docker/templates/docker.service.j2 @@ -0,0 +1,47 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=http://docs.docker.com +{% if ansible_os_family == "RedHat" %} +After=network.target {{ ' docker-storage-setup.service' if docker_container_storage_setup else '' }} containerd.service +BindsTo=containerd.service +{{ 'Wants=docker-storage-setup.service' if docker_container_storage_setup else '' }} +{% elif ansible_os_family == "Debian" %} +After=network.target docker.socket containerd.service +BindsTo=containerd.service +Wants=docker.socket +{% elif ansible_os_family == "Suse" %} +After=network.target lvm2-monitor.service SuSEfirewall2.service +# After=network.target containerd.service +# BindsTo=containerd.service +{% endif %} + +[Service] +Type=notify +{% if docker_storage_options is defined %} +Environment="DOCKER_STORAGE_OPTIONS={{ docker_storage_options }}" +{% endif %} +Environment=GOTRACEBACK=crash +ExecReload=/bin/kill -s HUP $MAINPID +Delegate=yes +KillMode=process +ExecStart={{ docker_bin_dir }}/dockerd \ +{% if ansible_os_family == "Suse" %} + --add-runtime oci=/usr/sbin/docker-runc \ +{% endif %} + $DOCKER_OPTS \ + $DOCKER_STORAGE_OPTIONS \ + $DOCKER_DNS_OPTIONS +{% if not is_ostree and systemd_version.stdout|int >= 226 %} +TasksMax=infinity +{% endif %} +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=1min +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/roles/container-engine/docker/templates/fedora_docker.repo.j2 b/kubespray/roles/container-engine/docker/templates/fedora_docker.repo.j2 new file mode 100644 index 0000000..3958ff0 --- /dev/null +++ b/kubespray/roles/container-engine/docker/templates/fedora_docker.repo.j2 @@ -0,0 +1,7 @@ +[docker-ce] +name=Docker-CE Repository +baseurl={{ docker_fedora_repo_base_url }} +enabled=1 +gpgcheck={{ '1' if docker_fedora_repo_gpgkey else '0' }} +gpgkey={{ docker_fedora_repo_gpgkey }} +{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %} diff --git 
a/kubespray/roles/container-engine/docker/templates/http-proxy.conf.j2 b/kubespray/roles/container-engine/docker/templates/http-proxy.conf.j2 new file mode 100644 index 0000000..212f30f --- /dev/null +++ b/kubespray/roles/container-engine/docker/templates/http-proxy.conf.j2 @@ -0,0 +1,2 @@ +[Service] +Environment={% if http_proxy is defined %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy is defined %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy is defined %}"NO_PROXY={{ no_proxy }}"{% endif %} diff --git a/kubespray/roles/container-engine/docker/templates/rh_docker.repo.j2 b/kubespray/roles/container-engine/docker/templates/rh_docker.repo.j2 new file mode 100644 index 0000000..178bbc2 --- /dev/null +++ b/kubespray/roles/container-engine/docker/templates/rh_docker.repo.j2 @@ -0,0 +1,10 @@ +[docker-ce] +name=Docker-CE Repository +baseurl={{ docker_rh_repo_base_url }} +enabled=0 +gpgcheck={{ '1' if docker_rh_repo_gpgkey else '0' }} +keepcache={{ docker_rpm_keepcache | default('1') }} +gpgkey={{ docker_rh_repo_gpgkey }} +{% if http_proxy is defined %} +proxy={{ http_proxy }} +{% endif %} diff --git a/kubespray/roles/container-engine/docker/vars/amazon.yml b/kubespray/roles/container-engine/docker/vars/amazon.yml new file mode 100644 index 0000000..4871f4a --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/amazon.yml @@ -0,0 +1,15 @@ +--- +# https://docs.aws.amazon.com/en_us/AmazonECS/latest/developerguide/docker-basics.html + +docker_versioned_pkg: + 'latest': docker + '18.09': docker-18.09.9ce-2.amzn2 + '19.03': docker-19.03.13ce-1.amzn2 + '20.10': docker-20.10.7-5.amzn2 + +docker_version: "latest" + +docker_package_info: + pkgs: + - "{{ docker_versioned_pkg[docker_version | string] }}" + enablerepo: amzn2extra-docker diff --git a/kubespray/roles/container-engine/docker/vars/clearlinux.yml b/kubespray/roles/container-engine/docker/vars/clearlinux.yml new file mode 100644 index 0000000..fbb7a22 --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/clearlinux.yml @@ -0,0 +1,4 @@ +--- +docker_package_info: + pkgs: + - "containers-basic" diff --git a/kubespray/roles/container-engine/docker/vars/debian-stretch.yml b/kubespray/roles/container-engine/docker/vars/debian-stretch.yml new file mode 100644 index 0000000..f26f60b --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/debian-stretch.yml @@ -0,0 +1,45 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}=1.3.7-1" + '1.3.9': "{{ containerd_package }}=1.3.9-1" + '1.4.3': "{{ containerd_package }}=1.4.3-1" + 'stable': "{{ containerd_package }}=1.4.3-1" + 'edge': "{{ containerd_package }}=1.4.3-1" + +docker_version: 19.03 +docker_cli_version: 19.03 + +# https://download.docker.com/linux/debian/ +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + 'stable': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower 
}} + 'edge': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + +docker_package_info: + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" + +docker_repo_key_info: + url: '{{ docker_debian_repo_gpgkey }}' + repo_keys: + - '{{ docker_debian_repo_repokey }}' + +docker_repo_info: + repos: + - > + deb {{ docker_debian_repo_base_url }} + {{ ansible_distribution_release|lower }} + stable diff --git a/kubespray/roles/container-engine/docker/vars/debian.yml b/kubespray/roles/container-engine/docker/vars/debian.yml new file mode 100644 index 0000000..d46bfa8 --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/debian.yml @@ -0,0 +1,49 @@ +--- +# containerd package info is only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}=1.3.7-1" + '1.3.9': "{{ containerd_package }}=1.3.9-1" + '1.4.3': "{{ containerd_package }}=1.4.3-2" + '1.4.4': "{{ containerd_package }}=1.4.4-1" + '1.4.6': "{{ containerd_package }}=1.4.6-1" + '1.4.9': "{{ containerd_package }}=1.4.9-1" + '1.4.12': "{{ containerd_package }}=1.4.12-1" + '1.6.4': "{{ containerd_package }}=1.6.4-1" + 'stable': "{{ containerd_package }}=1.6.4-1" + 'edge': "{{ containerd_package }}=1.6.4-1" + +# https://download.docker.com/linux/debian/ +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + '20.10': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} + '20.10': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + 'stable': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + 'edge': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }} + +docker_package_info: + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" + +docker_repo_key_info: + url: '{{ docker_debian_repo_gpgkey }}' + repo_keys: + - '{{ docker_debian_repo_repokey }}' + +docker_repo_info: + repos: + - > + deb {{ docker_debian_repo_base_url }} + {{ ansible_distribution_release|lower }} + stable diff --git a/kubespray/roles/container-engine/docker/vars/fedora.yml b/kubespray/roles/container-engine/docker/vars/fedora.yml new file mode 100644 index 0000000..8972fd8 --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/fedora.yml @@ -0,0 +1,37 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.fc{{ ansible_distribution_major_version }}" + '1.3.9': "{{ containerd_package }}-1.3.9-3.1.fc{{ ansible_distribution_major_version }}" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.fc{{ ansible_distribution_major_version 
}}" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.fc{{ ansible_distribution_major_version }}" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.fc{{ ansible_distribution_major_version }}" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.fc{{ ansible_distribution_major_version }}" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.fc{{ ansible_distribution_major_version }}" + '1.6.4': "{{ containerd_package }}-1.6.4-3.1.fc{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.6.4-3.1.fc{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.6.4-3.1.fc{{ ansible_distribution_major_version }}" + +# https://docs.docker.com/install/linux/docker-ce/fedora/ +# https://download.docker.com/linux/fedora//x86_64/stable/Packages/ +docker_versioned_pkg: + 'latest': docker-ce + '19.03': docker-ce-19.03.15-3.fc{{ ansible_distribution_major_version }} + '20.10': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }} + 'stable': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }} + 'edge': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '19.03': docker-ce-cli-19.03.15-3.fc{{ ansible_distribution_major_version }} + '20.10': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }} + +docker_package_info: + enablerepo: "docker-ce" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/kubespray/roles/container-engine/docker/vars/kylin.yml b/kubespray/roles/container-engine/docker/vars/kylin.yml new file mode 100644 index 0000000..d212d41 --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/kylin.yml @@ -0,0 +1,41 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.el{{ ansible_distribution_major_version }}" + '1.3.9': "{{ containerd_package }}-1.3.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.el{{ ansible_distribution_major_version }}" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.el{{ ansible_distribution_major_version }}" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.el{{ ansible_distribution_major_version }}" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.el{{ ansible_distribution_major_version }}" + '1.6.4': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + +docker_version: 19.03 +docker_cli_version: 19.03 + +# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package +# https://download.docker.com/linux/centos/>/x86_64/stable/Packages/ +# or do 'yum --showduplicates list docker-engine' +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce-3:18.09.9-3.el7 + '19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + 'edge': 
docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli-1:18.09.9-3.el7 + '19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + +docker_package_info: + enablerepo: "docker-ce" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/kubespray/roles/container-engine/docker/vars/redhat-7.yml b/kubespray/roles/container-engine/docker/vars/redhat-7.yml new file mode 100644 index 0000000..e37c416 --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/redhat-7.yml @@ -0,0 +1,40 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.el7" + '1.3.9': "{{ containerd_package }}-1.3.9-3.1.el7" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.el7" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.el7" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.el7" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.el7" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.el7" + '1.6.4': "{{ containerd_package }}-1.6.4-3.1.el7" + 'stable': "{{ containerd_package }}-1.6.4-3.1.el7" + 'edge': "{{ containerd_package }}-1.6.4-3.1.el7" + +# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package +# https://download.docker.com/linux/centos/>/x86_64/stable/Packages/ +# or do 'yum --showduplicates list docker-engine' +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce-18.09.9-3.el7 + '19.03': docker-ce-19.03.15-3.el7 + '20.10': docker-ce-20.10.20-3.el7 + 'stable': docker-ce-20.10.20-3.el7 + 'edge': docker-ce-20.10.20-3.el7 + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli-18.09.9-3.el7 + '19.03': docker-ce-cli-19.03.15-3.el7 + '20.10': docker-ce-cli-20.10.20-3.el7 + 'stable': docker-ce-cli-20.10.20-3.el7 + 'edge': docker-ce-cli-20.10.20-3.el7 + +docker_package_info: + enablerepo: "docker-ce" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/kubespray/roles/container-engine/docker/vars/redhat.yml b/kubespray/roles/container-engine/docker/vars/redhat.yml new file mode 100644 index 0000000..836763f --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/redhat.yml @@ -0,0 +1,40 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.el{{ ansible_distribution_major_version }}" + '1.3.9': "{{ containerd_package }}-1.3.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.el{{ ansible_distribution_major_version }}" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.el{{ ansible_distribution_major_version }}" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.el{{ ansible_distribution_major_version }}" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.el{{ ansible_distribution_major_version }}" + '1.6.4': 
"{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + +# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package +# https://download.docker.com/linux/centos/>/x86_64/stable/Packages/ +# or do 'yum --showduplicates list docker-engine' +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce-3:18.09.9-3.el7 + '19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli-1:18.09.9-3.el7 + '19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }} + +docker_package_info: + enablerepo: "docker-ce" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/kubespray/roles/container-engine/docker/vars/suse.yml b/kubespray/roles/container-engine/docker/vars/suse.yml new file mode 100644 index 0000000..2d9fbf0 --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/suse.yml @@ -0,0 +1,6 @@ +--- +docker_package_info: + state: latest + pkgs: + - docker + - containerd diff --git a/kubespray/roles/container-engine/docker/vars/ubuntu-16.yml b/kubespray/roles/container-engine/docker/vars/ubuntu-16.yml new file mode 100644 index 0000000..78a6cea --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/ubuntu-16.yml @@ -0,0 +1,46 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}=1.3.7-1" + '1.3.9': "{{ containerd_package }}=1.3.9-1" + '1.4.3': "{{ containerd_package }}=1.4.3-2" + '1.4.4': "{{ containerd_package }}=1.4.4-1" + '1.4.6': "{{ containerd_package }}=1.4.6-1" + 'stable': "{{ containerd_package }}=1.4.6-1" + 'edge': "{{ containerd_package }}=1.4.6-1" + +# https://download.docker.com/linux/ubuntu/ +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '20.10': docker-ce=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '20.10': docker-ce-cli=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce-cli=5:20.10.7~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce-cli=5:20.10.7~3-0~ubuntu-{{ 
ansible_distribution_release|lower }} + +docker_package_info: + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" + +docker_repo_key_info: + url: '{{ docker_ubuntu_repo_gpgkey }}' + repo_keys: + - '{{ docker_ubuntu_repo_repokey }}' + +docker_repo_info: + repos: + - > + deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }} + {{ ansible_distribution_release|lower }} + stable diff --git a/kubespray/roles/container-engine/docker/vars/ubuntu.yml b/kubespray/roles/container-engine/docker/vars/ubuntu.yml new file mode 100644 index 0000000..cced07e --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/ubuntu.yml @@ -0,0 +1,49 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}=1.3.7-1" + '1.3.9': "{{ containerd_package }}=1.3.9-1" + '1.4.3': "{{ containerd_package }}=1.4.3-2" + '1.4.4': "{{ containerd_package }}=1.4.4-1" + '1.4.6': "{{ containerd_package }}=1.4.6-1" + '1.4.9': "{{ containerd_package }}=1.4.9-1" + '1.4.12': "{{ containerd_package }}=1.4.12-1" + '1.6.4': "{{ containerd_package }}=1.6.4-1" + 'stable': "{{ containerd_package }}=1.6.4-1" + 'edge': "{{ containerd_package }}=1.6.4-1" + +# https://download.docker.com/linux/ubuntu/ +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '20.10': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} + '20.10': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }} + +docker_package_info: + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" + +docker_repo_key_info: + url: '{{ docker_ubuntu_repo_gpgkey }}' + repo_keys: + - '{{ docker_ubuntu_repo_repokey }}' + +docker_repo_info: + repos: + - > + deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }} + {{ ansible_distribution_release|lower }} + stable diff --git a/kubespray/roles/container-engine/docker/vars/uniontech.yml b/kubespray/roles/container-engine/docker/vars/uniontech.yml new file mode 100644 index 0000000..79b8abc --- /dev/null +++ b/kubespray/roles/container-engine/docker/vars/uniontech.yml @@ -0,0 +1,45 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.el{{ ansible_distribution_major_version }}" + '1.3.9': "{{ containerd_package }}-1.3.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.el{{ 
ansible_distribution_major_version }}" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.el{{ ansible_distribution_major_version }}" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.el{{ ansible_distribution_major_version }}" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.el{{ ansible_distribution_major_version }}" + '1.6.4': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + '1.6.8': "{{ containerd_package }}-1.6.8-3.1.el{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.6.8-3.1.el{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.6.8-3.1.el{{ ansible_distribution_major_version }}" + +docker_version: 19.03 +docker_cli_version: 19.03 + +# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package +# https://download.docker.com/linux/centos/>/x86_64/stable/Packages/ +# or do 'yum --showduplicates list docker-engine' +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce-3:18.09.9-3.el7 + '19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli-1:18.09.9-3.el7 + '19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} + +docker_package_info: + enablerepo: "docker-ce" + disablerepo: "UniontechOS-20-AppStream" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/kubespray/roles/container-engine/gvisor/molecule/default/converge.yml b/kubespray/roles/container-engine/gvisor/molecule/default/converge.yml new file mode 100644 index 0000000..b14d078 --- /dev/null +++ b/kubespray/roles/container-engine/gvisor/molecule/default/converge.yml @@ -0,0 +1,11 @@ +--- +- name: Converge + hosts: all + become: true + vars: + gvisor_enabled: true + container_manager: containerd + roles: + - role: kubespray-defaults + - role: container-engine/containerd + - role: container-engine/gvisor diff --git a/kubespray/roles/container-engine/gvisor/molecule/default/files/10-mynet.conf b/kubespray/roles/container-engine/gvisor/molecule/default/files/10-mynet.conf new file mode 100644 index 0000000..f10935b --- /dev/null +++ b/kubespray/roles/container-engine/gvisor/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.2.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff --git a/kubespray/roles/container-engine/gvisor/molecule/default/files/container.json b/kubespray/roles/container-engine/gvisor/molecule/default/files/container.json new file mode 100644 index 0000000..acec0ce --- /dev/null +++ b/kubespray/roles/container-engine/gvisor/molecule/default/files/container.json @@ 
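The per-distribution vars files above all follow the same pattern: version-keyed dictionaries (containerd_versioned_pkg, docker_versioned_pkg, docker_cli_versioned_pkg) that docker_package_info indexes with the requested version cast to a string. A minimal sketch of how that lookup resolves, assuming the Debian mapping and an illustrative docker_version of '20.10' (the hard-coded bullseye suffix stands in for ansible_distribution_release):

- hosts: localhost
  gather_facts: false
  vars:
    docker_version: '20.10'   # illustrative value, not a project default
    docker_versioned_pkg:
      'latest': docker-ce
      '20.10': docker-ce=5:20.10.20~3-0~debian-bullseye
  tasks:
    - name: Show the apt package spec that would be installed
      debug:
        msg: "{{ docker_versioned_pkg[docker_version | string] }}"
      # prints: docker-ce=5:20.10.20~3-0~debian-bullseye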
-0,0 +1,10 @@ +{ + "metadata": { + "name": "gvisor1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "gvisor1.0.log", + "linux": {} +} diff --git a/kubespray/roles/container-engine/gvisor/molecule/default/files/sandbox.json b/kubespray/roles/container-engine/gvisor/molecule/default/files/sandbox.json new file mode 100644 index 0000000..a8da54d --- /dev/null +++ b/kubespray/roles/container-engine/gvisor/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "gvisor1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/kubespray/roles/container-engine/gvisor/molecule/default/molecule.yml b/kubespray/roles/container-engine/gvisor/molecule/default/molecule.yml new file mode 100644 index 0000000..5c3a7e1 --- /dev/null +++ b/kubespray/roles/container-engine/gvisor/molecule/default/molecule.yml @@ -0,0 +1,45 @@ +--- +driver: + name: vagrant + provider: + name: libvirt + options: + driver: kvm +lint: | + set -e + yamllint -c ../../../.yamllint . +platforms: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint + inventory: + group_vars: + all: + become: true +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/roles/container-engine/gvisor/molecule/default/prepare.yml b/kubespray/roles/container-engine/gvisor/molecule/default/prepare.yml new file mode 100644 index 0000000..8f9ef7d --- /dev/null +++ b/kubespray/roles/container-engine/gvisor/molecule/default/prepare.yml @@ -0,0 +1,48 @@ +--- +- name: Prepare generic + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: containerd + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + - role: container-engine/crictl + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: 0644 + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: root + mode: 0755 + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: 0644 + with_items: + - 10-mynet.conf diff --git a/kubespray/roles/container-engine/gvisor/molecule/default/tests/test_default.py b/kubespray/roles/container-engine/gvisor/molecule/default/tests/test_default.py new file mode 100644 index 0000000..1cb7fb0 --- /dev/null +++ b/kubespray/roles/container-engine/gvisor/molecule/default/tests/test_default.py @@ -0,0 +1,29 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + gvisorruntime = "/usr/local/bin/runsc" + with host.sudo(): 
+ cmd = host.command(gvisorruntime + " --version") + assert cmd.rc == 0 + assert "runsc version" in cmd.stdout + + +def test_run_pod(host): + runtime = "runsc" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/gvisor1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/kubespray/roles/container-engine/gvisor/tasks/main.yml b/kubespray/roles/container-engine/gvisor/tasks/main.yml new file mode 100644 index 0000000..fa5bd72 --- /dev/null +++ b/kubespray/roles/container-engine/gvisor/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: gVisor | Download runsc binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.gvisor_runsc) }}" + +- name: gVisor | Download containerd-shim-runsc-v1 binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.gvisor_containerd_shim) }}" + +- name: gVisor | Copy binaries + copy: + src: "{{ local_release_dir }}/gvisor-{{ item }}" + dest: "{{ bin_dir }}/{{ item }}" + mode: 0755 + remote_src: yes + with_items: + - runsc + - containerd-shim-runsc-v1 diff --git a/kubespray/roles/container-engine/kata-containers/OWNERS b/kubespray/roles/container-engine/kata-containers/OWNERS new file mode 100644 index 0000000..fa95926 --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - pasqualet +reviewers: + - pasqualet diff --git a/kubespray/roles/container-engine/kata-containers/defaults/main.yml b/kubespray/roles/container-engine/kata-containers/defaults/main.yml new file mode 100644 index 0000000..fc909ca --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/defaults/main.yml @@ -0,0 +1,9 @@ +--- +kata_containers_dir: /opt/kata +kata_containers_config_dir: /etc/kata-containers +kata_containers_containerd_bin_dir: /usr/local/bin + +kata_containers_qemu_default_memory: "{{ ansible_memtotal_mb }}" +kata_containers_qemu_debug: 'false' +kata_containers_qemu_sandbox_cgroup_only: 'true' +kata_containers_qemu_enable_mem_prealloc: 'false' diff --git a/kubespray/roles/container-engine/kata-containers/molecule/default/converge.yml b/kubespray/roles/container-engine/kata-containers/molecule/default/converge.yml new file mode 100644 index 0000000..a6fdf81 --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/molecule/default/converge.yml @@ -0,0 +1,11 @@ +--- +- name: Converge + hosts: all + become: true + vars: + kata_containers_enabled: true + container_manager: containerd + roles: + - role: kubespray-defaults + - role: container-engine/containerd + - role: container-engine/kata-containers diff --git a/kubespray/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf b/kubespray/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf new file mode 100644 index 0000000..f10935b --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.2.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff 
--git a/kubespray/roles/container-engine/kata-containers/molecule/default/files/container.json b/kubespray/roles/container-engine/kata-containers/molecule/default/files/container.json new file mode 100644 index 0000000..e2e9a56 --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/molecule/default/files/container.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "kata1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "kata1.0.log", + "linux": {} +} diff --git a/kubespray/roles/container-engine/kata-containers/molecule/default/files/sandbox.json b/kubespray/roles/container-engine/kata-containers/molecule/default/files/sandbox.json new file mode 100644 index 0000000..326a578 --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "kata1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/kubespray/roles/container-engine/kata-containers/molecule/default/molecule.yml b/kubespray/roles/container-engine/kata-containers/molecule/default/molecule.yml new file mode 100644 index 0000000..63a942b --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/molecule/default/molecule.yml @@ -0,0 +1,45 @@ +--- +driver: + name: vagrant + provider: + name: libvirt + options: + driver: kvm +lint: | + set -e + yamllint -c ../../../.yamllint . +platforms: + - name: ubuntu18 + box: generic/ubuntu1804 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint + inventory: + group_vars: + all: + become: true +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/roles/container-engine/kata-containers/molecule/default/prepare.yml b/kubespray/roles/container-engine/kata-containers/molecule/default/prepare.yml new file mode 100644 index 0000000..8a0978f --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/molecule/default/prepare.yml @@ -0,0 +1,48 @@ +--- +- name: Prepare + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: containerd + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + - role: container-engine/crictl + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: 0644 + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: "{{ kube_owner }}" + mode: 0755 + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: 0644 + with_items: + - 10-mynet.conf diff --git a/kubespray/roles/container-engine/kata-containers/molecule/default/tests/test_default.py 
b/kubespray/roles/container-engine/kata-containers/molecule/default/tests/test_default.py new file mode 100644 index 0000000..e10fff4 --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/molecule/default/tests/test_default.py @@ -0,0 +1,37 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + kataruntime = "/opt/kata/bin/kata-runtime" + with host.sudo(): + cmd = host.command(kataruntime + " version") + assert cmd.rc == 0 + assert "kata-runtime" in cmd.stdout + + +def test_run_check(host): + kataruntime = "/opt/kata/bin/kata-runtime" + with host.sudo(): + cmd = host.command(kataruntime + " check") + assert cmd.rc == 0 + assert "System is capable of running" in cmd.stdout + + +def test_run_pod(host): + runtime = "kata-qemu" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/kata1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/kubespray/roles/container-engine/kata-containers/tasks/main.yml b/kubespray/roles/container-engine/kata-containers/tasks/main.yml new file mode 100644 index 0000000..54bd25d --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- name: kata-containers | Download kata binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.kata_containers) }}" + +- name: kata-containers | Copy kata-containers binary + unarchive: + src: "{{ local_release_dir }}/kata-static-{{ kata_containers_version }}-{{ image_arch }}.tar.xz" + dest: "/" + mode: 0755 + remote_src: yes + +- name: kata-containers | Create config directory + file: + path: "{{ kata_containers_config_dir }}" + state: directory + mode: 0755 + +- name: kata-containers | Set configuration + template: + src: "{{ item }}.j2" + dest: "{{ kata_containers_config_dir }}/{{ item }}" + mode: 0644 + with_items: + - configuration-qemu.toml + +- name: kata-containers | Set containerd bin + vars: + shim: "{{ item }}" + template: + dest: "{{ kata_containers_containerd_bin_dir }}/containerd-shim-kata-{{ item }}-v2" + src: containerd-shim-kata-v2.j2 + mode: 0755 + with_items: + - qemu + +- name: kata-containers | Load vhost kernel modules + modprobe: + state: present + name: "{{ item }}" + with_items: + - vhost_vsock + - vhost_net + +- name: kata-containers | Persist vhost kernel modules + copy: + dest: /etc/modules-load.d/kubespray-kata-containers.conf + mode: 0644 + content: | + vhost_vsock + vhost_net diff --git a/kubespray/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 b/kubespray/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 new file mode 100644 index 0000000..4038242 --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 @@ -0,0 +1,624 @@ +# Copyright (c) 2017-2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +# XXX: WARNING: this file is auto-generated. 
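The molecule verifier above starts a pod with crictl and --runtime kata-qemu, the handler that containerd resolves to the containerd-shim-kata-qemu-v2 wrapper installed by the tasks (containerd maps a handler name to a containerd-shim-<name>-v2 binary). On a running cluster that handler is normally selected per pod through a Kubernetes RuntimeClass; a hedged sketch, where the object name is arbitrary and the handler must match the runtime name configured in containerd:

apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: kata-qemu        # arbitrary name, referenced by pods via runtimeClassName
handler: kata-qemu       # must match the containerd runtime handler
# a workload then opts in with:
#   spec:
#     runtimeClassName: kata-qemu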
+# XXX: +# XXX: Source file: "cli/config/configuration-qemu.toml.in" +# XXX: Project: +# XXX: Name: Kata Containers +# XXX: Type: kata + +[hypervisor.qemu] +path = "/opt/kata/bin/qemu-system-x86_64" +{% if kata_containers_version is version('2.2.0', '>=') %} +kernel = "/opt/kata/share/kata-containers/vmlinux.container" +{% else %} +kernel = "/opt/kata/share/kata-containers/vmlinuz.container" +{% endif %} +image = "/opt/kata/share/kata-containers/kata-containers.img" +machine_type = "q35" + +# Enable confidential guest support. +# Toggling that setting may trigger different hardware features, ranging +# from memory encryption to both memory and CPU-state encryption and integrity. +# The Kata Containers runtime dynamically detects the available feature set and +# aims at enabling the largest possible one. +# Default false +# confidential_guest = true + +# List of valid annotation names for the hypervisor +# Each member of the list is a regular expression, which is the base name +# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path" +enable_annotations = [] + +# List of valid annotations values for the hypervisor +# Each member of the list is a path pattern as described by glob(3). +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: ["/opt/kata/bin/qemu-system-x86_64"] +valid_hypervisor_paths = ["/opt/kata/bin/qemu-system-x86_64"] + +# Optional space-separated list of options to pass to the guest kernel. +# For example, use `kernel_params = "vsyscall=emulate"` if you are having +# trouble running pre-2.15 glibc. +# +# WARNING: - any parameter specified here will take priority over the default +# parameter value of the same name used to start the virtual machine. +# Do not set values here unless you understand the impact of doing so as you +# may stop the virtual machine from booting. +# To see the list of default parameters, enable hypervisor debug, create a +# container and look for 'default-kernel-parameters' log entries. +kernel_params = "" + +# Path to the firmware. +# If you want that qemu uses the default firmware leave this option empty +firmware = "" + +# Machine accelerators +# comma-separated list of machine accelerators to pass to the hypervisor. +# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"` +machine_accelerators="" + +# CPU features +# comma-separated list of cpu features to pass to the cpu +# For example, `cpu_features = "pmu=off,vmx=off" +cpu_features="pmu=off" + +# Default number of vCPUs per SB/VM: +# unspecified or 0 --> will be set to 1 +# < 0 --> will be set to the actual number of physical cores +# > 0 <= number of physical cores --> will be set to the specified number +# > number of physical cores --> will be set to the actual number of physical cores +default_vcpus = 1 + +# Default maximum number of vCPUs per SB/VM: +# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number +# of vCPUs supported by KVM if that number is exceeded +# > 0 <= number of physical cores --> will be set to the specified number +# > number of physical cores --> will be set to the actual number of physical cores or to the maximum number +# of vCPUs supported by KVM if that number is exceeded +# WARNING: Depending of the architecture, the maximum number of vCPUs supported by KVM is used when +# the actual number of physical cores is greater than it. 
+# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU +# the hotplug functionality. For example, `default_maxvcpus = 240` specifies that until 240 vCPUs +# can be added to a SB/VM, but the memory footprint will be big. Another example, with +# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of +# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable, +# unless you know what are you doing. +# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8. +default_maxvcpus = 0 + +# Bridges can be used to hot plug devices. +# Limitations: +# * Currently only pci bridges are supported +# * Until 30 devices per bridge can be hot plugged. +# * Until 5 PCI bridges can be cold plugged per VM. +# This limitation could be a bug in qemu or in the kernel +# Default number of bridges per SB/VM: +# unspecified or 0 --> will be set to 1 +# > 1 <= 5 --> will be set to the specified number +# > 5 --> will be set to 5 +default_bridges = 1 + +# Default memory size in MiB for SB/VM. +# If unspecified then it will be set 2048 MiB. +default_memory = {{ kata_containers_qemu_default_memory }} +# +# Default memory slots per SB/VM. +# If unspecified then it will be set 10. +# This is will determine the times that memory will be hotadded to sandbox/VM. +#memory_slots = 10 + +# The size in MiB will be plused to max memory of hypervisor. +# It is the memory address space for the NVDIMM devie. +# If set block storage driver (block_device_driver) to "nvdimm", +# should set memory_offset to the size of block device. +# Default 0 +#memory_offset = 0 + +# Specifies virtio-mem will be enabled or not. +# Please note that this option should be used with the command +# "echo 1 > /proc/sys/vm/overcommit_memory". +# Default false +#enable_virtio_mem = true + +# Disable block device from being used for a container's rootfs. +# In case of a storage driver like devicemapper where a container's +# root file system is backed by a block device, the block device is passed +# directly to the hypervisor for performance reasons. +# This flag prevents the block device from being passed to the hypervisor, +# 9pfs is used instead to pass the rootfs. +disable_block_device_use = false + +# Shared file system type: +# - virtio-fs (default) +# - virtio-9p +{% if kata_containers_version is version('2.2.0', '>=') %} +shared_fs = "virtio-fs" +{% else %} +shared_fs = "virtio-9p" +{% endif %} + +# Path to vhost-user-fs daemon. +virtio_fs_daemon = "/opt/kata/libexec/kata-qemu/virtiofsd" + +# List of valid annotations values for the virtiofs daemon +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: ["/opt/kata/libexec/kata-qemu/virtiofsd"] +valid_virtio_fs_daemon_paths = ["/opt/kata/libexec/kata-qemu/virtiofsd"] + +# Default size of DAX cache in MiB +virtio_fs_cache_size = 0 + +# Extra args for virtiofsd daemon +# +# Format example: +# ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"] +# +# see `virtiofsd -h` for possible options. +virtio_fs_extra_args = ["--thread-pool-size=1"] + +# Cache mode: +# +# - none +# Metadata, data, and pathname lookup are not cached in guest. They are +# always fetched from host and any changes are immediately pushed to host. +# +# - auto +# Metadata and pathname lookup cache expires after a configured amount of +# time (default is 1 second). Data is cached while the file is open (close +# to open consistency). 
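The kernel and shared_fs settings in this template are switched with Ansible's version test against kata_containers_version, so 2.2.0 and newer get virtio-fs and the vmlinux image while older releases fall back to virtio-9p. A minimal sketch of that test in isolation (the version value is illustrative):

- hosts: localhost
  gather_facts: false
  vars:
    kata_containers_version: 2.2.4   # illustrative version, not a project default
  tasks:
    - name: Show which shared filesystem the template would select
      debug:
        msg: "{{ 'virtio-fs' if kata_containers_version is version('2.2.0', '>=') else 'virtio-9p' }}"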
+# +# - always +# Metadata, data, and pathname lookup are cached in guest and never expire. +virtio_fs_cache = "always" + +# Block storage driver to be used for the hypervisor in case the container +# rootfs is backed by a block device. This is virtio-scsi, virtio-blk +# or nvdimm. +block_device_driver = "virtio-scsi" + +# Specifies cache-related options will be set to block devices or not. +# Default false +#block_device_cache_set = true + +# Specifies cache-related options for block devices. +# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled. +# Default false +#block_device_cache_direct = true + +# Specifies cache-related options for block devices. +# Denotes whether flush requests for the device are ignored. +# Default false +#block_device_cache_noflush = true + +# Enable iothreads (data-plane) to be used. This causes IO to be +# handled in a separate IO thread. This is currently only implemented +# for SCSI. +# +enable_iothreads = false + +# Enable pre allocation of VM RAM, default false +# Enabling this will result in lower container density +# as all of the memory will be allocated and locked +# This is useful when you want to reserve all the memory +# upfront or in the cases where you want memory latencies +# to be very predictable +# Default false +enable_mem_prealloc = {{ kata_containers_qemu_enable_mem_prealloc }} + +# Enable huge pages for VM RAM, default false +# Enabling this will result in the VM memory +# being allocated using huge pages. +# This is useful when you want to use vhost-user network +# stacks within the container. This will automatically +# result in memory pre allocation +#enable_hugepages = true + +# Enable vhost-user storage device, default false +# Enabling this will result in some Linux reserved block type +# major range 240-254 being chosen to represent vhost-user devices. +enable_vhost_user_store = false + +# The base directory specifically used for vhost-user devices. +# Its sub-path "block" is used for block devices; "block/sockets" is +# where we expect vhost-user sockets to live; "block/devices" is where +# simulated block device nodes for vhost-user devices to live. +vhost_user_store_path = "/var/run/kata-containers/vhost-user" + +# Enable vIOMMU, default false +# Enabling this will result in the VM having a vIOMMU device +# This will also add the following options to the kernel's +# command line: intel_iommu=on,iommu=pt +#enable_iommu = true + +# Enable IOMMU_PLATFORM, default false +# Enabling this will result in the VM device having iommu_platform=on set +#enable_iommu_platform = true + +# List of valid annotations values for the vhost user store path +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: ["/var/run/kata-containers/vhost-user"] +valid_vhost_user_store_paths = ["/var/run/kata-containers/vhost-user"] + +# Enable file based guest memory support. The default is an empty string which +# will disable this feature. In the case of virtio-fs, this is enabled +# automatically and '/dev/shm' is used as the backing folder. +# This option will be ignored if VM templating is enabled. +#file_mem_backend = "" + +# List of valid annotations values for the file_mem_backend annotation +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: [""] +valid_file_mem_backends = [""] + +# Enable swap of vm memory. Default false. +# The behaviour is undefined if mem_prealloc is also set to true +#enable_swap = true + +# -pflash can add image file to VM. 
The arguments of it should be in format +# of ["/path/to/flash0.img", "/path/to/flash1.img"] +pflashes = [] + +# This option changes the default hypervisor and kernel parameters +# to enable debug output where available. This extra output is added +# to the proxy logs, but only when proxy debug is also enabled. +# +# Default false +enable_debug = {{ kata_containers_qemu_debug }} + +# Disable the customizations done in the runtime when it detects +# that it is running on top a VMM. This will result in the runtime +# behaving as it would when running on bare metal. +# +#disable_nesting_checks = true + +# This is the msize used for 9p shares. It is the number of bytes +# used for 9p packet payload. +#msize_9p = 8192 + +# If true and vsocks are supported, use vsocks to communicate directly +# with the agent and no proxy is started, otherwise use unix +# sockets and start a proxy to communicate with the agent. +# Default false +#use_vsock = true + +# If false and nvdimm is supported, use nvdimm device to plug guest image. +# Otherwise virtio-block device is used. +# Default is false +#disable_image_nvdimm = true + +# VFIO devices are hotplugged on a bridge by default. +# Enable hotplugging on root bus. This may be required for devices with +# a large PCI bar, as this is a current limitation with hotplugging on +# a bridge. This value is valid for "pc" machine type. +# Default false +#hotplug_vfio_on_root_bus = true + +# Before hot plugging a PCIe device, you need to add a pcie_root_port device. +# Use this parameter when using some large PCI bar devices, such as Nvidia GPU +# The value means the number of pcie_root_port +# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35" +# Default 0 +#pcie_root_port = 2 + +# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off +# security (vhost-net runs ring0) for network I/O performance. +#disable_vhost_net = true + +# +# Default entropy source. +# The path to a host source of entropy (including a real hardware RNG) +# /dev/urandom and /dev/random are two main options. +# Be aware that /dev/random is a blocking source of entropy. If the host +# runs out of entropy, the VMs boot time will increase leading to get startup +# timeouts. +# The source of entropy /dev/urandom is non-blocking and provides a +# generally acceptable source of entropy. It should work well for pretty much +# all practical purposes. +#entropy_source= "/dev/urandom" + +# List of valid annotations values for entropy_source +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: ["/dev/urandom","/dev/random",""] +valid_entropy_sources = ["/dev/urandom","/dev/random",""] + +# Path to OCI hook binaries in the *guest rootfs*. +# This does not affect host-side hooks which must instead be added to +# the OCI spec passed to the runtime. +# +# You can create a rootfs with hooks by customizing the osbuilder scripts: +# https://github.com/kata-containers/osbuilder +# +# Hooks must be stored in a subdirectory of guest_hook_path according to their +# hook type, i.e. "guest_hook_path/{prestart,postart,poststop}". +# The agent will scan these directories for executable files and add them, in +# lexicographical order, to the lifecycle of the guest container. +# Hooks are executed in the runtime namespace of the guest. 
See the official documentation: +# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks +# Warnings will be logged if any error is encountered will scanning for hooks, +# but it will not abort container execution. +#guest_hook_path = "/usr/share/oci/hooks" +# +# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM). +# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic. +# Default 0-sized value means unlimited rate. +#rx_rate_limiter_max_rate = 0 +# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM). +# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block) +# to discipline traffic. +# Default 0-sized value means unlimited rate. +#tx_rate_limiter_max_rate = 0 + +# Set where to save the guest memory dump file. +# If set, when GUEST_PANICKED event occurred, +# guest memeory will be dumped to host filesystem under guest_memory_dump_path, +# This directory will be created automatically if it does not exist. +# +# The dumped file(also called vmcore) can be processed with crash or gdb. +# +# WARNING: +# Dump guest’s memory can take very long depending on the amount of guest memory +# and use much disk space. +#guest_memory_dump_path="/var/crash/kata" + +# If enable paging. +# Basically, if you want to use "gdb" rather than "crash", +# or need the guest-virtual addresses in the ELF vmcore, +# then you should enable paging. +# +# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details +#guest_memory_dump_paging=false + +# Enable swap in the guest. Default false. +# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device +# if the swappiness of a container (set by annotation "io.katacontainers.container.resource.swappiness") +# is bigger than 0. +# The size of the swap device should be +# swap_in_bytes (set by annotation "io.katacontainers.container.resource.swap_in_bytes") - memory_limit_in_bytes. +# If swap_in_bytes is not set, the size should be memory_limit_in_bytes. +# If swap_in_bytes and memory_limit_in_bytes is not set, the size should +# be default_memory. +#enable_guest_swap = true + +[factory] +# VM templating support. Once enabled, new VMs are created from template +# using vm cloning. They will share the same initial kernel, initramfs and +# agent memory by mapping it readonly. It helps speeding up new container +# creation and saves a lot of memory if there are many kata containers running +# on the same host. +# +# When disabled, new VMs are created from scratch. +# +# Note: Requires "initrd=" to be set ("image=" is not supported). +# +# Default false +#enable_template = true + +# Specifies the path of template. +# +# Default "/run/vc/vm/template" +#template_path = "/run/vc/vm/template" + +# The number of caches of VMCache: +# unspecified or == 0 --> VMCache is disabled +# > 0 --> will be set to the specified number +# +# VMCache is a function that creates VMs as caches before using it. +# It helps speed up new container creation. +# The function consists of a server and some clients communicating +# through Unix socket. The protocol is gRPC in protocols/cache/cache.proto. +# The VMCache server will create some VMs and cache them by factory cache. +# It will convert the VM to gRPC format and transport it when gets +# requestion from clients. +# Factory grpccache is the VMCache client. 
It will request gRPC format +# VM and convert it back to a VM. If VMCache function is enabled, +# kata-runtime will request VM from factory grpccache when it creates +# a new sandbox. +# +# Default 0 +#vm_cache_number = 0 + +# Specify the address of the Unix socket that is used by VMCache. +# +# Default /var/run/kata-containers/cache.sock +#vm_cache_endpoint = "/var/run/kata-containers/cache.sock" + +[proxy.kata] +path = "/opt/kata/libexec/kata-containers/kata-proxy" + +# If enabled, proxy messages will be sent to the system log +# (default: disabled) +enable_debug = {{ kata_containers_qemu_debug }} + +[shim.kata] +path = "/opt/kata/libexec/kata-containers/kata-shim" + +# If enabled, shim messages will be sent to the system log +# (default: disabled) +enable_debug = {{ kata_containers_qemu_debug }} + +# If enabled, the shim will create opentracing.io traces and spans. +# (See https://www.jaegertracing.io/docs/getting-started). +# +# Note: By default, the shim runs in a separate network namespace. Therefore, +# to allow it to send trace details to the Jaeger agent running on the host, +# it is necessary to set 'disable_new_netns=true' so that it runs in the host +# network namespace. +# +# (default: disabled) +#enable_tracing = true + +[agent.kata] +# If enabled, make the agent display debug-level messages. +# (default: disabled) +enable_debug = {{ kata_containers_qemu_debug }} + +# Enable agent tracing. +# +# If enabled, the default trace mode is "dynamic" and the +# default trace type is "isolated". The trace mode and type are set +# explicitly with the `trace_type=` and `trace_mode=` options. +# +# Notes: +# +# - Tracing is ONLY enabled when `enable_tracing` is set: explicitly +# setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing` +# will NOT activate agent tracing. +# +# - See https://github.com/kata-containers/agent/blob/master/TRACING.md for +# full details. +# +# (default: disabled) +#enable_tracing = true +# +#trace_mode = "dynamic" +#trace_type = "isolated" + +# Comma separated list of kernel modules and their parameters. +# These modules will be loaded in the guest kernel using modprobe(8). +# The following example can be used to load two kernel modules with parameters +# - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"] +# The first word is considered as the module name and the rest as its parameters. +# Container will not be started when: +# * A kernel module is specified and the modprobe command is not installed in the guest +# or it fails loading the module. +# * The module is not available in the guest or it doesn't met the guest kernel +# requirements, like architecture and version. +# +kernel_modules=[] + +# Enable debug console. + +# If enabled, user can connect guest OS running inside hypervisor +# through "kata-runtime exec " command + +#debug_console_enabled = true + +# Agent connection dialing timeout value in seconds +# (default: 30) +#dial_timeout = 30 + +[netmon] +# If enabled, the network monitoring process gets started when the +# sandbox is created. This allows for the detection of some additional +# network being added to the existing network namespace, after the +# sandbox has been created. +# (default: disabled) +#enable_netmon = true + +# Specify the path to the netmon binary. 
+path = "/opt/kata/libexec/kata-containers/kata-netmon" + +# If enabled, netmon messages will be sent to the system log +# (default: disabled) +enable_debug = {{ kata_containers_qemu_debug }} + +[runtime] +# If enabled, the runtime will log additional debug messages to the +# system log +# (default: disabled) +enable_debug = {{ kata_containers_qemu_debug }} +# +# Internetworking model +# Determines how the VM should be connected to the +# the container network interface +# Options: +# +# - macvtap +# Used when the Container network interface can be bridged using +# macvtap. +# +# - none +# Used when customize network. Only creates a tap device. No veth pair. +# +# - tcfilter +# Uses tc filter rules to redirect traffic from the network interface +# provided by plugin to a tap interface connected to the VM. +# +internetworking_model="tcfilter" + +# disable guest seccomp +# Determines whether container seccomp profiles are passed to the virtual +# machine and applied by the kata agent. If set to true, seccomp is not applied +# within the guest +# (default: true) +disable_guest_seccomp=true + +# If enabled, the runtime will create opentracing.io traces and spans. +# (See https://www.jaegertracing.io/docs/getting-started). +# (default: disabled) +#enable_tracing = true + +# Set the full url to the Jaeger HTTP Thrift collector. +# The default if not set will be "http://localhost:14268/api/traces" +#jaeger_endpoint = "" + +# Sets the username to be used if basic auth is required for Jaeger. +#jaeger_user = "" + +# Sets the password to be used if basic auth is required for Jaeger. +#jaeger_password = "" + +# If enabled, the runtime will not create a network namespace for shim and hypervisor processes. +# This option may have some potential impacts to your host. It should only be used when you know what you're doing. +# `disable_new_netns` conflicts with `enable_netmon` +# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only +# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge +# (like OVS) directly. +# If you are using docker, `disable_new_netns` only works with `docker run --net=none` +# (default: false) +#disable_new_netns = true + +# if enabled, the runtime will add all the kata processes inside one dedicated cgroup. +# The container cgroups in the host are not created, just one single cgroup per sandbox. +# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox. +# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation. +# The sandbox cgroup is constrained if there is no container type annotation. +# See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType +sandbox_cgroup_only={{ kata_containers_qemu_sandbox_cgroup_only }} + +# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path. +# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory. +# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts` +# These will not be exposed to the container workloads, and are only provided for potential guest services. +sandbox_bind_mounts=[] + +# Enabled experimental feature list, format: ["a", "b"]. 
+# Experimental features are features not stable enough for production, +# they may break compatibility, and are prepared for a big version bump. +# Supported experimental features: +# (default: []) +experimental=[] + +# If enabled, user can run pprof tools with shim v2 process through kata-monitor. +# (default: false) +# enable_pprof = true + +# WARNING: All the options in the following section have not been implemented yet. +# This section was added as a placeholder. DO NOT USE IT! +[image] +# Container image service. +# +# Offload the CRI image management service to the Kata agent. +# (default: false) +#service_offload = true + +# Container image decryption keys provisioning. +# Applies only if service_offload is true. +# Keys can be provisioned locally (e.g. through a special command or +# a local file) or remotely (usually after the guest is remotely attested). +# The provision setting is a complete URL that lets the Kata agent decide +# which method to use in order to fetch the keys. +# +# Keys can be stored in a local file, in a measured and attested initrd: +#provision=data:///local/key/file +# +# Keys could be fetched through a special command or binary from the +# initrd (guest) image, e.g. a firmware call: +#provision=file:///path/to/bin/fetcher/in/guest +# +# Keys can be remotely provisioned. The Kata agent fetches them from e.g. +# a HTTPS URL: +#provision=https://my-key-broker.foo/tenant/ diff --git a/kubespray/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 b/kubespray/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 new file mode 100644 index 0000000..a3cb830 --- /dev/null +++ b/kubespray/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 @@ -0,0 +1,2 @@ +#!/bin/bash +KATA_CONF_FILE={{ kata_containers_config_dir }}/configuration-{{ shim }}.toml {{ kata_containers_dir }}/bin/containerd-shim-kata-v2 $@ diff --git a/kubespray/roles/container-engine/meta/main.yml b/kubespray/roles/container-engine/meta/main.yml new file mode 100644 index 0000000..3e068d6 --- /dev/null +++ b/kubespray/roles/container-engine/meta/main.yml @@ -0,0 +1,58 @@ +# noqa role-name - this is a meta role that doesn't need a name +--- +dependencies: + - role: container-engine/validate-container-engine + tags: + - container-engine + - validate-container-engine + + - role: container-engine/kata-containers + when: + - kata_containers_enabled + tags: + - container-engine + - kata-containers + + - role: container-engine/gvisor + when: + - gvisor_enabled + - container_manager in ['docker', 'containerd'] + tags: + - container-engine + - gvisor + + - role: container-engine/crun + when: + - crun_enabled + tags: + - container-engine + - crun + + - role: container-engine/youki + when: + - youki_enabled + - container_manager == 'crio' + tags: + - container-engine + - youki + + - role: container-engine/cri-o + when: + - container_manager == 'crio' + tags: + - container-engine + - crio + + - role: container-engine/containerd + when: + - container_manager == 'containerd' + tags: + - container-engine + - containerd + + - role: container-engine/cri-dockerd + when: + - container_manager == 'docker' + tags: + - container-engine + - docker diff --git a/kubespray/roles/container-engine/nerdctl/handlers/main.yml b/kubespray/roles/container-engine/nerdctl/handlers/main.yml new file mode 100644 index 0000000..27895ff --- /dev/null +++ b/kubespray/roles/container-engine/nerdctl/handlers/main.yml @@ -0,0 +1,12 @@ +--- +- name: Get nerdctl completion + 
command: "{{ bin_dir }}/nerdctl completion bash" + changed_when: False + register: nerdctl_completion + check_mode: false + +- name: Install nerdctl completion + copy: + dest: /etc/bash_completion.d/nerdctl + content: "{{ nerdctl_completion.stdout }}" + mode: 0644 diff --git a/kubespray/roles/container-engine/nerdctl/tasks/main.yml b/kubespray/roles/container-engine/nerdctl/tasks/main.yml new file mode 100644 index 0000000..ad08839 --- /dev/null +++ b/kubespray/roles/container-engine/nerdctl/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- name: nerdctl | Download nerdctl + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.nerdctl) }}" + +- name: nerdctl | Copy nerdctl binary from download dir + copy: + src: "{{ local_release_dir }}/nerdctl" + dest: "{{ bin_dir }}/nerdctl" + mode: 0755 + remote_src: true + owner: root + group: root + become: true + notify: + - Get nerdctl completion + - Install nerdctl completion + +- name: nerdctl | Create configuration dir + file: + path: /etc/nerdctl + state: directory + mode: 0755 + owner: root + group: root + become: true + +- name: nerdctl | Install nerdctl configuration + template: + src: nerdctl.toml.j2 + dest: /etc/nerdctl/nerdctl.toml + mode: 0644 + owner: root + group: root + become: true diff --git a/kubespray/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 b/kubespray/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 new file mode 100644 index 0000000..cd1b5f9 --- /dev/null +++ b/kubespray/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 @@ -0,0 +1,10 @@ +debug = false +debug_full = false +address = "{{ cri_socket }}" +namespace = "k8s.io" +snapshotter = "native" +cni_path = "/opt/cni/bin" +cni_netconfpath = "/etc/cni/net.d" +cgroup_manager = "{{ kubelet_cgroup_driver | default('systemd') }}" +insecure_registry = {{ (containerd_insecure_registries is defined and containerd_insecure_registries|length>0) | bool | lower }} +hosts_dir = ["/etc/containerd/certs.d"] diff --git a/kubespray/roles/container-engine/runc/defaults/main.yml b/kubespray/roles/container-engine/runc/defaults/main.yml new file mode 100644 index 0000000..af8aa08 --- /dev/null +++ b/kubespray/roles/container-engine/runc/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +runc_bin_dir: "{{ bin_dir }}" + +runc_package_name: runc diff --git a/kubespray/roles/container-engine/runc/tasks/main.yml b/kubespray/roles/container-engine/runc/tasks/main.yml new file mode 100644 index 0000000..7a8e336 --- /dev/null +++ b/kubespray/roles/container-engine/runc/tasks/main.yml @@ -0,0 +1,38 @@ +--- +- name: runc | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: runc | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: runc | Uninstall runc package managed by package manager + package: + name: "{{ runc_package_name }}" + state: absent + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + +- name: runc | Download runc binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.runc) }}" + +- name: Copy runc binary from download dir + copy: + src: "{{ downloads.runc.dest }}" + dest: "{{ runc_bin_dir }}/runc" + mode: 0755 + remote_src: true + +- name: runc | Remove orphaned binary + file: + path: /usr/bin/runc + state: absent + when: runc_bin_dir != 
"/usr/bin" + ignore_errors: true # noqa ignore-errors diff --git a/kubespray/roles/container-engine/skopeo/tasks/main.yml b/kubespray/roles/container-engine/skopeo/tasks/main.yml new file mode 100644 index 0000000..033ae62 --- /dev/null +++ b/kubespray/roles/container-engine/skopeo/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: skopeo | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: skopeo | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: skopeo | Uninstall skopeo package managed by package manager + package: + name: skopeo + state: absent + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + ignore_errors: true # noqa ignore-errors + +- name: skopeo | Download skopeo binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.skopeo) }}" + +- name: Copy skopeo binary from download dir + copy: + src: "{{ downloads.skopeo.dest }}" + dest: "{{ bin_dir }}/skopeo" + mode: 0755 + remote_src: true diff --git a/kubespray/roles/container-engine/validate-container-engine/tasks/main.yml b/kubespray/roles/container-engine/validate-container-engine/tasks/main.yml new file mode 100644 index 0000000..fdd60e0 --- /dev/null +++ b/kubespray/roles/container-engine/validate-container-engine/tasks/main.yml @@ -0,0 +1,153 @@ +--- +- name: validate-container-engine | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + tags: + - facts + +- name: validate-container-engine | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + tags: + - facts + +- name: Ensure kubelet systemd unit exists + stat: + path: "/etc/systemd/system/kubelet.service" + register: kubelet_systemd_unit_exists + tags: + - facts + +- name: Populate service facts + service_facts: + tags: + - facts + +- name: Check if containerd is installed + find: + file_type: file + recurse: yes + use_regex: yes + patterns: + - containerd.service$ + paths: + - /lib/systemd + - /etc/systemd + - /run/systemd + register: containerd_installed + tags: + - facts + +- name: Check if docker is installed + find: + file_type: file + recurse: yes + use_regex: yes + patterns: + - docker.service$ + paths: + - /lib/systemd + - /etc/systemd + - /run/systemd + register: docker_installed + tags: + - facts + +- name: Check if crio is installed + find: + file_type: file + recurse: yes + use_regex: yes + patterns: + - crio.service$ + paths: + - /lib/systemd + - /etc/systemd + - /run/systemd + register: crio_installed + tags: + - facts + +- name: Uninstall containerd + block: + - name: Drain node + include_role: + name: remove-node/pre-remove + apply: + tags: + - pre-remove + when: kubelet_systemd_unit_exists.stat.exists + - name: Stop kubelet + service: + name: kubelet + state: stopped + when: kubelet_systemd_unit_exists.stat.exists + - name: Remove Containerd + import_role: + name: container-engine/containerd + tasks_from: reset + handlers_from: reset + vars: + service_name: containerd.service + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + - container_manager != "containerd" + - docker_installed.matched == 0 + - containerd_installed.matched > 0 + - ansible_facts.services[service_name]['state'] == 'running' + +- name: Uninstall 
docker + block: + - name: Drain node + include_role: + name: remove-node/pre-remove + apply: + tags: + - pre-remove + when: kubelet_systemd_unit_exists.stat.exists + - name: Stop kubelet + service: + name: kubelet + state: stopped + when: kubelet_systemd_unit_exists.stat.exists + - name: Remove Docker + import_role: + name: container-engine/docker + tasks_from: reset + vars: + service_name: docker.service + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + - container_manager != "docker" + - docker_installed.matched > 0 + - ansible_facts.services[service_name]['state'] == 'running' + +- name: Uninstall crio + block: + - name: Drain node + include_role: + name: remove-node/pre-remove + apply: + tags: + - pre-remove + when: kubelet_systemd_unit_exists.stat.exists + - name: Stop kubelet + service: + name: kubelet + state: stopped + when: kubelet_systemd_unit_exists.stat.exists + - name: Remove CRI-O + import_role: + name: container-engine/cri-o + tasks_from: reset + vars: + service_name: crio.service + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + - container_manager != "crio" + - crio_installed.matched > 0 + - ansible_facts.services[service_name]['state'] == 'running' diff --git a/kubespray/roles/container-engine/youki/defaults/main.yml b/kubespray/roles/container-engine/youki/defaults/main.yml new file mode 100644 index 0000000..2250f22 --- /dev/null +++ b/kubespray/roles/container-engine/youki/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +youki_bin_dir: "{{ bin_dir }}" diff --git a/kubespray/roles/container-engine/youki/molecule/default/converge.yml b/kubespray/roles/container-engine/youki/molecule/default/converge.yml new file mode 100644 index 0000000..11ef8f6 --- /dev/null +++ b/kubespray/roles/container-engine/youki/molecule/default/converge.yml @@ -0,0 +1,11 @@ +--- +- name: Converge + hosts: all + become: true + vars: + youki_enabled: true + container_manager: crio + roles: + - role: kubespray-defaults + - role: container-engine/cri-o + - role: container-engine/youki diff --git a/kubespray/roles/container-engine/youki/molecule/default/files/10-mynet.conf b/kubespray/roles/container-engine/youki/molecule/default/files/10-mynet.conf new file mode 100644 index 0000000..b9fa3ba --- /dev/null +++ b/kubespray/roles/container-engine/youki/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.4.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff --git a/kubespray/roles/container-engine/youki/molecule/default/files/container.json b/kubespray/roles/container-engine/youki/molecule/default/files/container.json new file mode 100644 index 0000000..a5d5094 --- /dev/null +++ b/kubespray/roles/container-engine/youki/molecule/default/files/container.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "youki1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "youki1.0.log", + "linux": {} +} diff --git a/kubespray/roles/container-engine/youki/molecule/default/files/sandbox.json b/kubespray/roles/container-engine/youki/molecule/default/files/sandbox.json new file mode 100644 index 0000000..b2a4ffe --- /dev/null +++ b/kubespray/roles/container-engine/youki/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + 
"metadata": { + "name": "youki1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/kubespray/roles/container-engine/youki/molecule/default/molecule.yml b/kubespray/roles/container-engine/youki/molecule/default/molecule.yml new file mode 100644 index 0000000..5c3a7e1 --- /dev/null +++ b/kubespray/roles/container-engine/youki/molecule/default/molecule.yml @@ -0,0 +1,45 @@ +--- +driver: + name: vagrant + provider: + name: libvirt + options: + driver: kvm +lint: | + set -e + yamllint -c ../../../.yamllint . +platforms: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane + - name: almalinux8 + box: almalinux/8 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + lint: + name: ansible-lint + options: + c: ../../../.ansible-lint + inventory: + group_vars: + all: + become: true +verifier: + name: testinfra + lint: + name: flake8 diff --git a/kubespray/roles/container-engine/youki/molecule/default/prepare.yml b/kubespray/roles/container-engine/youki/molecule/default/prepare.yml new file mode 100644 index 0000000..e948686 --- /dev/null +++ b/kubespray/roles/container-engine/youki/molecule/default/prepare.yml @@ -0,0 +1,48 @@ +--- +- name: Prepare generic + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: crio + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + - role: container-engine/crictl + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: 0644 + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: root + mode: 0755 + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: 0644 + with_items: + - 10-mynet.conf diff --git a/kubespray/roles/container-engine/youki/molecule/default/tests/test_default.py b/kubespray/roles/container-engine/youki/molecule/default/tests/test_default.py new file mode 100644 index 0000000..54ed5c5 --- /dev/null +++ b/kubespray/roles/container-engine/youki/molecule/default/tests/test_default.py @@ -0,0 +1,29 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + youkiruntime = "/usr/local/bin/youki" + with host.sudo(): + cmd = host.command(youkiruntime + " --version") + assert cmd.rc == 0 + assert "youki" in cmd.stdout + + +def test_run_pod(host): + runtime = "youki" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/youki1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git 
a/kubespray/roles/container-engine/youki/tasks/main.yml b/kubespray/roles/container-engine/youki/tasks/main.yml new file mode 100644 index 0000000..1095c3d --- /dev/null +++ b/kubespray/roles/container-engine/youki/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: youki | Download youki + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.youki) }}" + +- name: youki | Copy youki binary from download dir + copy: + src: "{{ local_release_dir }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux/youki-v{{ youki_version }}/youki" + dest: "{{ youki_bin_dir }}/youki" + mode: 0755 + remote_src: true diff --git a/kubespray/roles/download/defaults/main.yml b/kubespray/roles/download/defaults/main.yml new file mode 100644 index 0000000..0f958d0 --- /dev/null +++ b/kubespray/roles/download/defaults/main.yml @@ -0,0 +1,1943 @@ +--- +local_release_dir: /tmp/releases +download_cache_dir: /tmp/kubespray_cache + +# If this is true, debug information will be displayed but +# may contain some private data, so it is recommended to set it to false +# in the production environment. +unsafe_show_logs: false + +# Do not delete remote cache files after using them +# NOTE: Setting this parameter to TRUE is only really useful when developing kubespray +download_keep_remote_cache: false + +# Only useful when download_run_once is false: Locally cached files and images are +# uploaded to kubernetes nodes. Also, images downloaded on those nodes are copied +# back to the ansible runner's cache, if they are not yet present. +download_force_cache: false + +# Used to only evaluate vars from download role +skip_downloads: false + +# Optionally skip kubeadm images download +skip_kubeadm_images: false +kubeadm_images: {} + +# If this is set to true, files will only be downloaded once. This doesn't work +# on Flatcar Container Linux by Kinvolk unless download_localhost is true and localhost +# is running another OS type. Default compress level is 1 (fastest). +download_run_once: false +download_compress: 1 + +# If this is set to true, container images will be downloaded +download_container: true + +# If this is set to true, uses localhost for download_run_once mode +# (requires docker and sudo to access docker). You may want this option for +# local caching of docker images or for Flatcar Container Linux by Kinvolk cluster nodes. +# Otherwise, uses the first node in the kube_control_plane group to store images +# in the download_run_once mode. +download_localhost: false + +# Always pull images if set to True. Otherwise, check by the repo's tag/digest. +download_always_pull: false + +# Some problems may occur when downloading files over an https proxy due to an ansible bug +# https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +# SSL validation of the get_url module. Note that kubespray will still be performing checksum validation.
+download_validate_certs: true + +# Use the first kube_control_plane if download_localhost is not set +download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube_control_plane'][0] }}{% endif %}" + +# The docker_image_info_command might seems weird but we are using raw/endraw and `{{ `{{` }}` to manage the double jinja2 processing +docker_image_pull_command: "{{ docker_bin_dir }}/docker pull" +docker_image_info_command: "{{ docker_bin_dir }}/docker images -q | xargs -i {{ '{{' }} docker_bin_dir }}/docker inspect -f {% raw %}'{{ '{{' }} if .RepoTags }}{{ '{{' }} join .RepoTags \",\" }}{{ '{{' }} end }}{{ '{{' }} if .RepoDigests }},{{ '{{' }} join .RepoDigests \",\" }}{{ '{{' }} end }}' {% endraw %} {} | tr '\n' ','" +nerdctl_image_info_command: "{{ bin_dir }}/nerdctl -n k8s.io images --format '{% raw %}{{ .Repository }}:{{ .Tag }}{% endraw %}' 2>/dev/null | grep -v ^:$ | tr '\n' ','" +nerdctl_image_pull_command: "{{ bin_dir }}/nerdctl -n k8s.io pull --quiet {{ nerdctl_extra_flags }}" +crictl_image_info_command: "{{ bin_dir }}/crictl images --verbose | awk -F ': ' '/RepoTags|RepoDigests/ {print $2}' | tr '\n' ','" +crictl_image_pull_command: "{{ bin_dir }}/crictl pull" + +image_command_tool: "{%- if container_manager == 'containerd' -%}nerdctl{%- elif container_manager == 'crio' -%}crictl{%- else -%}{{ container_manager }}{%- endif -%}" +image_command_tool_on_localhost: "{{ image_command_tool }}" + +image_pull_command: "{{ lookup('vars', image_command_tool + '_image_pull_command') }}" +image_info_command: "{{ lookup('vars', image_command_tool + '_image_info_command') }}" +image_pull_command_on_localhost: "{{ lookup('vars', image_command_tool_on_localhost + '_image_pull_command') }}" +image_info_command_on_localhost: "{{ lookup('vars', image_command_tool_on_localhost + '_image_info_command') }}" + +# Arch of Docker images and needed packages +image_arch: "{{host_architecture | default('amd64')}}" + +# Nerdctl insecure flag set +nerdctl_extra_flags: '{%- if containerd_insecure_registries is defined and containerd_insecure_registries|length>0 -%}--insecure-registry{%- else -%}{%- endif -%}' + +# Versions +kubeadm_version: "{{ kube_version }}" +crun_version: 1.4.5 +runc_version: v1.1.4 +kata_containers_version: 2.4.1 +youki_version: 0.0.1 +gvisor_version: 20210921 +containerd_version: 1.6.14 +cri_dockerd_version: 0.2.2 + +# this is relevant when container_manager == 'docker' +docker_containerd_version: 1.6.4 + +# gcr and kubernetes image repo define +gcr_image_repo: "gcr.io" +kube_image_repo: "registry.k8s.io" + +# docker image repo define +docker_image_repo: "docker.io" + +# quay image repo define +quay_image_repo: "quay.io" + +# github image repo define (ex multus only use that) +github_image_repo: "ghcr.io" + +# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults +# after migration to container download +calico_version: "v3.24.5" +calico_ctl_version: "{{ calico_version }}" +calico_cni_version: "{{ calico_version }}" +calico_flexvol_version: "{{ calico_version }}" +calico_policy_version: "{{ calico_version }}" +calico_typha_version: "{{ calico_version }}" +calico_apiserver_version: "{{ calico_version }}" +typha_enabled: false +calico_apiserver_enabled: false + +flannel_version: "v0.19.2" +flannel_cni_version: "v1.1.0" +cni_version: "v1.1.1" +weave_version: 2.8.1 +pod_infra_version: "3.7" + +cilium_version: "v1.12.1" +cilium_cli_version: "v0.12.5" +cilium_enable_hubble: false + +kube_ovn_version: "v1.10.7" +kube_ovn_dpdk_version: "19.11-{{ 
kube_ovn_version }}" +kube_router_version: "v1.5.1" +multus_version: "v3.8-{{ image_arch }}" +helm_version: "v3.10.3" +nerdctl_version: "1.0.0" +krew_version: "v0.4.3" +skopeo_version: v1.10.0 + +# Get kubernetes major version (i.e. 1.17.4 => 1.17) +kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0-9]+', 'v\\1.\\2') }}" + +etcd_supported_versions: + v1.25: "v3.5.6" + v1.24: "v3.5.6" + v1.23: "v3.5.6" +etcd_version: "{{ etcd_supported_versions[kube_major_version] }}" + +crictl_supported_versions: + v1.25: "v1.25.0" + v1.24: "v1.24.0" + v1.23: "v1.23.0" +crictl_version: "{{ crictl_supported_versions[kube_major_version] }}" + +crio_supported_versions: + v1.25: v1.25.1 + v1.24: v1.24.3 + v1.23: v1.23.2 +crio_version: "{{ crio_supported_versions[kube_major_version] }}" + +# Download URLs +kubelet_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" +kubectl_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" +etcd_download_url: "https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" +cni_download_url: "https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" +calicoctl_download_url: "https://github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +calicoctl_alternate_download_url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +calico_crds_download_url: "https://github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" +ciliumcli_download_url: "https://github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" +crictl_download_url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" +crio_download_url: "https://storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.{{ crio_version }}.tar.gz" +helm_download_url: "https://get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" +runc_download_url: "https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +crun_download_url: "https://github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" +youki_download_url: "https://github.com/containers/youki/releases/download/v{{ youki_version }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux.tar.gz" +kata_containers_download_url: "https://github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" +# gVisor only supports amd64 and uses x86_64 to in the download link +gvisor_runsc_download_url: "https://storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +gvisor_containerd_shim_runsc_download_url: "https://storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" +nerdctl_download_url: 
"https://github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" +krew_download_url: "https://github.com/kubernetes-sigs/krew/releases/download/{{ krew_version }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz" +containerd_download_url: "https://github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +cri_dockerd_download_url: "https://github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" +skopeo_download_url: "https://github.com/lework/skopeo-binary/releases/download/{{ skopeo_version }}/skopeo-linux-{{ image_arch }}" + +crictl_checksums: + arm: + v1.25.0: c4efe3649af5542f2b07cdfc0be62e9e13c7bb846a9b59d57e190c764f28dae4 + v1.24.0: 1ab8a88d6ce1e9cff1c76fc454d2d41cf0c89e98c6db15a41804a3a5874cbf89 + v1.23.0: c20f7a118183d1e6da24c3709471ea0b4dee51cb709f958e0d90f3acb4eb59ae + arm64: + v1.25.0: 651c939eca010bbf48cc3932516b194028af0893025f9e366127f5b50ad5c4f4 + v1.24.0: b6fe172738dfa68ca4c71ade53574e859bf61a3e34d21b305587b1ad4ab28d24 + v1.23.0: 91094253e77094435027998a99b9b6a67b0baad3327975365f7715a1a3bd9595 + amd64: + v1.25.0: 86ab210c007f521ac4cdcbcf0ae3fb2e10923e65f16de83e0e1db191a07f0235 + v1.24.0: 3df4a4306e0554aea4fdc26ecef9eea29a58c8460bebfaca3405799787609880 + v1.23.0: b754f83c80acdc75f93aba191ff269da6be45d0fc2d3f4079704e7d1424f1ca8 + ppc64le: + v1.25.0: 1b77d1f198c67b2015104eee6fe7690465b8efa4675ea6b4b958c63d60a487e7 + v1.24.0: 586c263678c6d8d543976607ea1732115e622d44993e2bcbed29832370d3a754 + v1.23.0: 53db9e605a3042ea77bbf42a01a4e248dea8839bcab544c491745874f73aeee7 + +crio_archive_checksums: + arm: + v1.25.1: 0 + v1.24.3: 0 + v1.23.2: 0 + arm64: + v1.25.1: add26675dc993b292024d007fd69980d8d1e75c675851d0cb687fe1dfd1f3008 + v1.24.3: d8040602e03c90e4482b4ce97b63c2cf1301cd2afb0aa722342f40f3537a1a1f + v1.23.2: a866ccc3a062ac29906a619b9045a5e23b11fa9249f8802f8be0849491d01fbd + amd64: + v1.25.1: 49f98a38805740c40266a5bf3badc28e4ca725ccf923327c75c00fccc241f562 + v1.24.3: 43f6e3a7ad6ae8cf05ed0f1e493578c28abf6a798aedb8ee9643ff7c25a68ca3 + v1.23.2: 5c766dbf366a80f8b5dbc7a06d566f43e7cb0675186c50062df01f3b3cb5e526 + ppc64le: + v1.25.1: 0 + v1.24.3: 0 + v1.23.2: 0 + +# Checksum +# Kubernetes versions above Kubespray's current target version are untested and should be used with caution. 
+kubelet_checksums: + arm: + v1.25.5: fdaade890ed44ce55a1086dd1b1bde44daac02f90eacd9faf14fd182af1ffda0 + v1.25.4: 1af9c17daa07c215a8ce40f7e65896279276e11b6f7a7d9ae850a0561e149ad8 + v1.25.3: 9745a48340ca61b00f0094e4b8ff210839edcf05420f0d57b3cb1748cb887060 + v1.25.2: 995f885543fa61a08bd4f1008ba6d7417a1c45bd2a8e0f70c67a83e53b46eea5 + v1.25.1: 6fe430ad91e1ed50cf5cc396aa204fda0889c36b8a3b84619d633cd9a6a146e2 + v1.25.0: ad45ac3216aa186648fd034dec30a00c1a2d2d1187cab8aae21aa441a13b4faa + v1.24.9: d91e82f0bee5c7dc3bb0b35d67dc2335404ec44a316fc369cec5c749522e9bd5 + v1.24.8: 0756748c89293e2c502ffcf7a275c3bb98a7b919d59130e5e0376c8afb327fe2 + v1.24.7: 3841e80f54ee5576928e799e4962231261bcdafe94868a310a8782da9a321da5 + v1.24.6: 084e469d1d3b60363e5e20812ee0d909daa5496f3e6ebd305d1f23d1fe0709d4 + v1.24.5: ce55155d1aff0c72effee19c6bef534c2b7d1b23ec701d70335d181bd2d12a87 + v1.24.4: f9d387c18159a4473e7bdc290780ba1b1c92e8d8b41f558c15ee044db54636cd + v1.24.3: fe34b1a0892cdfb015f66be8f2d3450130a5d04f9466732020e186c8da0ee799 + v1.24.2: e484fb000dcfdcf7baca79451745e29764747a27d36f3fc1dda5815b9cbc9b22 + v1.24.1: 393d130a1715205a253b2f70dbd1f00d1a52ab89b4f3684ed116a937e68116ec + v1.24.0: fd19ff957c73e5397f9af931c82bdb95791e47dc7d3135d38720ecda211758a3 + v1.23.15: 6b057a9b55b20b8a8cf0c6d2947ed5bcff77ffb311f785755cecce5917944910 + v1.23.14: ddbb9930e232b51b2f3bbe6f944b96642cfb120f4fdd1820128fb842a454a947 + v1.23.13: 58f744247dbc8bca50b01ec1c25b0b5868736319f9cc8bf964fc2c1dd9eef0f9 + v1.23.12: 5b7c38206ba3c04cd756062b74093548ac6309dc086c2893351b1c479f5415a3 + v1.23.11: 93bbe3a130dcd7d5732e8b949f13ba8728bb37d3d4bd58408f99352cf484f9d0 + v1.23.10: d6d5aa26f16e735962cac5f2ee8ddc0d3b9d2aa14b8e968cb55fc9745f9a8b03 + v1.23.9: f22edc9838eb3d0788d951c1fc8fdb0e1bf6c43ad638a215172f25b54ca27a8a + v1.23.8: 53c4f44ba10d9c53a4526fccb4d20146e52473788058684ca2de74ae0e1abb11 + v1.23.7: f9910e670aea8845b6b07ecd36d43d8ac0901ee3244264d2bc0f6ea918d862ac + v1.23.6: 2f3fb387c20de1da586ac6bc43fa714fb7c2116b4243a2ef1e28ecfbba324cea + v1.23.5: 9505cf63fb56a1d90d1db9c1507587621455a152ef16d871e802875e1e7b4587 + v1.23.4: e67a51013ed59ea3df0ad1d54863d483cc99247584992b8cad6dd612135a70c5 + v1.23.3: 80a2c005e7b6c4e9363a18fa1d8911b6592eb2f93cbaa8a56fe5f6f59515d1a4 + v1.23.2: f9e83b3bd99b9e70cd98a5f8dc75a89d3d51548d51e4e05615cdc48d6144f908 + v1.23.1: 29868f172ef171ae990deafcdc13af7fe5b00f0a546ae81c267c4ad01231c3ce + v1.23.0: 7417fc7cd624a85887f0a28054f58f7534143579fe85285d0b68c8984c95f2ba + arm64: + v1.25.5: 18aa53ff59740a11504218905b51b29cc78fb8b5dd818a619141afa9dafb8f5a + v1.25.4: 8ff80a12381fad2e96c9cec6712591018c830cdd327fc7bd825237aa51a6ada3 + v1.25.3: 929d25fc3f901749b058141a9c624ff379759869e09df49b75657c0be3141091 + v1.25.2: c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae + v1.25.1: b6baa99b99ecc1f358660208a9a27b64c65f3314ff95a84c73091b51ac98484b + v1.25.0: 69572a7b3d179d4a479aa2e0f90e2f091d8d84ef33a35422fc89975dc137a590 + v1.24.9: 34021c6cf593ffc4361e9e2adc3d6e4f5683383eeb894f40d63ccfa268e84f4c + v1.24.8: 4e1427651e4ff3927f96ce4b93c471ccc76c683fc1619ee0d677d77345b54edb + v1.24.7: d8bd38e595ca061c53d3b7d1daebe5b3cc1ad44c731666bd5e842d336077db4b + v1.24.6: 2a7b8e131d6823462e38bc1514b5dea5dca86254b3a12ed4a0fa653c2e06dd0e + v1.24.5: dd5dcea80828979981654ec0732b197be252a3259a527cbc299d9575bc2de3e8 + v1.24.4: 2d9817c1e9e1edd9480aa05862ea6e9655a9512d820b1933175f5d7c8253ca61 + v1.24.3: 6c04ae25ee9b434f40e0d2466eb4ef5604dc43f306ddf1e5f165fc9d3c521e12 + v1.24.2: 40a8460e104fbf97abee9763f6e1f2143debc46cc6c9a1a18e21c1ff9960d8c0 + v1.24.1: 
c2189c6956afda0f6002839f9f14a9b48c89dcc0228701e84856be36a3aac6bf + v1.24.0: 8f066c9a048dd1704bf22ccf6e994e2fa2ea1175c9768a786f6cb6608765025e + v1.23.15: b5540d2b67f325ad79af6b86a88bc3d1a8a225453911e7ebb7387788ce355a87 + v1.23.14: 80cdff15398c8215bb7337efdee25b40c862befbdf7925f6a8aca71bc9a79eae + v1.23.13: 4e2297c9893d425bfcd80741b95fb1a5b59b4fd4f4bcf782ccab94760e653cdf + v1.23.12: b802f12c79a9797f83a366c617144d019d2994fc724c75f642a9d031ce6a3488 + v1.23.11: ce4f568c3193e8e0895062f783980da89adb6b54a399c797656a3ce172ddb2fc + v1.23.10: 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8 + v1.23.9: c11b14ab3fa8e567c54e893c5a937f53618b26c9b62416cc8aa7760835f68350 + v1.23.8: 1b4ec707e29e8136e3516a437cb541a79c52c69b1331a7add2b47e7ac7d032e6 + v1.23.7: e96b746a77b00c04f1926035899a583ce28f02e9a5dca26c1bfb8251ca6a43bb + v1.23.6: 11a0310e8e7af5a11539ac26d6c14cf1b77d35bce4ca74e4bbd053ed1afc8650 + v1.23.5: 61f7e3ae0eb00633d3b5163c046cfcae7e73b5f26d4ffcf343f3a45904323583 + v1.23.4: c4f09c9031a34549fbaa48231b115fee6e170ce6832dce26d4b50b040aad2311 + v1.23.3: 95c36d0d1e65f6167f8fa80df04b3a816bc803e6bb5554f04d6af849c729a77d + v1.23.2: 65372ad077a660dfb8a863432c8a22cd0b650122ca98ce2e11f51a536449339f + v1.23.1: c24e4ab211507a39141d227595610383f7c5686cae3795b7d75eebbce8606f3d + v1.23.0: a546fb7ccce69c4163e4a0b19a31f30ea039b4e4560c23fd6e3016e2b2dfd0d9 + amd64: + v1.25.5: 16b23e1254830805b892cfccf2687eb3edb4ea54ffbadb8cc2eee6d3b1fab8e6 + v1.25.4: 7f7437e361f829967ee02e30026d7e85219693432ac5e930cc98dd9c7ddb2fac + v1.25.3: d5c89c5e5dae6afa5f06a3e0e653ac3b93fa9a93c775a715531269ec91a54abe + v1.25.2: 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3 + v1.25.1: 63e38bcbc4437ce10227695f8722371ec0d178067f1031d09fe1f59b6fcf214a + v1.25.0: 7f9183fce12606818612ce80b6c09757452c4fb50aefea5fc5843951c5020e24 + v1.24.9: 8753b9ae0c3e22f09dafdb4178492582c28874f70844de38dc43eb3fad5ca8bb + v1.24.8: 2da0b93857cf352bff5d1eb42e34d398a5971b63a53d8687b45179a78540d6d6 + v1.24.7: 4d24c97c924c40971412cc497145ad823e4b7b87ccda97ebced375f7e886e9e2 + v1.24.6: f8b606f542327128e404d2e66a72a40dc2ddb4175fb8e93c55effeacea60921b + v1.24.5: 2448debe26e90341b038d7ccfcd55942c76ef3d9db48e42ceae5e8de3fbad631 + v1.24.4: 0f34d12aaa1b911adbf75dd63df03d0674dde921fa0571a51acd2b5b576ba0a4 + v1.24.3: da575ceb7c44fddbe7d2514c16798f39f8c10e54b5dbef3bcee5ac547637db11 + v1.24.2: 13da57d32be1debad3d8923e481f30aaa46bca7030b7e748b099d403b30e5343 + v1.24.1: fc352d5c983b0ccf47acd8816eb826d781f408d27263dd8f761dfb63e69abfde + v1.24.0: 3d98ac8b4fb8dc99f9952226f2565951cc366c442656a889facc5b1b2ec2ba52 + v1.23.15: 5cf382d911c13c9cc8f770251b3a2fd9399c70ac50337874f670b9078f88231d + v1.23.14: f2bef00508790f632d035a6cfdd31539115611bfc93c5a3266ceb95bb2f27b76 + v1.23.13: 4d8f796b82dbe2b89b6d587bfeedf66724526b211c75a53456d4ac4014e3dcca + v1.23.12: 98ffa8a736d3e43debb1aa61ae71dea3671989cde5e9e44c6ee51a3d47c63614 + v1.23.11: b0e6d413f9b4cf1007fcb9f0ea6460ed5273a50c945ae475c224036b0ab817f7 + v1.23.10: c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b + v1.23.9: a5975920be1de0768e77ef101e4e42b179406add242c0883a7dc598f2006d387 + v1.23.8: 1ba15ad4d9d99cfc3cbef922b5101492ad74e812629837ac2e5705a68cb7af1e + v1.23.7: 518f67200e853253ed6424488d6148476144b6b796ec7c6160cff15769b3e12a + v1.23.6: fbb83e35f6b9f7cae19c50694240291805ca9c4028676af868306553b3e9266c + v1.23.5: 253b9db2299b09b91e4c09781ce1d2db6bad2099cf16ba210245159f48d0d5e4 + v1.23.4: ec3db57edcce219c24ef37f4a6a2eef5a1543e4a9bd15e7ecc993b9f74950d91 + v1.23.3: 
8f9d2dd992af82855fbac2d82e030429b08ba7775e4fee7bf043eb857dfb0317 + v1.23.2: c3c4be17910935d234b776288461baf7a9c6a7414d1f1ac2ef8d3a1af4e41ab6 + v1.23.1: 7ff47abf62096a41005d18c6d482cf73f26b613854173327fa9f2b98720804d4 + v1.23.0: 4756ff345dd80704b749d87efb8eb294a143a1f4a251ec586197d26ad20ea518 + ppc64le: + v1.25.5: 3071e26e648ff50880d699ccabd677537b9e2762d1ece9e11401adde664f8e28 + v1.25.4: 3d4806fae6f39f091ea3d9fb195aa6d3e1ef779f56e485b6afbb328c25e15bdc + v1.25.3: 447a8b34646936bede22c93ca85f0a98210c9f61d6963a7d71f7f6a5152af1d1 + v1.25.2: a45dc00ac3a8074c3e9ec6a45b63c0a654529a657d929f28bd79c550a0d213d7 + v1.25.1: c1e3373ac088e934635fb13004a21ada39350033bfa0e4b258c114cb86b69138 + v1.25.0: 8015f88d1364cf77436c157de8a5d3ab87f1cb2dfaa9289b097c92a808845491 + v1.24.9: 3011fee2b8256e54efa24f3fc294642a6106a483722d89e82aa962a4435c86b2 + v1.24.8: 58ee62ed2fd4858d308ba672183ea0704555d977892510042fc2108da54cb93c + v1.24.7: 621ce04d0cb1c66065303d062bf9ac248225b8428b1adbca3f6fa6dd2eda13cc + v1.24.6: ea9068c28a0107f5e1317ef8ba3a23965d95ee57db6fa71ee27433cdaa0fe33c + v1.24.5: 56844b2594212e81d7cd4470f81da5d0f79876f044ee6d1707166fe76fdcb03a + v1.24.4: 38475815448bd5d43e893b6a9ac9fd3ae8b0dbddf8a7ba92d3f83437b5c1b916 + v1.24.3: 0bfb73c1932c8593ef6281efc6d16bf440275fed1272466f76101ea0f0971907 + v1.24.2: 43e9354dfc46b6d3579a6c9a3e49a2f079fec8e63c3ed998143ab2f05790d132 + v1.24.1: c59319571efe34ad9bcc4edfe89f5e324d9026d1c3182d86cadc00cfc77f7a06 + v1.24.0: d41d62f6aeff9f8f9b1a1390ed2b17994952966741d6675af8410799bca38931 + v1.23.15: f149c45853bda1f9353ae4664d2a02caa9ec4ccfb789870e4004519316714eef + v1.23.14: 2d71172abd71f3b1b3a8361c5cc55ec89b031052b2f91d64133b278e2b894a91 + v1.23.13: 444c646dc94dd7f7541a91ddc16a0da7259e345e1f84ec648077f447626844a2 + v1.23.12: e14a9dd3e3615e781d1de9000b250267eddfbab5ba46432ad2aa9108a5992e6a + v1.23.11: 64b02bc0f17b9df2b7ca8006d6cb6c1345f32fe6e748fcb6cbe9c4b406b116f6 + v1.23.10: a8f742b9b1c0b1a70719da6ea52e92d276b5ad6c59db0070aacdc474292c7e7a + v1.23.9: 6b05833c938c1d31e7450e93aebff561dfaa43eacafde1a011e0945ec2114fec + v1.23.8: f07b6194add802e2e5c5905a79ef744118ccb82ebcbf4e402a11bdb478de2c0f + v1.23.7: e011d7ad6aa01c5d1858ee88829d4a46b66dae10602615f46a7d4a0f9d9c2d6e + v1.23.6: 04461a5f75c2734ec5989f03bf72d766fb8d55021f1625b671bf805a62882089 + v1.23.5: 82e24cc48f23c0bfa3e90cce14b7ae0e0fb28a9ed9d2827e8ca503588f7ea1b5 + v1.23.4: f23611aea7130ba423268983ba1ce6db9451f69069dd16a8dbf013ab46237196 + v1.23.3: 055a9c9e8679c9ff963e43d1dc7d7aa3670a8aa56b96725de85c816e682c24bb + v1.23.2: 6fdee30ee13149845aac8d110ad6a1894bb35f953e1ecb562ce7c59f63329dca + v1.23.1: 9c3dc8ba6888b610e204d4066f0460d5b24037219300bb5f5b254ea7e8d5a4d1 + v1.23.0: 25c841e08ab2655486813287aa97cadf7524277040599e95c32ed9f206308753 +kubectl_checksums: + arm: + v1.25.5: fec9a0f7cd922744935dd5dfc2366ab307424ef4c533299d67edf7de15346e51 + v1.25.4: 49ab7f05bb27a710575c2d77982cbfb4a09247ec94a8e21af28a6e300b698a44 + v1.25.3: 59e1dba0951f19d4d18eb04db50fcd437c1d57460f2008bc03e668f71b8ea685 + v1.25.2: d6b581a41b010ef86a9364102f8612d2ee7fbc7dd2036e40ab7c85adb52331cb + v1.25.1: e8c6bfd8797e42501d14c7d75201324630f15436f712c4f7e46ce8c8067d9adc + v1.25.0: 0b907cfdcabafae7d2d4ac7de55e3ef814df999acdf6b1bd0ecf6abbef7c7131 + v1.24.9: a64fbc95696f982cb55622aeb9ef85a121b1473c8e52296768bb3d82ca53c85c + v1.24.8: b74c8ac75804fd35a14fab7f637acaf5c0cf94dfd0f5ce8d755104b1a1b2e43b + v1.24.7: 1829c5bb2ef30df6e46f99aa5c87a0f510a809f9169c725b3da08455bcf7f258 + v1.24.6: 7ca8fd7f5d6262668c20e3e639759e1976590ed4bd4fece62861dd376c2168de + v1.24.5: 
3ca0fcb90b715f0c13eafe15c9100495a8648d459f1281f3340875d1b0b7e78f + v1.24.4: 060c0bb55aa3284c489cf8224ab10296d486b5a2e7f3e5d6440c9382698bf68a + v1.24.3: 4ae94095580973931da53fd3b823909d85ca05055d6300f392d9dc9e5748d612 + v1.24.2: c342216e1d32c28953e13f28ced387feda675b969a196ed69eaeda137fa7486a + v1.24.1: 42e880ff20a55e8ec49187d54e2c1367226d220a0a6a1797e7fbf97426762f4f + v1.24.0: 410fc0b3c718f8f431fe4f7d5820bf8133b16ffb76187a53fa90929a77a38cbc + v1.23.15: 0fe6641715ee98a3d8899edd539322fa07762f8d65a35db23184ef06c1ff8111 + v1.23.14: 071f390f560320c4caff188d8f6f21c1b3258dfed600184f39d054d1d0673f69 + v1.23.13: c32baf45ad141f967b4877c7151aeee1ae296eebdbcb7a5200d418bd77c284b2 + v1.23.12: 94e946dcd1c2f7c8c9e3e022202762a36dab604b861b50bdcbdfb2c719731bd9 + v1.23.11: 6eaffb8f64929e888137366cf2aa7fd1df2cf851de4f96f62fe70ed4d79f0ef7 + v1.23.10: b2156478b03b90c0f72fd386ceab2e78b7cf32eab9d9b4696c28d2bb45c9d3ec + v1.23.9: 44caabd847c147ded79aa91daa49a5e0ea68ce4a0833b0733df1c8313375ff80 + v1.23.8: c4a2be3c61f40d4b1b0f61d509b0e361e85f10b7d2a98120d180c023ede7728f + v1.23.7: bc74849aabe50feb71333e41130ecf1122c0f79705a5fdc9d1ec2fce621bf749 + v1.23.6: 30d8e9656334b57e78c8dbc5d5f245a64b9a74c4fd03db47182fa7a21c2f5e32 + v1.23.5: 58420bc549e1683a4529066b38b2ac657611ed3b70041be78fba3b29401415db + v1.23.4: bde3d7801cfe444d4e226d4669dfd518e4687e16c99efddd016c4bf3d529b198 + v1.23.3: bc41382fbd3f6b33cb5ccb1819c5a38f2e6f3c9ce22acfedd6970b0b9b7748da + v1.23.2: 6521719af33342f00ebb6cf020848e25152a63ed5f35a94440c08373b7a36173 + v1.23.1: 52001ed48e9e1c8b8623f3e6b0242111227721e5ddd08fa18046c65c406e35a5 + v1.23.0: 6152216d88fa4d32da58c67f78b63b3b99bf4d4d726ffb9fb74ea698dccc8644 + arm64: + v1.25.5: 7bc650f28a5b4436df2abcfae5905e461728ba416146beac17a2634fa82a6f0a + v1.25.4: a8e9cd3c6ca80b67091fc41bc7fe8e9f246835925c835823a08a20ed9bcea1ba + v1.25.3: cfd5092ce347a69fe49c93681a164d9a8376d69eef587da894207c62ec7d6a5d + v1.25.2: b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5 + v1.25.1: 73602eabf20b877f88642fafcbe1eda439162c2c1dbcc9ed09fdd4d7ac9919ea + v1.25.0: 24db547bbae294c5c44f2b4a777e45f0e2f3d6295eace0d0c4be2b2dfa45330d + v1.24.9: f59c522cf5f9db826c64f28364946acb6bcb6957669291fa29b926b7812b5bbe + v1.24.8: b8ac2abfcb1fa04695d18098558ff483ec2c2488877b5abc4035a543544cdcb1 + v1.24.7: 4b138a11b13210ce1731e06918f8fff6709c004c6fb6bec28544713854de9fe8 + v1.24.6: 2f62e55960b02bb63cbc9154141520ac7cf0c2d55b45dd4a72867971e24a7219 + v1.24.5: a5e348758c0f2b22adeb1b663b4b66781bded895d8ea2a714eb1de81fb00907a + v1.24.4: 0aa4a08ff81efe3fc1a8ef880ca2f8622e3b1f93bf622583d7b9bfe3124afe61 + v1.24.3: bdad4d3063ddb7bfa5ecf17fb8b029d5d81d7d4ea1650e4369aafa13ed97149a + v1.24.2: 5a4c3652f08b4d095b686e1323ac246edbd8b6e5edd5a2626fb71afbcd89bc79 + v1.24.1: b817b54183e089494f8b925096e9b65af3a356d87f94b73929bf5a6028a06271 + v1.24.0: 449278789de283648e4076ade46816da249714f96e71567e035e9d17e1fff06d + v1.23.15: f619f8b4811d60edef692f1d888609cc279a7d8223e50e1c0dc959c7b9250e79 + v1.23.14: 857716aa5cd24500349e5de8238060845af34b91ac4683bd279988ad3e1d3efa + v1.23.13: 950626ae35fca6c26096f97cac839d76e2f29616048ad30cec68f1ff003840f2 + v1.23.12: 88ebbc41252b39d49ce574a5a2bb25943bb82e55a252c27fe4fc096ce2dbb437 + v1.23.11: 9416cc7abaf03eb83f854a45a41986bf4e1232d129d7caafc3101a01ca11b0e3 + v1.23.10: d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b + v1.23.9: 66659f614d06d0fe80c5eafdba7073940906de98ea5ee2a081d84fa37d8c5a21 + v1.23.8: b293fce0b3dec37d3f5b8875b8fddc64e02f0f54f54dd7742368973c52530890 + v1.23.7: 
5d59447a5facd8623a79c2a296a68a573789d2b102b902aafb3a730fc4bb0d3b + v1.23.6: 4be771c8e6a082ba61f0367077f480237f9858ef5efe14b1dbbfc05cd42fc360 + v1.23.5: 15cd560c04def7bbe5ee3f6f75e2cfd3913371c7e76354f4b2d5d6f536b70e39 + v1.23.4: aa45dba48791eeb78a994a2723c462d155af4e39fdcfbcb39ce9c96f604a967a + v1.23.3: 6708d7a701b3d9ab3b359c6be27a3012b1c486fa1e81f79e5bdc71ffca2c38f9 + v1.23.2: 6e7bb8ddc5fc8fa89a4c31aba02942718b092a5107585bd09a83c95039c7510b + v1.23.1: c0c24c7f6a974390e15148a575c84878e925f32328ff96ae173ec762678e4524 + v1.23.0: 1d77d6027fc8dfed772609ad9bd68f611b7e4ce73afa949f27084ad3a92b15fe + amd64: + v1.25.5: 6a660cd44db3d4bfe1563f6689cbe2ffb28ee4baf3532e04fff2d7b909081c29 + v1.25.4: e4e569249798a09f37e31b8b33571970fcfbdecdd99b1b81108adc93ca74b522 + v1.25.3: f57e568495c377407485d3eadc27cda25310694ef4ffc480eeea81dea2b60624 + v1.25.2: 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb + v1.25.1: 9cc2d6ce59740b6acf6d5d4a04d4a7d839b0a81373248ef0ce6c8d707143435b + v1.25.0: e23cc7092218c95c22d8ee36fb9499194a36ac5b5349ca476886b7edc0203885 + v1.24.9: 7e13f33b7379b6c25c3ae055e4389eb3eef168e563f37b5c5f1be672e46b686e + v1.24.8: f93c18751ec715b4d4437e7ece18fe91948c71be1f24ab02a2dde150f5449855 + v1.24.7: 2d88e56d668b1d7575b4783f22d512e94da432f42467c3aeac8a300b6345f12d + v1.24.6: 3ba7e61aecb19eadfa5de1c648af1bc66f5980526645d9dfe682d77fc313b74c + v1.24.5: 3037f2ec62956e7146fc86defb052d8d3b28e2daa199d7e3ff06d1e06a6286ed + v1.24.4: 4a76c70217581ba327f0ad0a0a597c1a02c62222bb80fbfea4f2f5cb63f3e2d8 + v1.24.3: 8a45348bdaf81d46caf1706c8bf95b3f431150554f47d444ffde89e8cdd712c1 + v1.24.2: f15fb430afd79f79ef7cf94a4e402cd212f02d8ec5a5e6a7ba9c3d5a2f954542 + v1.24.1: 0ec3c2dbafc6dd27fc8ad25fa27fc527b5d7356d1830c0efbb8adcf975d9e84a + v1.24.0: 94d686bb6772f6fb59e3a32beff908ab406b79acdfb2427abdc4ac3ce1bb98d7 + v1.23.15: adab29cf67e04e48f566ce185e3904b5deb389ae1e4d57548fcf8947a49a26f5 + v1.23.14: 13ce4b18ba6e15d5d259249c530637dd7fb9722d121df022099f3ed5f2bd74cd + v1.23.13: fae6957e6a7047ad49cdd20976cd2ce9188b502c831fbf61f36618ea1188ba38 + v1.23.12: b150c7c4830cc3be4bedd8998bf36a92975c95cd1967b4ef2d1edda080ffe5d9 + v1.23.11: cf04ad2fa1cf118a951d690af0afbbe8f5fc4f02c721c848080d466e6159111e + v1.23.10: 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7 + v1.23.9: 053561f7c68c5a037a69c52234e3cf1f91798854527692acd67091d594b616ce + v1.23.8: 299803a347e2e50def7740c477f0dedc69fc9e18b26b2f10e9ff84a411edb894 + v1.23.7: b4c27ad52812ebf3164db927af1a01e503be3fb9dc5ffa058c9281d67c76f66e + v1.23.6: 703a06354bab9f45c80102abff89f1a62cbc2c6d80678fd3973a014acc7c500a + v1.23.5: 715da05c56aa4f8df09cb1f9d96a2aa2c33a1232f6fd195e3ffce6e98a50a879 + v1.23.4: 3f0398d4c8a5ff633e09abd0764ed3b9091fafbe3044970108794b02731c72d6 + v1.23.3: d7da739e4977657a3b3c84962df49493e36b09cc66381a5e36029206dd1e01d0 + v1.23.2: 5b55b58205acbafa7f4e3fc69d9ce5a9257be63455db318e24db4ab5d651cbde + v1.23.1: 156fd5e7ebbedf3c482fd274089ad75a448b04cf42bc53f370e4e4ea628f705e + v1.23.0: 2d0f5ba6faa787878b642c151ccb2c3390ce4c1e6c8e2b59568b3869ba407c4f + ppc64le: + v1.25.5: 816b6bfcbe312a4e6fbaaa459f52620af307683470118b9a4afb0f8e1054beb8 + v1.25.4: 23f5cec67088fa0c3efc17110ede5f6120d3ad18ad6b996846642c2f46b43da0 + v1.25.3: bd59ac682fffa37806f768328fee3cb791772c4a12bcb155cc64b5c81b6c47ce + v1.25.2: 1e3665de15a591d52943e6417f3102b5d413bc1d86009801ad0def04e8c920c5 + v1.25.1: 957170066abc4d4c178ac8d84263a191d351e98978b86b0916c1b8c061da8282 + v1.25.0: dffe15c626d7921d77e85f390b15f13ebc3a9699785f6b210cd13fa6f4653513 + v1.24.9: 
8893337877ae82280fd52b3ef2c9ea6a1e477a9f6ee3b04ea3ddbd00da2c85a0 + v1.24.8: 9ed85938808b6ae52a2d0b5523dc3122a7dcf8857d609b7d79a1733c72344dc1 + v1.24.7: a68ec0c8ed579324037fc0a3bafa9d10184e6ff3ca34bfffdcb78f9f02bcb765 + v1.24.6: 448009693a97428aec7e60cc117079724f890e3a46d0aa54accdb56f33ca0f3d + v1.24.5: 0861df1c77336fbe569887a884d62a24fcb6486d43798a8767dba7e5865c3c98 + v1.24.4: cfd7151471dd9878d48ab8d7bc3cf945c207e130568ee778f1aed9ceb84afd44 + v1.24.3: 893a83cd636650d1ad50be0e9a2517f2f4434c35646dacd9160b66446aee404e + v1.24.2: cacf9b4a539853158b885c39fa714710767aa6c12804fccb7de6b037228b811f + v1.24.1: 8812543e6c34101d37ad9d7a7edb91621db0fe992b16bd9beb8e5ddb4c7792c5 + v1.24.0: 153a1ca1593ef4cb56b16922f8e229986a621d396112f0cfad6fa568ad00fa75 + v1.23.15: ec5488895862a8c0c4a45558f395801ab40e55956831d9e56ade1dd1ba3968ec + v1.23.14: 291127abe519e4a1c0193960d361ba5a58c21cddb4cfff8ae4e67c001671849d + v1.23.13: 785d620dc77d10ce49218894225e935e55d08bb3842ae75c11cb41a814aca9ea + v1.23.12: f9a8efede8872c23c54c44f09657fa522e99786f3dc73ba7d6d928e9b3c7dc1a + v1.23.11: 52556d4e8ba19e8b0a65e4ac70203922b42b054647ec59a0177a2c4f61b903e7 + v1.23.10: fc0867d7412d7698029413a8307d8e74748d47e402c075e8d6cc79ed772fb232 + v1.23.9: 141532b62ce75860975d5913bfbf784a09b0abc83ca7d31a6b1eddf28866ce67 + v1.23.8: 599ed10fc7e8fcb5884485cecf690c7645947d1f144b66d717a3f064f11c0b8f + v1.23.7: dab46d2ede0a930f1530ebf857da538ca0879bdb72fc71070d518849c45b9fae + v1.23.6: 3fdba4f852046b0ee782048cad9c1fe4db9c98cb882ff78b5bca4632984c7700 + v1.23.5: d625dbea2879d12ca1c61b1c00084405a34514abaea1096110c8c8661cfac84f + v1.23.4: 1648768124315c5cbcfa6c24a31a34037558c09b91ead60267e13d6c7f3b597b + v1.23.3: 7297e595ed549bac93decda41c9830a3e032fd374467d679c98ef35dcdd1d2aa + v1.23.2: 97d50dc4ff0a6c70bbfcbd45f6959e6201c6317392b2894008017380669f6015 + v1.23.1: 514e50afdb5b8953adfffe4941e903748348830bdd82805fd4489c3334a02a4a + v1.23.0: e96f2b16d8a10fe6531dfac9143efa4960432cf2ae8b26ffd174fa00eb28a851 +kubeadm_checksums: + arm: + v1.25.5: c1753bffff88e3f192acc46f2ea4b7058a920c593f475cfb0ea015e6d9667ee1 + v1.25.4: a20379513e5d91073a52a0a3e7a9201e2d7b23daa55d68456465d8c9ef69427c + v1.25.3: 3f357e1e57936ec7812d35681be249b079bbdc1c7f13a75e6159379398e37d5e + v1.25.2: 2f794569c3322bb66309c7f67126b7f88155dfb1f70eea789bec0edf4e10015e + v1.25.1: ecb7a459ca23dfe527f4eedf33fdb0df3d55519481a8be3f04a5c3a4d41fa588 + v1.25.0: 67b6b58cb6abd5a4c9024aeaca103f999077ce6ec8e2ca13ced737f5139ad2f0 + v1.24.9: ba58cb05a6bcb7b974223df7de6b0af38e4eb78b944b775de5166337288cf172 + v1.24.8: 5117a0f3b652950bee328ee9583504fe50c012290436e56f6f4b9d7219ad2591 + v1.24.7: c0a9e6c08cad0b727f06bb3b539d55c65ea977be68fe471f6a9f73af3fbcb275 + v1.24.6: 760f0fc195f00ca3d1612e0974461ab937c25aa1e7a2f8d2357cd1336b2ecf3a + v1.24.5: 973f1ad7da9216fe3e0319a0c4fcb519a21a773cd39a0a445e689bea3d4a27c7 + v1.24.4: e0c1510ab2ed1cd555abad6f226454a3206aaaf20474da7dcf976ddc86a065d4 + v1.24.3: dc90c93e2305a7babafc41185a43435a9f3af2ef5d546bbd06e6553898e43d9e + v1.24.2: d4bead61c1ba03113281ab96b21530b32e96eea24220bd2aebe1abdec739c266 + v1.24.1: 1c0b22c941badb40f4fb93e619b4a1c5e4bba7c1c7313f7c7e87d77150f35153 + v1.24.0: c463bf24981dea705f4ee6e547abd5cc3b3e499843f836aae1a04f5b80abf4c2 + v1.23.15: 2e2a09d8e76202007b9bf97010b62de0867dfe6df83f155c3b703ea8ee0bc68f + v1.23.14: de222c7f05e90ae263b988e191a1b907c593c4ddc363277dae24d91ba694c731 + v1.23.13: 54d0f4d7a65abf610606b0538005ab5f177566587a81af6b0bc24ded2f8e305c + v1.23.12: 6da38118a7a1570ad76389f0492c11f8ae8e2068395773b89a2b0442d02e604c + v1.23.11: 
4ea0f63d245d01eccc5c3f2c849e2c799392d5e37c9bc4c0ec7a06a5d3722622 + v1.23.10: e0db03e8c4c06c3c3e5e29558fa316b0b56ac9d2801751c4a36b2e3f84455b1f + v1.23.9: fa265d592d4f85b083919baa80b232deae20acaf2a20095a9c417c4d5324e002 + v1.23.8: 24d159ac19b519453050a977d2f238873c328e3a9dd3dfe524a32f421b64dadb + v1.23.7: 18da04d52a05f2b1b8cd7163bc0f0515a4ee793bc0019d2cada4bbf3323d4044 + v1.23.6: da2221f593e63195736659e96103a20e4b7f2060c3030e8111a4134af0d37cfb + v1.23.5: 9ea3e52cb236f446a33cf69e4ed6ac28a76103c1e351b2675cb9bfcb77222a61 + v1.23.4: 9ca72cf1e6bbbe91bf634a18571c84f3fc36ba5fcd0526b14432e87b7262a5ee + v1.23.3: cb2513531111241bfb0f343cff18f7b504326252ae080bb69ad1ccf3e31a2753 + v1.23.2: 63a6ca7dca76475ddef84e4ff84ef058ee2003d0e453b85a52729094025d158e + v1.23.1: 77baac1659f7f474ba066ef8ca67a86accc4e40d117e73c6c76a2e62689d8369 + v1.23.0: b59790cdce297ac0937cc9ce0599979c40bc03601642b467707014686998dbda + arm64: + v1.25.5: 426dddad1c60b7617f4095507cef524d76ec268a0201c1df154c108287a0b98e + v1.25.4: 3f5b273e8852d13fa39892a30cf64928465c32d0eb741118ba89714b51f03cd5 + v1.25.3: 61bb61eceff78b44be62a12bce7c62fb232ce1338928e4207deeb144f82f1d06 + v1.25.2: 437dc97b0ca25b3fa8d74b39e4059a77397b55c1a6d16bddfd5a889d91490ce0 + v1.25.1: f4d57d89c53b7fb3fe347c9272ed40ec55eab120f4f09cd6b684e97cb9cbf1f0 + v1.25.0: 07d9c6ffd3676502acd323c0ca92f44328a1f0e89a7d42a664099fd3016cf16b + v1.24.9: 57c61562a9de4cc78f276f665d7f04666607b17e3ad0fa6c14be64ad85c80951 + v1.24.8: 6f35562001e859f2a76a89c0da61f09433cc6628ccbc3992e82a977e0e348870 + v1.24.7: ee946d82173b63f69be9075e218250d4ab1deec39d17d600b16b6743e5dca289 + v1.24.6: 211b8d1881468bb673b26036dbcfa4b12877587b0a6260ffd55fd87c2aee6e41 + v1.24.5: a68c6dd24ef47825bb34a2ad430d76e6b4d3cbe92187363676993d0538013ac2 + v1.24.4: 18de228f6087a2e5243bffcd2cc88c40180a4fa83e4de310ad071b4620bdd8b6 + v1.24.3: ea0fb451b69d78e39548698b32fb8623fad61a1a95483fe0add63e3ffb6e31b5 + v1.24.2: bd823b934d1445a020f8df5fe544722175024af62adbf6eb27dc7250d5db0548 + v1.24.1: 04f18fe097351cd16dc91cd3bde979201916686c6f4e1b87bae69ab4479fda04 + v1.24.0: 3e0fa21b8ebce04ca919fdfea7cc756e5f645166b95d6e4b5d9912d7721f9004 + v1.23.15: 8bb17c69ad71bb1230dbe1e598c6ae07390b57e3ba32928f28e83742105424d0 + v1.23.14: 7c21c1fa6a852b10ddea7bd1797ce8b4498d6898014d17d20748307e510a0826 + v1.23.13: 462971d5822c91598754dfaa9c4c8d46a8c74aefef0f4dbbc8be31c4f0d18855 + v1.23.12: d05f6765a65f7541d07aad989ee80cd730c395f042afbe0526f667ea1a0b2947 + v1.23.11: 329d9aa9461baf4a7b7225e664ec1ecd61512b937e1f160f9a303bc0f0d44bbb + v1.23.10: 42e957eebef78f6462644d9debc096616054ebd2832e95a176c07c28ebed645c + v1.23.9: a0a007023db78e5f78d3d4cf3268b83f093201847c1c107ffb3dc695f988c113 + v1.23.8: 9b3d8863ea4ab0438881ccfbe285568529462bc77ef4512b515397a002d81b22 + v1.23.7: 65fd71aa138166039b7f4f3695308064abe7f41d2f157175e6527e60fb461eae + v1.23.6: a4db7458e224c3a2a7b468fc2704b31fec437614914b26a9e3d9efb6eecf61ee + v1.23.5: 22a8468abc5d45b3415d694ad52cc8099114248c3d1fcf4297ec2b336f5cc274 + v1.23.4: 90fd5101e321053cdb66d165879a9cde18f19ba9bb8eae152fd4f4fcbe497be1 + v1.23.3: 5eceefa3ca737ff1532f91bdb9ef7162882029a2a0300b4348a0980249698398 + v1.23.2: a29fcde7f92e1abfe992e99f415d3aee0fa381478b4a3987e333438b5380ddff + v1.23.1: eb865da197f4595dec21e6fb1fa1751ef25ac66b64fa77fd4411bbee33352a40 + v1.23.0: 989d117128dcaa923b2c7a917a03f4836c1b023fe1ee723541e0e39b068b93a6 + amd64: + v1.25.5: af0b25c7a995c2d208ef0b9d24b70fe6f390ebb1e3987f4e0f548854ba9a3b87 + v1.25.4: b8a6119d2a3a7c6add43dcf8f920436bf7fe71a77a086e96e40aa9d6f70be826 + v1.25.3: 
01b59ce429263c62b85d2db18f0ccdef076b866962ed63971ff2bd2864deea7b + v1.25.2: 63ee3de0c386c6f3c155874b46b07707cc72ce5b9e23f336befd0b829c1bd2ad + v1.25.1: adaa1e65c1cf9267a01e889d4c13884f883cf27948f00abb823f10486f1a8420 + v1.25.0: 10b30b87af2cdc865983d742891eba467d038f94f3926bf5d0174f1abf6628f8 + v1.24.9: 20406971ae71886f7f8ee7b9a33c885391ae64da561fb679d5819f2ccc19ac9f + v1.24.8: 9fea42b4fb5eb2da638d20710ebb791dde221e6477793d3de70134ac058c4cc7 + v1.24.7: 8b67319d28bf37e8e7c224954dc778cbe946f2bb0ed86975d8caa83d51c955ee + v1.24.6: 7f4443fd42e0e03f6fd0c7218ca7e2634c9255d5f9d7c581fe362e19098aec4c + v1.24.5: 3b9c1844ec0fc3c94015d63470b073a7b219082b6a6424c6b0da9cf97e234aeb + v1.24.4: 9ec08e0905c0a29a68676ba9f6dd7de73bef13cfa2b846a45e1c2189572dc57c + v1.24.3: 406d5a80712c45d21cdbcc51aab298f0a43170df9477259443d48eac116998ff + v1.24.2: 028f73b8e7c2ae389817d34e0cb829a814ce2fac0a535a3aa0708f3133e3e712 + v1.24.1: 15e3193eecbc69330ada3f340c5a47999959bc227c735fa95e4aa79470c085d0 + v1.24.0: 5e58a29eaaf69ea80e90d9780d2a2d5f189fd74f94ec3bec9e3823d472277318 + v1.23.15: 63329e21be8367628f71978cfc140c74ce9cb0336abd9c4802ca7d20d5dec3c3 + v1.23.14: 46c847e2699839b9ccf6673f0b946c4778a3a2e8e463d15854ba30d3f0cbd87a + v1.23.13: ff86af2b5fa979234dd3f9e7b04ec7d3017239a58417397153726d8077c4ac89 + v1.23.12: bf45d00062688d21ff479bf126e1259d0ce3dee1c5c2fcd803f57497cd5e9e83 + v1.23.11: 2f10bd298a694d3133ea19192b796a106c282441e4148c114c39376042097692 + v1.23.10: 43d186c3c58e3f8858c6a22bc71b5441282ac0ccbff6f1d0c2a66ee045986b64 + v1.23.9: 947571c50ab840796fdd4ffb129154c005dfcb0fe83c6eff392d46cf187fd296 + v1.23.8: edbd60fd6a7e11c71f848b3a6e5d1b5a2bb8ebd703e5490caa8db267361a7b89 + v1.23.7: d7d863213eeb4791cdbd7c5fd398cf0cc2ef1547b3a74de8285786040f75efd2 + v1.23.6: 9213c7d738e86c9a562874021df832735236fcfd5599fd4474bab3283d34bfd7 + v1.23.5: 8eebded187ee84c97003074eaa347e34131fef3acdf3e589a9b0200f94687667 + v1.23.4: c91912c9fd34a50492f889e08ff94c447fdceff150b588016fecc9051a1e56b8 + v1.23.3: 57ec7f2921568dcf4cda0699b877cc830d49ddd2709e035c339a5afc3b83586f + v1.23.2: 58487391ec37489bb32fe532e367995e9ecaeafdb65c2113ff3675e7a8407219 + v1.23.1: 4d5766cb90050ee84e15df5e09148072da2829492fdb324521c4fa6d74d3aa34 + v1.23.0: e21269a058d4ad421cf5818d4c7825991b8ba51cd06286932a33b21293b071b0 + ppc64le: + v1.25.5: d69b73af9e327cba5c771daf8320821ccda703f38506ee4ec5b1ff3776a6eb8f + v1.25.4: 9703e40cb0df48052c3cfb0afc85dc582e600558ab687d6409f40c382f147976 + v1.25.3: 8fe9a69db91c779a8f29b216134508ba49f999fa1e36b295b99444f31266da17 + v1.25.2: a53101ed297299bcf1c4f44ec67ff1cb489ab2d75526d8be10c3068f161601a7 + v1.25.1: c7e2c8d2b852e1b30894b64875191ce388a3a416d41311b21f2d8594872fe944 + v1.25.0: 31bc72e892f3a6eb5db78003d6b6200ba56da46a746455991cb422877afc153d + v1.24.9: abf04047a45f602e455ab7df92ae5500b543fe5ef13fb67d050f3d28dfd1906c + v1.24.8: eccd3fd892b253a8632f3c4a917c19fff4982dd436f8f7de94868a0062c0bf2b + v1.24.7: 29a53be9a74dcb01ea68b0a385bdd9b510f9792955f9f7c93ed608c851b5dc32 + v1.24.6: 9d73bfde24ee9781fcca712658f297a041408b534f875f5e093222ed64c91c15 + v1.24.5: f416c45ca5826ea3ff13be393911424a0fba3aa30b5557d3d32541551566142a + v1.24.4: 00fe93a291ddca28188056e597fc812b798706ea19b2da6f8aaf688f6ea95c0e + v1.24.3: 1cb40441d8982362c6d4ffdd9a980a4563dcc5cccc1bb1d7370f0bd7340484d2 + v1.24.2: 452922d2ec9bfa5e085a879174d1d99adb6212598f3c8ffe15b5e7c3a4e128bb + v1.24.1: 74e84b4e6f2c328a169dab33956bc076a2c1670c638764b9163b1080dcb68137 + v1.24.0: 286de74330365bf660d480297a7aba165a956f6fbb98acd11df2f672e21d7b5c + v1.23.15: 
18eaf8177720fbed8c09d3e83e6066891ca9fc629986b35a2012cafe9febd5d0 + v1.23.14: 529811ef359095fe33a1d94d20fca312c25a1513baf799513c47711d34bd73ad + v1.23.13: 3dbf72fdfc108bf41cab151ac340b336ba17b14fa008b15d84ce223b30391914 + v1.23.12: ccae0a4c81a60e50219954393432c5f4d4692847c866ca497a48a1118f417d0d + v1.23.11: 9930cfb4ae7663f145c1d08e06c49ab60e28a6613ac5c7b19d047f15c1e24c22 + v1.23.10: c9f484bd8806f50ce051a28776ef92e3634a1cdc0a47c9483ee77c34cde845c1 + v1.23.9: 03643613aa6afc6251270adc7681029d4fc10e8a75d553a1d8e63cf5b5a2a8fe + v1.23.8: dcfb69f564b34942136cc4cc340b1c800e3e610292e517e68ab5e0157b9510af + v1.23.7: 525d43db6d24ac048606cb63ff0f737d87473deff66d4c43ed5ae716ed4fb263 + v1.23.6: 0b975ac27fa794134a5a25dfbf6df598e2b62e483134326788443131f6d8e5e4 + v1.23.5: bec93d18fd5e5ef6d5da3d18edb282e58a64ff34ec3544d82dc31a3255d9ed1d + v1.23.4: 9c681254bf7cfce8b94326364d677f1944c0afb070f666f7fd438bd37133f7cc + v1.23.3: fd87d972db45dd6f623dd4ca06075e7e697f1bdaa7936c5c06924d1189ba7ff8 + v1.23.2: 2d76c4d9795e25867b9b6fe7853f94efb8c2f2b3052adab4073fddca93eedc01 + v1.23.1: 6b645c868834197bcb25104f468c601477967341aba6326bdf5d0957dcaa9edc + v1.23.0: 895c84055bca698f50ecdf1fc01d2f368563f77384b1dd00bdacbf6d0c825cc1 + +etcd_binary_checksums: + # Etcd does not have arm32 builds at the moment, having some dummy value is + # required to avoid "no attribute" error + arm: + v3.5.6: 0 + arm64: + v3.5.6: 888e25c9c94702ac1254c7655709b44bb3711ebaabd3cb05439f3dd1f2b51a87 + amd64: + v3.5.6: 4db32e3bc06dd0999e2171f76a87c1cffed8369475ec7aa7abee9023635670fb + ppc64le: + v3.5.6: e235cb885996b8aac133975e0077eaf0a2f8dc7062ad052fa7395668a365906b + +cni_binary_checksums: + arm: + v1.0.1: d35e3e9fd71687fc7e165f7dc7b1e35654b8012995bbfd937946b0681926d62d + v1.1.1: 84f97baf80f9670a8cd0308dedcc8405d2bbc65166d670b48795e0d1262b4248 + arm64: + v1.0.1: 2d4528c45bdd0a8875f849a75082bc4eafe95cb61f9bcc10a6db38a031f67226 + v1.1.1: 16484966a46b4692028ba32d16afd994e079dc2cc63fbc2191d7bfaf5e11f3dd + amd64: + v1.0.1: 5238fbb2767cbf6aae736ad97a7aa29167525dcd405196dfbc064672a730d3cf + v1.1.1: b275772da4026d2161bf8a8b41ed4786754c8a93ebfb6564006d5da7f23831e5 + ppc64le: + v1.0.1: f078e33067e6daaef3a3a5010d6440f2464b7973dec3ca0b5d5be22fdcb1fd96 + v1.1.1: 1551259fbfe861d942846bee028d5a85f492393e04bcd6609ac8aaa7a3d71431 + +calicoctl_binary_checksums: + arm: + v3.24.5: 0 + v3.23.3: 0 + v3.22.4: 0 + v3.21.6: 0 + amd64: + v3.24.5: 01e6c8a2371050f9edd0ade9dcde89da054e84d8e96bd4ba8cf82806c8d3e8e7 + v3.23.3: d9c04ab15bad9d8037192abd2aa4733a01b0b64a461c7b788118a0d6747c1737 + v3.22.4: cc412783992abeba6dc01d7bc67bdb2e3a0cf2f27fc3334bdfc02d326c3c9e15 + v3.21.6: 20335301841ba1dd0795e834ecce0d8e6b89f0b01d781dcc95339419462b3b67 + arm64: + v3.24.5: 2d56b768ed346129b0249261db27d97458cfb35f98bd028a0c817a23180ab2d2 + v3.23.3: 741b222f9bb10b7b5e268e5362796061c8862d4f785bb6b9c4f623ea143f4682 + v3.22.4: e84ba529091818282012fd460e7509995156e50854781c031c81e4f6c715a39a + v3.21.6: 8f4ca86e21364eb23fb4676a0a1ed9e751c8a044360b22eae9ee6af7e81c3d59 + ppc64le: + v3.24.5: 4c40d1703a31eb1d1786287fbf295d614eb9594a4748e505a03a2fbb6eda85b4 + v3.23.3: f83efcd8d3d7c96dfe8e596dc9739eb5d9616626a6afba29b0af97e5c222575a + v3.22.4: f8672ac27ab72c1b05b0f9ae5694881ef8e061bfbcf551f964e7f0a37090a243 + v3.21.6: f7aad0409de2838ba691708943a2aeeef6fb9c02a0475293106e179dc48a4632 + +ciliumcli_binary_checksums: + arm: + v0.12.4: 8e0596d321c97a55449942c2ebd8bb0102dc6a9381919287e383b679cee8f524 + v0.12.5: 1c9a8cf8df62eb814d6c90f6ad6a1c074f991fde5b5573059d27729f12619496 + amd64: + v0.12.4: 
6b4f899fa09b6558a89a32ace3be4dedca08b7f4b76f04931ed1ffb2de8965e2 + v0.12.5: 6b2c9031e4264482b18873ad337394442b8787d6ac26e16e865d36f320c650f0 + arm64: + v0.12.4: e037f34fded56e4199e9e7ff1ce623d2516be7116a6490e02377f786acec5bda + v0.12.5: b779d4b04b23fcae30cc158ce9d29e2cad0c98bd88582c0a2c8d457c71d5c4b3 + ppc64le: + v0.12.4: 0 + v0.12.5: 0 + +calico_crds_archive_checksums: + v3.24.5: 10320b45ebcf4335703d692adacc96cdd3a27de62b4599238604bd7b0bedccc3 + v3.23.3: d25f5c9a3adeba63219f3c8425a8475ebfbca485376a78193ec1e4c74e7a6115 + v3.22.4: e72e7b8b26256950c1ce0042ac85fa83700154dae9723c8d007de88343f6a7e5 + v3.21.6: db4fa80b79b39853f0b1a04d875c110b637dd8754bf7b4cec06ae510fb8a2acd + +krew_archive_checksums: + linux: + arm: + v0.4.3: 68eb9e9f5bba29c7c19fb52bfc43a31300f92282a4e81f0c51ad26ed2c73eb03 + arm64: + v0.4.3: 0994923848882ad0d4825d5af1dc227687a10a02688f785709b03549dd34d71d + amd64: + v0.4.3: 5df32eaa0e888a2566439c4ccb2ef3a3e6e89522f2f2126030171e2585585e4f + ppc64le: + v0.4.3: 0 + + darwin: + arm: + v0.4.3: 0 + arm64: + v0.4.3: 22f29ce3c3c9c030e2eaf3939d2b00f0187dfdbbfaee37fba8ffaadc46e51372 + amd64: + v0.4.3: 6f6a774f03ad4190a709d7d4dcbb4af956ca0eb308cb0d0a44abc90777b0b21a + ppc64le: + v0.4.3: 0 + + windows: + arm: + v0.4.3: 0 + arm64: + v0.4.3: 0 + amd64: + v0.4.3: d1343a366a867e9de60b23cc3d8ee935ee185af25ff8f717a5e696ba3cae7c85 + ppc64le: + v0.4.3: 0 + + +helm_archive_checksums: + arm: + v3.10.3: dca718eb68c72c51fc7157c4c2ebc8ce7ac79b95fc9355c5427ded99e913ec4c + arm64: + v3.10.3: 260cda5ff2ed5d01dd0fd6e7e09bc80126e00d8bdc55f3269d05129e32f6f99d + amd64: + v3.10.3: 950439759ece902157cf915b209b8d694e6f675eaab5099fb7894f30eeaee9a2 + ppc64le: + v3.10.3: 93cdf398abc68e388d1b46d49d8e1197544930ecd3e81cc58d0a87a4579d60ed + +cri_dockerd_archive_checksums: + arm: + 0.2.2: 0 + arm64: + 0.2.2: 30e5fb2f06bd1e9fff6eddc185356cf3636d36c6c310bbd5892141e2b8e86ee3 + amd64: + 0.2.2: fbf0fe66805e0104841d0093c6ad74a5e39264616855d902a97c1ba7830855e1 + ppc64le: + 0.2.2: 0 + +runc_checksums: + arm: + v1.1.0: 0 + v1.1.1: 0 + v1.1.2: 0 + v1.1.3: 0 + v1.1.4: 0 + arm64: + v1.1.0: 9ec8e68feabc4e7083a4cfa45ebe4d529467391e0b03ee7de7ddda5770b05e68 + v1.1.1: 20c436a736547309371c7ac2a335f5fe5a42b450120e497d09c8dc3902c28444 + v1.1.2: 6ebd968d46d00a3886e9a0cae2e0a7b399e110cf5d7b26e63ce23c1d81ea10ef + v1.1.3: 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f + v1.1.4: dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223 + amd64: + v1.1.0: ab1c67fbcbdddbe481e48a55cf0ef9a86b38b166b5079e0010737fd87d7454bb + v1.1.1: 5798c85d2c8b6942247ab8d6830ef362924cd72a8e236e77430c3ab1be15f080 + v1.1.2: e0436dfc5d26ca88f00e84cbdab5801dd9829b1e5ded05dcfc162ce5718c32ce + v1.1.3: 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01 + v1.1.4: db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce + ppc64le: + v1.1.0: 4a6b2f43c0f2371b1948b2eceb906fd8b9d8f5e9f6bab7d21bc037f5b300f43e + v1.1.1: 5f14bca6e35177134251dfd3c44bccb81136d9043508e7a37494ad9485f5f0e4 + v1.1.2: 545ac8165646ed2b157fae677dd6509baf10e370ebe67c23b2f800163fa97150 + v1.1.3: 3b1b7f953fc8402dec53dcf2de05b6b72d86850737efa9766f8ffefc7cae3c0a + v1.1.4: 0f7fb3d2426b6012d9b33c354c778c0ffbce02c329c4c16c1189433a958fd60d + +crun_checksums: + arm: + 1.4.3: 0 + 1.4.4: 0 + 1.4.5: 0 + amd64: + 1.4.3: 6255325b641be6a3cfb33b5bd7790c035337ad18b9c012d0fbe0e4173a2dbd29 + 1.4.4: 73f7f89a98f69c0bf0e9fe1e0129201d5b72529785b4b1bcb4d43c31d0c3a8ea + 1.4.5: 84cf20a6060cd53ac21a0590367d1ab65f74baae005c42f2d5bc1af918470455 + arm64: + 1.4.3: 
f4f328c99f879273ed475f6f7766904948808de0268a48d92e6e0f2038a1989d + 1.4.4: 2ad2c02ec0b1566f1c5e85223b726b704904cc75c2eb4af298e95b98fe5c166d + 1.4.5: 64a01114060ec12e66b1520c6ee6967410022d1ec73cdc7d14f952343c0769f2 + ppc64le: + 1.4.3: 0 + 1.4.4: 0 + 1.4.5: 0 + +youki_checksums: + arm: + 0.0.1: 0 + amd64: + 0.0.1: 8bd712fe95c8a81194bfbc54c70516350f95153d67044579af95788fbafd943b + arm64: + 0.0.1: 0 + ppc64le: + 0.0.1: 0 + +kata_containers_binary_checksums: + arm: + 2.0.4: 0 + 2.1.1: 0 + 2.2.2: 0 + 2.2.3: 0 + 2.3.0: 0 + 2.4.0: 0 + 2.4.1: 0 + amd64: + 2.0.4: 022a60c2d92a5ab9a5eb83d5a95154a2d06fdc2206b2a473d902ccc86766371a + 2.1.1: a83591d968cd0f1adfb5025d7aa33ca1385d4b1165ff10d74602302fc3c0373f + 2.2.2: 2e3ac77b8abd4d839cf16780b57aee8f3d6e1f19489edd7d6d8069ea3cc3c18a + 2.2.3: e207ab5c8128b50fe61f4f6f98fd34af0fa5ebc0793862be6d13a2674321774f + 2.3.0: 430fa55b387b3bafbbabb7e59aa8c809927a22f8d836732a0719fd2e1d131b31 + 2.4.0: fca40fa4e91efc79c75367ffe09ca32ad795d302aacb91992874f40bfc00348f + 2.4.1: e234ffce779d451dc2a170b394b91d35b96e44ea50dc4a3256defa603efdf607 + arm64: + 2.0.4: 0 + 2.1.1: 0 + 2.2.2: 0 + 2.2.3: 0 + 2.3.0: 0 + 2.4.0: 0 + 2.4.1: 0 + ppc64le: + 2.0.4: 0 + 2.1.1: 0 + 2.2.2: 0 + 2.2.3: 0 + 2.3.0: 0 + 2.4.0: 0 + 2.4.1: 0 + +gvisor_runsc_binary_checksums: + arm: + 20210921: 0 + arm64: + 20210921: 74a916dcd64a7a8347d91c882701363cf2721d53f0db237f0c0b2d708d661e55 + amd64: + 20210921: af5b4527b2d63eea6d0cc2c5522b1e76163df695e9484475e378ec29f7baa661 + ppc64le: + 20210921: 0 + +gvisor_containerd_shim_binary_checksums: + arm: + 20210921: 0 + arm64: + 20210921: 51e466a05256eb2d40fe3cc987ec486212df4af6f79e53630dfd822b9bc1fb2f + amd64: + 20210921: 9ed085fcdbf6f300474e10f2f32b323038568342ce8130298f56e13d14484daa + ppc64le: + 20210921: 0 + +nerdctl_archive_checksums: + arm: + 1.0.0: 8fd283a2f2272b15f3df43cd79642c25f19f62c3c56ad58bb68afb7ed92904c2 + arm64: + 1.0.0: 27622c9d95efe6d807d5f3770d24ddd71719c6ae18f76b5fc89663a51bcd6208 + amd64: + 1.0.0: 3e993d714e6b88d1803a58d9ff5a00d121f0544c35efed3a3789e19d6ab36964 + ppc64le: + 1.0.0: 2fb02e629a4be16b194bbfc64819132a72ede1f52596bd8e1ec2beaf7c28c117 + +containerd_archive_checksums: + arm: + 1.5.5: 0 + 1.5.7: 0 + 1.5.8: 0 + 1.5.9: 0 + 1.5.10: 0 + 1.5.11: 0 + 1.5.12: 0 + 1.5.13: 0 + 1.6.0: 0 + 1.6.1: 0 + 1.6.2: 0 + 1.6.3: 0 + 1.6.4: 0 + 1.6.5: 0 + 1.6.6: 0 + 1.6.7: 0 + 1.6.8: 0 + 1.6.9: 0 + 1.6.10: 0 + 1.6.11: 0 + 1.6.12: 0 + 1.6.13: 0 + 1.6.14: 0 + arm64: + 1.5.5: 0 + 1.5.7: 0 + 1.5.8: 0 + 1.5.9: 0 + 1.5.10: 0 + 1.5.11: 0 + 1.5.12: 0 + 1.5.13: 0 + 1.6.0: 6eff3e16d44c89e1e8480a9ca078f79bab82af602818455cc162be344f64686a + 1.6.1: fbeec71f2d37e0e4ceaaac2bdf081295add940a7a5c7a6bcc125e5bbae067791 + 1.6.2: a4b24b3c38a67852daa80f03ec2bc94e31a0f4393477cd7dc1c1a7c2d3eb2a95 + 1.6.3: 354e30d52ff94bd6cd7ceb8259bdf28419296b46cf5585e9492a87fdefcfe8b2 + 1.6.4: 0205bd1907154388dc85b1afeeb550cbb44c470ef4a290cb1daf91501c85cae6 + 1.6.5: 2833e2f0e8f3cb5044566d64121fdd92bbdfe523e9fe912259e936af280da62a + 1.6.6: 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb + 1.6.7: 4167bf688a0ed08b76b3ac264b90aad7d9dd1424ad9c3911e9416b45e37b0be5 + 1.6.8: b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd + 1.6.9: 140197aee930a8bd8a69ff8e0161e56305751be66e899dccd833c27d139f4f47 + 1.6.10: 6d655e80a843f480e1c1cead18479185251581ff2d4a2e2e5eb88ad5b5e3d937 + 1.6.11: 1b34d8ff067da482af021dac325dc4e993d7356c0bd9dc8e5a3bb8271c1532de + 1.6.12: 0a0133336596b2d1dcafe3587eb91ab302afc28f273614e0e02300694b5457a0 + 1.6.13: 
8c7892ae7c2e96a4a9358b1064fb5519a5c0528b715beee67b72e74d7a644064 + 1.6.14: 3ccb61218e60cbba0e1bbe1e5e2bf809ac1ead8eafbbff36c3195d3edd0e4809 + amd64: + 1.5.5: 8efc527ffb772a82021800f0151374a3113ed2439922497ff08f2596a70f10f1 + 1.5.7: 109fc95b86382065ea668005c376360ddcd8c4ec413e7abe220ae9f461e0e173 + 1.5.8: feeda3f563edf0294e33b6c4b89bd7dbe0ee182ca61a2f9b8c3de2766bcbc99b + 1.5.9: a457793a1643657588baf46d3ffbf44fae0139b65076064e237ddf29cd838ba4 + 1.5.10: 44f809e02233a510bb9d136906849e9ed058aa1d3d714244376001ab77464db7 + 1.5.11: f2a2476ca44a24067488cd6d0b064b2128e01f6f53e5f29c5acfaf1520927ee2 + 1.5.12: 301833f6377e9471a2cf1a1088ba98826db7e8fe9d3ffdc9f570b0638bcd3a1f + 1.5.13: 7b5b34f30a144985e849bdeb0921cfd3fe65f9508b5707fd237fd2c308d9abae + 1.6.0: f77725e4f757523bf1472ec3b9e02b09303a5d99529173be0f11a6d39f5676e9 + 1.6.1: c1df0a12af2be019ca2d6c157f94e8ce7430484ab29948c9805882df40ec458b + 1.6.2: 3d94f887de5f284b0d6ee61fa17ba413a7d60b4bb27d756a402b713a53685c6a + 1.6.3: 306b3c77f0b5e28ed10d527edf3d73f56bf0a1fb296075af4483d8516b6975ed + 1.6.4: f23c8ac914d748f85df94d3e82d11ca89ca9fe19a220ce61b99a05b070044de0 + 1.6.5: cf02a2da998bfcf61727c65ede6f53e89052a68190563a1799a7298b0cea86b4 + 1.6.6: 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef + 1.6.7: 52e817b712d521b193773529ff33626f47507973040c02474a2db95a37da1c37 + 1.6.8: 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e + 1.6.9: 9ee2644bfb95b23123f96b564df2035ec94a46f64060ae12322e09a8ec3c2b53 + 1.6.10: dd1f4730daf728822aea3ba35a440e14b1dfa8f1db97288a59a8666676a13637 + 1.6.11: 21870d7022c52f5f74336d440deffb208ba747b332a88e6369e2aecb69382e48 + 1.6.12: a56c39795fd0d0ee356b4099a4dfa34689779f61afc858ef84c765c63e983a7d + 1.6.13: 97f00411587512e62ec762828e581047b23199f8744754706d09976ec24a2736 + 1.6.14: 7da626d46c4edcae1eefe6d48dc6521db3e594a402715afcddc6ac9e67e1bfcd + ppc64le: + 1.5.5: 0 + 1.5.7: 0 + 1.5.8: 0 + 1.5.9: 0 + 1.5.10: 0 + 1.5.11: 0 + 1.5.12: 0 + 1.5.13: 0 + 1.6.0: 0 + 1.6.1: 0 + 1.6.2: 0 + 1.6.3: 0 + 1.6.4: 0 + 1.6.5: 0 + 1.6.6: 0 + 1.6.7: 0db5cb6d5dd4f3b7369c6945d2ec29a9c10b106643948e3224e53885f56863a9 + 1.6.8: f18769721f614828f6b778030c72dc6969ce2108f2363ddc85f6c7a147df0fb8 + 1.6.9: fe0046437cfe971ef0b3101ee69fcef5cf52e8868de708d35f8b82f998044f6e + 1.6.10: 704b1affd306b807fe6b4701d778129283635c576ecedc6d0a9da5370a07d56a + 1.6.11: e600a5714ffb29937b3710f9ae81bb7aa15b7b6661192f5e8d0b9b58ac6d5e66 + 1.6.12: 088e4d1fe1787fc4a173de24a58da01880d1ead5a13f1ab55e1ade972d3907d4 + 1.6.13: f2508ada0c8bd7d3cb09b0e7f10416aba3d643c0da7adc27efe4e76d444322ae + 1.6.14: 73025da0666079fc3bbd48cf185da320955d323c7dc42d8a4ade0e7926d62bb0 +skopeo_binary_checksums: + arm: + v1.10.0: 0 + arm64: + v1.10.0: 3bfc344d4940df29358f8056de7b8dd488b88a5d777b3106748ba66851fa2c58 + amd64: + v1.10.0: 20fbd1bac1d33768c3671e4fe9d90c5233d7e13a40e4935b4b24ebc083390604 + ppc64l3: + v1.10.0: 0 + +etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch][etcd_version] }}" +cni_binary_checksum: "{{ cni_binary_checksums[image_arch][cni_version] }}" +kubelet_binary_checksum: "{{ kubelet_checksums[image_arch][kube_version] }}" +kubectl_binary_checksum: "{{ kubectl_checksums[image_arch][kube_version] }}" +kubeadm_binary_checksum: "{{ kubeadm_checksums[image_arch][kubeadm_version] }}" +calicoctl_binary_checksum: "{{ calicoctl_binary_checksums[image_arch][calico_ctl_version] }}" +calico_crds_archive_checksum: "{{ calico_crds_archive_checksums[calico_version] }}" +ciliumcli_binary_checksum: "{{ ciliumcli_binary_checksums[image_arch][cilium_cli_version] }}" 
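+# Note (illustrative, not from upstream): each of these *_checksum variables resolves its value by indexing the maps above with the host CPU architecture and the pinned component version, e.g. skopeo_binary_checksums['amd64']['v1.10.0'].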
+crictl_binary_checksum: "{{ crictl_checksums[image_arch][crictl_version] }}" +crio_archive_checksum: "{{ crio_archive_checksums[image_arch][crio_version] }}" +cri_dockerd_archive_checksum: "{{ cri_dockerd_archive_checksums[image_arch][cri_dockerd_version] }}" +helm_archive_checksum: "{{ helm_archive_checksums[image_arch][helm_version] }}" +runc_binary_checksum: "{{ runc_checksums[image_arch][runc_version] }}" +crun_binary_checksum: "{{ crun_checksums[image_arch][crun_version] }}" +youki_archive_checksum: "{{ youki_checksums[image_arch][youki_version] }}" +kata_containers_binary_checksum: "{{ kata_containers_binary_checksums[image_arch][kata_containers_version] }}" +gvisor_runsc_binary_checksum: "{{ gvisor_runsc_binary_checksums[image_arch][gvisor_version] }}" +gvisor_containerd_shim_binary_checksum: "{{ gvisor_containerd_shim_binary_checksums[image_arch][gvisor_version] }}" +nerdctl_archive_checksum: "{{ nerdctl_archive_checksums[image_arch][nerdctl_version] }}" +krew_archive_checksum: "{{ krew_archive_checksums[host_os][image_arch][krew_version] }}" +containerd_archive_checksum: "{{ containerd_archive_checksums[image_arch][containerd_version] }}" +skopeo_binary_checksum: "{{ skopeo_binary_checksums[image_arch][skopeo_version] }}" + +# Containers +# In some cases we need a way to set --registry-mirror or --insecure-registry for docker; +# this helps a lot for local private development or bare-metal environments. +# If so, define --registry-mirror or --insecure-registry and adjust the URL addresses below. +# Example: +# You want to deploy a Kubernetes cluster for local private development, +# provide the address of your own private registry, +# and use the --insecure-registry option for docker. +kube_proxy_image_repo: "{{ kube_image_repo }}/kube-proxy" +etcd_image_repo: "{{ quay_image_repo }}/coreos/etcd" +etcd_image_tag: "{{ etcd_version }}" +flannel_image_repo: "{{ docker_image_repo }}/flannelcni/flannel" +flannel_image_tag: "{{ flannel_version }}-{{ image_arch }}" +flannel_init_image_repo: "{{ docker_image_repo }}/flannelcni/flannel-cni-plugin" +flannel_init_image_tag: "{{ flannel_cni_version }}-{{ image_arch }}" +calico_node_image_repo: "{{ quay_image_repo }}/calico/node" +calico_node_image_tag: "{{ calico_version }}" +calico_cni_image_repo: "{{ quay_image_repo }}/calico/cni" +calico_cni_image_tag: "{{ calico_cni_version }}" +calico_flexvol_image_repo: "{{ quay_image_repo }}/calico/pod2daemon-flexvol" +calico_flexvol_image_tag: "{{ calico_flexvol_version }}" +calico_policy_image_repo: "{{ quay_image_repo }}/calico/kube-controllers" +calico_policy_image_tag: "{{ calico_policy_version }}" +calico_typha_image_repo: "{{ quay_image_repo }}/calico/typha" +calico_typha_image_tag: "{{ calico_typha_version }}" +calico_apiserver_image_repo: "{{ quay_image_repo }}/calico/apiserver" +calico_apiserver_image_tag: "{{ calico_apiserver_version }}" +pod_infra_image_repo: "{{ kube_image_repo }}/pause" +pod_infra_image_tag: "{{ pod_infra_version }}" +netcheck_version: "v1.2.2" +netcheck_agent_image_repo: "{{ docker_image_repo }}/mirantis/k8s-netchecker-agent" +netcheck_agent_image_tag: "{{ netcheck_version }}" +netcheck_server_image_repo: "{{ docker_image_repo }}/mirantis/k8s-netchecker-server" +netcheck_server_image_tag: "{{ netcheck_version }}" +netcheck_etcd_image_tag: "v3.4.17" +weave_kube_image_repo: "{{ docker_image_repo }}/weaveworks/weave-kube" +weave_kube_image_tag: "{{ weave_version }}" +weave_npc_image_repo: "{{ docker_image_repo }}/weaveworks/weave-npc" +weave_npc_image_tag: "{{ 
weave_version }}" +cilium_image_repo: "{{ quay_image_repo }}/cilium/cilium" +cilium_image_tag: "{{ cilium_version }}" +cilium_operator_image_repo: "{{ quay_image_repo }}/cilium/operator" +cilium_operator_image_tag: "{{ cilium_version }}" +cilium_hubble_relay_image_repo: "{{ quay_image_repo }}/cilium/hubble-relay" +cilium_hubble_relay_image_tag: "{{ cilium_version }}" +cilium_hubble_certgen_image_repo: "{{ quay_image_repo }}/cilium/certgen" +cilium_hubble_certgen_image_tag: "v0.1.8" +cilium_hubble_ui_image_repo: "{{ quay_image_repo }}/cilium/hubble-ui" +cilium_hubble_ui_image_tag: "v0.9.2" +cilium_hubble_ui_backend_image_repo: "{{ quay_image_repo }}/cilium/hubble-ui-backend" +cilium_hubble_ui_backend_image_tag: "v0.9.2" +cilium_hubble_envoy_image_repo: "{{ docker_image_repo }}/envoyproxy/envoy" +cilium_hubble_envoy_image_tag: "v1.22.5" +kube_ovn_container_image_repo: "{{ docker_image_repo }}/kubeovn/kube-ovn" +kube_ovn_container_image_tag: "{{ kube_ovn_version }}" +kube_ovn_dpdk_container_image_repo: "{{ docker_image_repo }}/kubeovn/kube-ovn-dpdk" +kube_ovn_dpdk_container_image_tag: "{{ kube_ovn_dpdk_version }}" +kube_router_image_repo: "{{ docker_image_repo }}/cloudnativelabs/kube-router" +kube_router_image_tag: "{{ kube_router_version }}" +multus_image_repo: "{{ github_image_repo }}/k8snetworkplumbingwg/multus-cni" +multus_image_tag: "{{ multus_version }}" + +kube_vip_image_repo: "{{ github_image_repo }}/kube-vip/kube-vip" +kube_vip_image_tag: v0.5.5 +nginx_image_repo: "{{ docker_image_repo }}/library/nginx" +nginx_image_tag: 1.23.2-alpine +haproxy_image_repo: "{{ docker_image_repo }}/library/haproxy" +haproxy_image_tag: 2.6.6-alpine + +# Coredns version should be supported by corefile-migration (or at least work with) +# bundle with kubeadm; if not 'basic' upgrade can sometimes fail + +coredns_version: "v1.9.3" +coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1','>=')) }}" + +coredns_image_repo: "{{ kube_image_repo }}{{'/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}" +coredns_image_tag: "{{ coredns_version if (coredns_image_is_namespaced | bool) else (coredns_version | regex_replace('^v', '')) }}" + +nodelocaldns_version: "1.21.1" +nodelocaldns_image_repo: "{{ kube_image_repo }}/dns/k8s-dns-node-cache" +nodelocaldns_image_tag: "{{ nodelocaldns_version }}" + +dnsautoscaler_version: 1.8.5 +dnsautoscaler_image_repo: "{{ kube_image_repo }}/cpa/cluster-proportional-autoscaler-{{ image_arch }}" +dnsautoscaler_image_tag: "{{ dnsautoscaler_version }}" + +registry_version: "2.8.1" +registry_image_repo: "{{ docker_image_repo }}/library/registry" +registry_image_tag: "{{ registry_version }}" +metrics_server_version: "v0.6.2" +metrics_server_image_repo: "{{ kube_image_repo }}/metrics-server/metrics-server" +metrics_server_image_tag: "{{ metrics_server_version }}" +local_volume_provisioner_version: "v2.5.0" +local_volume_provisioner_image_repo: "{{ kube_image_repo }}/sig-storage/local-volume-provisioner" +local_volume_provisioner_image_tag: "{{ local_volume_provisioner_version }}" +cephfs_provisioner_version: "v2.1.0-k8s1.11" +cephfs_provisioner_image_repo: "{{ quay_image_repo }}/external_storage/cephfs-provisioner" +cephfs_provisioner_image_tag: "{{ cephfs_provisioner_version }}" +rbd_provisioner_version: "v2.1.1-k8s1.11" +rbd_provisioner_image_repo: "{{ quay_image_repo }}/external_storage/rbd-provisioner" +rbd_provisioner_image_tag: "{{ rbd_provisioner_version }}" +local_path_provisioner_version: "v0.0.22" +local_path_provisioner_image_repo: "{{ 
docker_image_repo }}/rancher/local-path-provisioner" +local_path_provisioner_image_tag: "{{ local_path_provisioner_version }}" +ingress_nginx_version: "v1.5.1" +ingress_nginx_controller_image_repo: "{{ kube_image_repo }}/ingress-nginx/controller" +ingress_nginx_controller_image_tag: "{{ ingress_nginx_version }}" +ingress_nginx_kube_webhook_certgen_imae_repo: "{{ kube_image_repo }}/ingress-nginx/kube-webhook-certgen" +ingress_nginx_kube_webhook_certgen_imae_tag: "v1.3.0" +alb_ingress_image_repo: "{{ docker_image_repo }}/amazon/aws-alb-ingress-controller" +alb_ingress_image_tag: "v1.1.9" +cert_manager_version: "v1.10.1" +cert_manager_controller_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-controller" +cert_manager_controller_image_tag: "{{ cert_manager_version }}" +cert_manager_cainjector_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-cainjector" +cert_manager_cainjector_image_tag: "{{ cert_manager_version }}" +cert_manager_webhook_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-webhook" +cert_manager_webhook_image_tag: "{{ cert_manager_version }}" + +csi_attacher_image_repo: "{{ kube_image_repo }}/sig-storage/csi-attacher" +csi_attacher_image_tag: "v3.3.0" +csi_provisioner_image_repo: "{{ kube_image_repo }}/sig-storage/csi-provisioner" +csi_provisioner_image_tag: "v3.0.0" +csi_snapshotter_image_repo: "{{ kube_image_repo }}/sig-storage/csi-snapshotter" +csi_snapshotter_image_tag: "v5.0.0" +csi_resizer_image_repo: "{{ kube_image_repo }}/sig-storage/csi-resizer" +csi_resizer_image_tag: "v1.3.0" +csi_node_driver_registrar_image_repo: "{{ kube_image_repo }}/sig-storage/csi-node-driver-registrar" +csi_node_driver_registrar_image_tag: "v2.4.0" +csi_livenessprobe_image_repo: "{{ kube_image_repo }}/sig-storage/livenessprobe" +csi_livenessprobe_image_tag: "v2.5.0" + +snapshot_controller_supported_versions: + v1.25: "v4.2.1" + v1.24: "v4.2.1" + v1.23: "v4.2.1" +snapshot_controller_image_repo: "{{ kube_image_repo }}/sig-storage/snapshot-controller" +snapshot_controller_image_tag: "{{ snapshot_controller_supported_versions[kube_major_version] }}" + +cinder_csi_plugin_version: "v1.22.0" +cinder_csi_plugin_image_repo: "{{ docker_image_repo }}/k8scloudprovider/cinder-csi-plugin" +cinder_csi_plugin_image_tag: "{{ cinder_csi_plugin_version }}" + +aws_ebs_csi_plugin_version: "v0.5.0" +aws_ebs_csi_plugin_image_repo: "{{ docker_image_repo }}/amazon/aws-ebs-csi-driver" +aws_ebs_csi_plugin_image_tag: "{{ aws_ebs_csi_plugin_version }}" + +gcp_pd_csi_plugin_version: "v1.4.0" +gcp_pd_csi_plugin_image_repo: "{{ kube_image_repo }}/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver" +gcp_pd_csi_plugin_image_tag: "{{ gcp_pd_csi_plugin_version }}" + +azure_csi_image_repo: "mcr.microsoft.com/oss/kubernetes-csi" +azure_csi_provisioner_image_tag: "v2.2.2" +azure_csi_attacher_image_tag: "v3.3.0" +azure_csi_resizer_image_tag: "v1.3.0" +azure_csi_livenessprobe_image_tag: "v2.5.0" +azure_csi_node_registrar_image_tag: "v2.4.0" +azure_csi_snapshotter_image_tag: "v3.0.3" +azure_csi_plugin_version: "v1.10.0" +azure_csi_plugin_image_repo: "mcr.microsoft.com/k8s/csi" +azure_csi_plugin_image_tag: "{{ azure_csi_plugin_version }}" + +gcp_pd_csi_image_repo: "gke.gcr.io" +gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" +gcp_pd_csi_provisioner_image_tag: "v1.5.0-gke.0" +gcp_pd_csi_attacher_image_tag: "v2.1.1-gke.0" +gcp_pd_csi_resizer_image_tag: "v0.4.0-gke.0" +gcp_pd_csi_registrar_image_tag: "v1.2.0-gke.0" + +dashboard_image_repo: "{{ docker_image_repo }}/kubernetesui/dashboard" +dashboard_image_tag: 
"v2.7.0" +dashboard_metrics_scraper_repo: "{{ docker_image_repo }}/kubernetesui/metrics-scraper" +dashboard_metrics_scraper_tag: "v1.0.8" + +metallb_speaker_image_repo: "{{ quay_image_repo }}/metallb/speaker" +metallb_controller_image_repo: "{{ quay_image_repo }}/metallb/controller" +metallb_version: v0.12.1 + +downloads: + netcheck_server: + enabled: "{{ deploy_netchecker }}" + container: true + repo: "{{ netcheck_server_image_repo }}" + tag: "{{ netcheck_server_image_tag }}" + sha256: "{{ netcheck_server_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + netcheck_agent: + enabled: "{{ deploy_netchecker }}" + container: true + repo: "{{ netcheck_agent_image_repo }}" + tag: "{{ netcheck_agent_image_tag }}" + sha256: "{{ netcheck_agent_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + etcd: + container: "{{ etcd_deployment_type != 'host' }}" + file: "{{ etcd_deployment_type == 'host' }}" + enabled: true + version: "{{ etcd_version }}" + dest: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + repo: "{{ etcd_image_repo }}" + tag: "{{ etcd_image_tag }}" + sha256: >- + {{ etcd_binary_checksum if (etcd_deployment_type == 'host') + else etcd_digest_checksum|d(None) }} + url: "{{ etcd_download_url }}" + unarchive: "{{ etcd_deployment_type == 'host' }}" + owner: "root" + mode: "0755" + groups: + - etcd + + cni: + enabled: true + file: true + version: "{{ cni_version }}" + dest: "{{ local_release_dir }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + sha256: "{{ cni_binary_checksum }}" + url: "{{ cni_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + kubeadm: + enabled: true + file: true + version: "{{ kubeadm_version }}" + dest: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}" + sha256: "{{ kubeadm_binary_checksum }}" + url: "{{ kubeadm_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + kubelet: + enabled: true + file: true + version: "{{ kube_version }}" + dest: "{{ local_release_dir }}/kubelet-{{ kube_version }}-{{ image_arch }}" + sha256: "{{ kubelet_binary_checksum }}" + url: "{{ kubelet_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + kubectl: + enabled: true + file: true + version: "{{ kube_version }}" + dest: "{{ local_release_dir }}/kubectl-{{ kube_version }}-{{ image_arch }}" + sha256: "{{ kubectl_binary_checksum }}" + url: "{{ kubectl_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - kube_control_plane + + crictl: + file: true + enabled: true + version: "{{ crictl_version }}" + dest: "{{ local_release_dir }}/crictl-{{ crictl_version }}-linux-{{ image_arch }}.tar.gz" + sha256: "{{ crictl_binary_checksum }}" + url: "{{ crictl_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + crio: + file: true + enabled: "{{ container_manager == 'crio' }}" + version: "{{ crio_version }}" + dest: "{{ local_release_dir }}/cri-o.{{ image_arch }}.{{ crio_version }}tar.gz" + sha256: "{{ crio_archive_checksum }}" + url: "{{ crio_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + cri_dockerd: + file: true + enabled: "{{ container_manager == 'docker' }}" + version: "{{ cri_dockerd_version }}" + dest: "{{ local_release_dir }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tar.gz" + sha256: "{{ cri_dockerd_archive_checksum }}" + url: "{{ 
cri_dockerd_download_url }}" + unarchive: true + unarchive_extra_opts: + - --strip=1 + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + crun: + file: true + enabled: "{{ crun_enabled }}" + version: "{{ crun_version }}" + dest: "{{ local_release_dir }}/crun" + sha256: "{{ crun_binary_checksum }}" + url: "{{ crun_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + youki: + file: true + enabled: "{{ youki_enabled }}" + version: "{{ youki_version }}" + dest: "{{ local_release_dir }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux.tar.gz" + sha256: "{{ youki_archive_checksum }}" + url: "{{ youki_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + runc: + file: true + enabled: "{{ container_manager == 'containerd' }}" + version: "{{ runc_version }}" + dest: "{{ local_release_dir }}/runc" + sha256: "{{ runc_binary_checksum }}" + url: "{{ runc_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + kata_containers: + enabled: "{{ kata_containers_enabled }}" + file: true + version: "{{ kata_containers_version }}" + dest: "{{ local_release_dir }}/kata-static-{{ kata_containers_version }}-{{ image_arch }}.tar.xz" + sha256: "{{ kata_containers_binary_checksum }}" + url: "{{ kata_containers_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + containerd: + enabled: "{{ container_manager == 'containerd' }}" + file: true + version: "{{ containerd_version }}" + dest: "{{ local_release_dir }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" + sha256: "{{ containerd_archive_checksum }}" + url: "{{ containerd_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + gvisor_runsc: + enabled: "{{ gvisor_enabled }}" + file: true + version: "{{ gvisor_version }}" + dest: "{{ local_release_dir }}/gvisor-runsc" + sha256: "{{ gvisor_runsc_binary_checksum }}" + url: "{{ gvisor_runsc_download_url }}" + unarchive: false + owner: "root" + mode: 755 + groups: + - k8s_cluster + + gvisor_containerd_shim: + enabled: "{{ gvisor_enabled }}" + file: true + version: "{{ gvisor_version }}" + dest: "{{ local_release_dir }}/gvisor-containerd-shim-runsc-v1" + sha256: "{{ gvisor_containerd_shim_binary_checksum }}" + url: "{{ gvisor_containerd_shim_runsc_download_url }}" + unarchive: false + owner: "root" + mode: 755 + groups: + - k8s_cluster + + nerdctl: + file: true + enabled: "{{ container_manager == 'containerd' }}" + version: "{{ nerdctl_version }}" + dest: "{{ local_release_dir }}/nerdctl-{{ nerdctl_version }}-linux-{{ image_arch }}.tar.gz" + sha256: "{{ nerdctl_archive_checksum }}" + url: "{{ nerdctl_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + skopeo: + file: true + enabled: "{{ container_manager == 'crio' }}" + version: "{{ skopeo_version }}" + dest: "{{ local_release_dir }}/skopeo" + sha256: "{{ skopeo_binary_checksum }}" + url: "{{ skopeo_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - kube_control_plane + + cilium: + enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" + container: true + repo: "{{ cilium_image_repo }}" + tag: "{{ cilium_image_tag }}" + sha256: "{{ cilium_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_operator: + enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | 
default(false) | bool }}" + container: true + repo: "{{ cilium_operator_image_repo }}" + tag: "{{ cilium_operator_image_tag }}" + sha256: "{{ cilium_operator_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_hubble_relay: + enabled: "{{ cilium_enable_hubble }}" + container: true + repo: "{{ cilium_hubble_relay_image_repo }}" + tag: "{{ cilium_hubble_relay_image_tag }}" + sha256: "{{ cilium_hubble_relay_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_hubble_certgen: + enabled: "{{ cilium_enable_hubble }}" + container: true + repo: "{{ cilium_hubble_certgen_image_repo }}" + tag: "{{ cilium_hubble_certgen_image_tag }}" + sha256: "{{ cilium_hubble_certgen_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_hubble_ui: + enabled: "{{ cilium_enable_hubble }}" + container: true + repo: "{{ cilium_hubble_ui_image_repo }}" + tag: "{{ cilium_hubble_ui_image_tag }}" + sha256: "{{ cilium_hubble_ui_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_hubble_ui_backend: + enabled: "{{ cilium_enable_hubble }}" + container: true + repo: "{{ cilium_hubble_ui_backend_image_repo }}" + tag: "{{ cilium_hubble_ui_backend_image_tag }}" + sha256: "{{ cilium_hubble_ui_backend_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + cilium_hubble_envoy: + enabled: "{{ cilium_enable_hubble }}" + container: true + repo: "{{ cilium_hubble_envoy_image_repo }}" + tag: "{{ cilium_hubble_envoy_image_tag }}" + sha256: "{{ cilium_hubble_envoy_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + ciliumcli: + enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" + file: true + version: "{{ cilium_cli_version }}" + dest: "{{ local_release_dir }}/cilium" + sha256: "{{ ciliumcli_binary_checksum }}" + url: "{{ ciliumcli_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + multus: + enabled: "{{ kube_network_plugin_multus }}" + container: true + repo: "{{ multus_image_repo }}" + tag: "{{ multus_image_tag }}" + sha256: "{{ multus_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + flannel: + enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" + container: true + repo: "{{ flannel_image_repo }}" + tag: "{{ flannel_image_tag }}" + sha256: "{{ flannel_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + flannel_init: + enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" + container: true + repo: "{{ flannel_init_image_repo }}" + tag: "{{ flannel_init_image_tag }}" + sha256: "{{ flannel_init_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calicoctl: + enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" + file: true + version: "{{ calico_ctl_version }}" + dest: "{{ local_release_dir }}/calicoctl" + sha256: "{{ calicoctl_binary_checksum }}" + url: "{{ calicoctl_download_url }}" + mirrors: + - "{{ calicoctl_alternate_download_url }}" + - "{{ calicoctl_download_url }}" + unarchive: false + owner: "root" + mode: "0755" + groups: + - k8s_cluster + + calico_node: + enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" + container: true + repo: "{{ calico_node_image_repo }}" + tag: "{{ calico_node_image_tag }}" + sha256: "{{ calico_node_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_cni: + enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" + 
container: true + repo: "{{ calico_cni_image_repo }}" + tag: "{{ calico_cni_image_tag }}" + sha256: "{{ calico_cni_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_flexvol: + enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" + container: true + repo: "{{ calico_flexvol_image_repo }}" + tag: "{{ calico_flexvol_image_tag }}" + sha256: "{{ calico_flexvol_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_policy: + enabled: "{{ enable_network_policy and kube_network_plugin in ['calico', 'canal'] }}" + container: true + repo: "{{ calico_policy_image_repo }}" + tag: "{{ calico_policy_image_tag }}" + sha256: "{{ calico_policy_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_typha: + enabled: "{{ typha_enabled }}" + container: true + repo: "{{ calico_typha_image_repo }}" + tag: "{{ calico_typha_image_tag }}" + sha256: "{{ calico_typha_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_apiserver: + enabled: "{{ calico_apiserver_enabled }}" + container: true + repo: "{{ calico_apiserver_image_repo }}" + tag: "{{ calico_apiserver_image_tag }}" + sha256: "{{ calico_apiserver_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + calico_crds: + file: true + enabled: "{{ kube_network_plugin == 'calico' and calico_datastore == 'kdd' }}" + version: "{{ calico_version }}" + dest: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ calico_version }}.tar.gz" + sha256: "{{ calico_crds_archive_checksum }}" + url: "{{ calico_crds_download_url }}" + unarchive: true + unarchive_extra_opts: + - "{{ '--strip=6' if (calico_version is version('v3.22.3','<')) else '--strip=3' }}" + - "--wildcards" + - "{{ '*/_includes/charts/calico/crds/kdd/' if (calico_version is version('v3.22.3','<')) else '*/libcalico-go/config/crd/' }}" + owner: "root" + mode: "0755" + groups: + - kube_control_plane + + weave_kube: + enabled: "{{ kube_network_plugin == 'weave' }}" + container: true + repo: "{{ weave_kube_image_repo }}" + tag: "{{ weave_kube_image_tag }}" + sha256: "{{ weave_kube_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + weave_npc: + enabled: "{{ kube_network_plugin == 'weave' }}" + container: true + repo: "{{ weave_npc_image_repo }}" + tag: "{{ weave_npc_image_tag }}" + sha256: "{{ weave_npc_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + kube_ovn: + enabled: "{{ kube_network_plugin == 'kube-ovn' }}" + container: true + repo: "{{ kube_ovn_container_image_repo }}" + tag: "{{ kube_ovn_container_image_tag }}" + sha256: "{{ kube_ovn_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + kube_router: + enabled: "{{ kube_network_plugin == 'kube-router' }}" + container: true + repo: "{{ kube_router_image_repo }}" + tag: "{{ kube_router_image_tag }}" + sha256: "{{ kube_router_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + pod_infra: + enabled: true + container: true + repo: "{{ pod_infra_image_repo }}" + tag: "{{ pod_infra_image_tag }}" + sha256: "{{ pod_infra_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + kube-vip: + enabled: "{{ kube_vip_enabled }}" + container: true + repo: "{{ kube_vip_image_repo }}" + tag: "{{ kube_vip_image_tag }}" + sha256: "{{ kube_vip_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + nginx: + enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}" + container: true + repo: "{{ nginx_image_repo }}" + tag: "{{ nginx_image_tag }}" + sha256: 
"{{ nginx_digest_checksum|default(None) }}" + groups: + - kube_node + + haproxy: + enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' }}" + container: true + repo: "{{ haproxy_image_repo }}" + tag: "{{ haproxy_image_tag }}" + sha256: "{{ haproxy_digest_checksum|default(None) }}" + groups: + - kube_node + + coredns: + enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}" + container: true + repo: "{{ coredns_image_repo }}" + tag: "{{ coredns_image_tag }}" + sha256: "{{ coredns_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + nodelocaldns: + enabled: "{{ enable_nodelocaldns }}" + container: true + repo: "{{ nodelocaldns_image_repo }}" + tag: "{{ nodelocaldns_image_tag }}" + sha256: "{{ nodelocaldns_digest_checksum|default(None) }}" + groups: + - k8s_cluster + + dnsautoscaler: + enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}" + container: true + repo: "{{ dnsautoscaler_image_repo }}" + tag: "{{ dnsautoscaler_image_tag }}" + sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + helm: + enabled: "{{ helm_enabled }}" + file: true + version: "{{ helm_version }}" + dest: "{{ local_release_dir }}/helm-{{ helm_version }}/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + sha256: "{{ helm_archive_checksum }}" + url: "{{ helm_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - kube_control_plane + + krew: + enabled: "{{ krew_enabled }}" + file: true + version: "{{ krew_version }}" + dest: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz" + sha256: "{{ krew_archive_checksum }}" + url: "{{ krew_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - kube_control_plane + + registry: + enabled: "{{ registry_enabled }}" + container: true + repo: "{{ registry_image_repo }}" + tag: "{{ registry_image_tag }}" + sha256: "{{ registry_digest_checksum|default(None) }}" + groups: + - kube_node + + metrics_server: + enabled: "{{ metrics_server_enabled }}" + container: true + repo: "{{ metrics_server_image_repo }}" + tag: "{{ metrics_server_image_tag }}" + sha256: "{{ metrics_server_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + local_volume_provisioner: + enabled: "{{ local_volume_provisioner_enabled }}" + container: true + repo: "{{ local_volume_provisioner_image_repo }}" + tag: "{{ local_volume_provisioner_image_tag }}" + sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}" + groups: + - kube_node + + cephfs_provisioner: + enabled: "{{ cephfs_provisioner_enabled }}" + container: true + repo: "{{ cephfs_provisioner_image_repo }}" + tag: "{{ cephfs_provisioner_image_tag }}" + sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}" + groups: + - kube_node + + rbd_provisioner: + enabled: "{{ rbd_provisioner_enabled }}" + container: true + repo: "{{ rbd_provisioner_image_repo }}" + tag: "{{ rbd_provisioner_image_tag }}" + sha256: "{{ rbd_provisioner_digest_checksum|default(None) }}" + groups: + - kube_node + + local_path_provisioner: + enabled: "{{ local_path_provisioner_enabled }}" + container: true + repo: "{{ local_path_provisioner_image_repo }}" + tag: "{{ local_path_provisioner_image_tag }}" + sha256: "{{ local_path_provisioner_digest_checksum|default(None) }}" + groups: + - kube_node + + ingress_nginx_controller: + enabled: "{{ ingress_nginx_enabled }}" + container: true + repo: "{{ ingress_nginx_controller_image_repo }}" + tag: "{{ ingress_nginx_controller_image_tag }}" + 
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}" + groups: + - kube_node + + ingress_alb_controller: + enabled: "{{ ingress_alb_enabled }}" + container: true + repo: "{{ alb_ingress_image_repo }}" + tag: "{{ alb_ingress_image_tag }}" + sha256: "{{ ingress_alb_controller_digest_checksum|default(None) }}" + groups: + - kube_node + + cert_manager_controller: + enabled: "{{ cert_manager_enabled }}" + container: true + repo: "{{ cert_manager_controller_image_repo }}" + tag: "{{ cert_manager_controller_image_tag }}" + sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}" + groups: + - kube_node + + cert_manager_cainjector: + enabled: "{{ cert_manager_enabled }}" + container: true + repo: "{{ cert_manager_cainjector_image_repo }}" + tag: "{{ cert_manager_cainjector_image_tag }}" + sha256: "{{ cert_manager_cainjector_digest_checksum|default(None) }}" + groups: + - kube_node + + cert_manager_webhook: + enabled: "{{ cert_manager_enabled }}" + container: true + repo: "{{ cert_manager_webhook_image_repo }}" + tag: "{{ cert_manager_webhook_image_tag }}" + sha256: "{{ cert_manager_webhook_digest_checksum|default(None) }}" + groups: + - kube_node + + csi_attacher: + enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" + container: true + repo: "{{ csi_attacher_image_repo }}" + tag: "{{ csi_attacher_image_tag }}" + sha256: "{{ csi_attacher_digest_checksum|default(None) }}" + groups: + - kube_node + + csi_provisioner: + enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" + container: true + repo: "{{ csi_provisioner_image_repo }}" + tag: "{{ csi_provisioner_image_tag }}" + sha256: "{{ csi_provisioner_digest_checksum|default(None) }}" + groups: + - kube_node + + csi_snapshotter: + enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" + container: true + repo: "{{ csi_snapshotter_image_repo }}" + tag: "{{ csi_snapshotter_image_tag }}" + sha256: "{{ csi_snapshotter_digest_checksum|default(None) }}" + groups: + - kube_node + + snapshot_controller: + enabled: "{{ csi_snapshot_controller_enabled }}" + container: true + repo: "{{ snapshot_controller_image_repo }}" + tag: "{{ snapshot_controller_image_tag }}" + sha256: "{{ snapshot_controller_digest_checksum|default(None) }}" + groups: + - kube_node + + csi_resizer: + enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" + container: true + repo: "{{ csi_resizer_image_repo }}" + tag: "{{ csi_resizer_image_tag }}" + sha256: "{{ csi_resizer_digest_checksum|default(None) }}" + groups: + - kube_node + + csi_node_driver_registrar: + enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" + container: true + repo: "{{ csi_node_driver_registrar_image_repo }}" + tag: "{{ csi_node_driver_registrar_image_tag }}" + sha256: "{{ csi_node_driver_registrar_digest_checksum|default(None) }}" + groups: + - kube_node + + cinder_csi_plugin: + enabled: "{{ cinder_csi_enabled }}" + container: true + repo: "{{ cinder_csi_plugin_image_repo }}" + tag: "{{ cinder_csi_plugin_image_tag }}" + sha256: "{{ cinder_csi_plugin_digest_checksum|default(None) }}" + groups: + - kube_node + + aws_ebs_csi_plugin: + enabled: "{{ aws_ebs_csi_enabled }}" + container: true + repo: "{{ aws_ebs_csi_plugin_image_repo }}" + tag: "{{ aws_ebs_csi_plugin_image_tag }}" + sha256: "{{ aws_ebs_csi_plugin_digest_checksum|default(None) }}" + groups: + - kube_node + + dashboard: + enabled: "{{ dashboard_enabled }}" + container: true + repo: "{{ dashboard_image_repo }}" + tag: "{{ dashboard_image_tag }}" + sha256: "{{ dashboard_digest_checksum|default(None) 
}}" + groups: + - kube_control_plane + + dashboard_metrics_scrapper: + enabled: "{{ dashboard_enabled }}" + container: true + repo: "{{ dashboard_metrics_scraper_repo }}" + tag: "{{ dashboard_metrics_scraper_tag }}" + sha256: "{{ dashboard_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + metallb_speaker: + enabled: "{{ metallb_speaker_enabled }}" + container: true + repo: "{{ metallb_speaker_image_repo }}" + tag: "{{ metallb_version }}" + sha256: "{{ metallb_speaker_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + metallb_controller: + enabled: "{{ metallb_enabled }}" + container: true + repo: "{{ metallb_controller_image_repo }}" + tag: "{{ metallb_version }}" + sha256: "{{ metallb_controller_digest_checksum|default(None) }}" + groups: + - kube_control_plane + +download_defaults: + container: false + file: false + repo: None + tag: None + enabled: false + dest: None + version: None + url: None + unarchive: false + owner: "{{ kube_owner }}" + mode: None diff --git a/kubespray/roles/download/meta/main.yml b/kubespray/roles/download/meta/main.yml new file mode 100644 index 0000000..61d3ffe --- /dev/null +++ b/kubespray/roles/download/meta/main.yml @@ -0,0 +1,2 @@ +--- +allow_duplicates: true diff --git a/kubespray/roles/download/tasks/check_pull_required.yml b/kubespray/roles/download/tasks/check_pull_required.yml new file mode 100644 index 0000000..c2f9ead --- /dev/null +++ b/kubespray/roles/download/tasks/check_pull_required.yml @@ -0,0 +1,25 @@ +--- +# The image_info_command depends on the Container Runtime and will output something like the following: +# nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc... 
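+# Illustrative example (not part of the command output above): if image_reponame is "docker.io/library/nginx:1.15", the "docker.io/library/" prefix is stripped below and "nginx:1.15" is looked up in this comma-separated list, so pull_required is only set to false when the image is already present on the node.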
+- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell + shell: "{{ image_info_command }}" + register: docker_images + changed_when: false + check_mode: no + when: not download_always_pull + +- name: check_pull_required | Set pull_required if the desired image is not yet loaded + set_fact: + pull_required: >- + {%- if image_reponame | regex_replace('^docker\.io/(library/)?','') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%} + when: not download_always_pull + +- name: check_pull_required | Check that the local digest sha256 corresponds to the given image tag + assert: + that: "{{ download.repo }}:{{ download.tag }} in docker_images.stdout.split(',')" + when: + - not download_always_pull + - not pull_required + - pull_by_digest + tags: + - asserts diff --git a/kubespray/roles/download/tasks/download_container.yml b/kubespray/roles/download/tasks/download_container.yml new file mode 100644 index 0000000..41790fe --- /dev/null +++ b/kubespray/roles/download/tasks/download_container.yml @@ -0,0 +1,125 @@ +--- +- block: + - name: set default values for flag variables + set_fact: + image_is_cached: false + image_changed: false + pull_required: "{{ download_always_pull }}" + tags: + - facts + + - name: download_container | Set a few facts + import_tasks: set_container_facts.yml + tags: + - facts + + - name: download_container | Prepare container download + include_tasks: check_pull_required.yml + when: + - not download_always_pull + + - debug: # noqa unnamed-task + msg: "Pull {{ image_reponame }} required is: {{ pull_required }}" + + - name: download_container | Determine if image is in cache + stat: + path: "{{ image_path_cached }}" + get_attributes: no + get_checksum: no + get_mime: no + delegate_to: localhost + connection: local + delegate_facts: no + register: cache_image + changed_when: false + become: false + when: + - download_force_cache + + - name: download_container | Set fact indicating if image is in cache + set_fact: + image_is_cached: "{{ cache_image.stat.exists }}" + tags: + - facts + when: + - download_force_cache + + - name: Stop if image not in cache on ansible host when download_force_cache=true + assert: + that: image_is_cached + msg: "Image cache file {{ image_path_cached }} not found for {{ image_reponame }} on localhost" + when: + - download_force_cache + - not download_run_once + + - name: download_container | Download image if required + command: "{{ image_pull_command_on_localhost if download_localhost else image_pull_command }} {{ image_reponame }}" + delegate_to: "{{ download_delegate if download_run_once else inventory_hostname }}" + delegate_facts: yes + run_once: "{{ download_run_once }}" + register: pull_task_result + until: pull_task_result is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 4 + become: "{{ user_can_become_root | default(false) or not download_localhost }}" + environment: "{{ proxy_env if container_manager == 'containerd' else omit }}" + when: + - pull_required or download_run_once + - not image_is_cached + + - name: download_container | Save and compress image + shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell + delegate_to: "{{ download_delegate }}" + delegate_facts: no + register: container_save_status + failed_when: container_save_status.stderr + run_once: true + become: 
"{{ user_can_become_root | default(false) or not download_localhost }}" + when: + - not image_is_cached + - download_run_once + + - name: download_container | Copy image to ansible host cache + synchronize: + src: "{{ image_path_final }}" + dest: "{{ image_path_cached }}" + use_ssh_args: true + mode: pull + when: + - not image_is_cached + - download_run_once + - not download_localhost + - download_delegate == inventory_hostname + + - name: download_container | Upload image to node if it is cached + synchronize: + src: "{{ image_path_cached }}" + dest: "{{ image_path_final }}" + use_ssh_args: true + mode: push + delegate_facts: no + register: upload_image + failed_when: not upload_image + until: upload_image is succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + when: + - pull_required + - download_force_cache + + - name: download_container | Load image into the local container registry + shell: "{{ image_load_command }}" # noqa 305 image_load_command uses pipes, therefore requires shell + register: container_load_status + failed_when: container_load_status is failed + when: + - pull_required + - download_force_cache + + - name: download_container | Remove container image from cache + file: + state: absent + path: "{{ image_path_final }}" + when: + - not download_keep_remote_cache + tags: + - download diff --git a/kubespray/roles/download/tasks/download_file.yml b/kubespray/roles/download/tasks/download_file.yml new file mode 100644 index 0000000..376a15e --- /dev/null +++ b/kubespray/roles/download/tasks/download_file.yml @@ -0,0 +1,141 @@ +--- +- block: + - name: prep_download | Set a few facts + set_fact: + download_force_cache: "{{ true if download_run_once else download_force_cache }}" + + - name: download_file | Starting download of file + debug: + msg: "{{ download.url }}" + run_once: "{{ download_run_once }}" + + - name: download_file | Set pathname of cached file + set_fact: + file_path_cached: "{{ download_cache_dir }}/{{ download.dest | basename }}" + tags: + - facts + + - name: download_file | Create dest directory on node + file: + path: "{{ download.dest | dirname }}" + owner: "{{ download.owner | default(omit) }}" + mode: 0755 + state: directory + recurse: yes + + - name: download_file | Create local cache directory + file: + path: "{{ file_path_cached | dirname }}" + state: directory + recurse: yes + delegate_to: localhost + connection: local + delegate_facts: false + run_once: true + become: false + when: + - download_force_cache + tags: + - localhost + + - name: download_file | Create cache directory on download_delegate host + file: + path: "{{ file_path_cached | dirname }}" + state: directory + recurse: yes + delegate_to: "{{ download_delegate }}" + delegate_facts: false + run_once: true + when: + - download_force_cache + - not download_localhost + + # We check a number of mirrors that may hold the file and pick a working one at random + # This task will avoid logging it's parameters to not leak environment passwords in the log + - name: download_file | Validate mirrors + uri: + url: "{{ mirror }}" + method: HEAD + validate_certs: "{{ download_validate_certs }}" + url_username: "{{ download.username | default(omit) }}" + url_password: "{{ download.password | default(omit) }}" + force_basic_auth: "{{ download.force_basic_auth | default(omit) }}" + delegate_to: "{{ download_delegate if download_force_cache else inventory_hostname }}" + run_once: "{{ download_force_cache }}" + register: uri_result + until: uri_result is success + retries: 4 + delay: "{{ 
retry_stagger | default(5) }}" + environment: "{{ proxy_env }}" + no_log: "{{ not (unsafe_show_logs|bool) }}" + loop: "{{ download.mirrors | default([download.url]) }}" + loop_control: + loop_var: mirror + ignore_errors: true + + # Ansible 2.9 requires we convert a generator to a list + - name: download_file | Get the list of working mirrors + set_fact: + valid_mirror_urls: "{{ uri_result.results | selectattr('failed', 'eq', False) | map(attribute='mirror') | list }}" + delegate_to: "{{ download_delegate if download_force_cache else inventory_hostname }}" + + # This must always be called, to check if the checksum matches. On no-match the file is re-downloaded. + # This task will avoid logging it's parameters to not leak environment passwords in the log + - name: download_file | Download item + get_url: + url: "{{ valid_mirror_urls | random }}" + dest: "{{ file_path_cached if download_force_cache else download.dest }}" + owner: "{{ omit if download_localhost else (download.owner | default(omit)) }}" + mode: "{{ omit if download_localhost else (download.mode | default(omit)) }}" + checksum: "{{ 'sha256:' + download.sha256 if download.sha256 else omit }}" + validate_certs: "{{ download_validate_certs }}" + url_username: "{{ download.username | default(omit) }}" + url_password: "{{ download.password | default(omit) }}" + force_basic_auth: "{{ download.force_basic_auth | default(omit) }}" + delegate_to: "{{ download_delegate if download_force_cache else inventory_hostname }}" + run_once: "{{ download_force_cache }}" + register: get_url_result + become: "{{ not download_localhost }}" + until: "'OK' in get_url_result.msg or 'file already exists' in get_url_result.msg" + retries: 4 + delay: "{{ retry_stagger | default(5) }}" + environment: "{{ proxy_env }}" + no_log: "{{ not (unsafe_show_logs|bool) }}" + + - name: download_file | Copy file back to ansible host file cache + synchronize: + src: "{{ file_path_cached }}" + dest: "{{ file_path_cached }}" + use_ssh_args: true + mode: pull + when: + - download_force_cache + - not download_localhost + - download_delegate == inventory_hostname + + - name: download_file | Copy file from cache to nodes, if it is available + synchronize: + src: "{{ file_path_cached }}" + dest: "{{ download.dest }}" + use_ssh_args: true + mode: push + register: get_task + until: get_task is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 4 + when: + - download_force_cache + + - name: download_file | Set mode and owner + file: + path: "{{ download.dest }}" + mode: "{{ download.mode | default(omit) }}" + owner: "{{ download.owner | default(omit) }}" + when: + - download_force_cache + + - name: "download_file | Extract file archives" + include_tasks: "extract_file.yml" + + tags: + - download diff --git a/kubespray/roles/download/tasks/extract_file.yml b/kubespray/roles/download/tasks/extract_file.yml new file mode 100644 index 0000000..81858dd --- /dev/null +++ b/kubespray/roles/download/tasks/extract_file.yml @@ -0,0 +1,11 @@ +--- +- name: extract_file | Unpacking archive + unarchive: + src: "{{ download.dest }}" + dest: "{{ download.dest | dirname }}" + owner: "{{ download.owner | default(omit) }}" + mode: "{{ download.mode | default(omit) }}" + copy: no + extra_opts: "{{ download.unarchive_extra_opts|default(omit) }}" + when: + - download.unarchive | default(false) diff --git a/kubespray/roles/download/tasks/main.yml b/kubespray/roles/download/tasks/main.yml new file mode 100644 index 0000000..536c293 --- /dev/null +++ b/kubespray/roles/download/tasks/main.yml 
@@ -0,0 +1,30 @@ +--- +- name: download | Prepare working directories and variables + import_tasks: prep_download.yml + when: + - not skip_downloads|default(false) + tags: + - download + - upload + +- name: download | Get kubeadm binary and list of required images + include_tasks: prep_kubeadm_images.yml + when: + - not skip_downloads|default(false) + - inventory_hostname in groups['kube_control_plane'] + tags: + - download + - upload + +- name: download | Download files / images + include_tasks: "{{ include_file }}" + loop: "{{ downloads | combine(kubeadm_images) | dict2items }}" + vars: + download: "{{ download_defaults | combine(item.value) }}" + include_file: "download_{% if download.container %}container{% else %}file{% endif %}.yml" + when: + - not skip_downloads | default(false) + - download.enabled + - item.value.enabled + - (not (item.value.container | default(false))) or (item.value.container and download_container) + - (download_run_once and inventory_hostname == download_delegate) or (group_names | intersect(download.groups) | length) diff --git a/kubespray/roles/download/tasks/prep_download.yml b/kubespray/roles/download/tasks/prep_download.yml new file mode 100644 index 0000000..9419f24 --- /dev/null +++ b/kubespray/roles/download/tasks/prep_download.yml @@ -0,0 +1,92 @@ +--- +- name: prep_download | Set a few facts + set_fact: + download_force_cache: "{{ true if download_run_once else download_force_cache }}" + tags: + - facts + +- name: prep_download | On localhost, check if passwordless root is possible + command: "true" + delegate_to: localhost + connection: local + run_once: true + register: test_become + changed_when: false + ignore_errors: true # noqa ignore-errors + become: true + when: + - download_localhost + tags: + - localhost + - asserts + +- name: prep_download | On localhost, check if user has access to the container runtime without using sudo + shell: "{{ image_info_command_on_localhost }}" # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell + delegate_to: localhost + connection: local + run_once: true + register: test_docker + changed_when: false + ignore_errors: true # noqa ignore-errors + become: false + when: + - download_localhost + tags: + - localhost + - asserts + +- name: prep_download | Parse the outputs of the previous commands + set_fact: + user_in_docker_group: "{{ not test_docker.failed }}" + user_can_become_root: "{{ not test_become.failed }}" + when: + - download_localhost + tags: + - localhost + - asserts + +- name: prep_download | Check that local user is in group or can become root + assert: + that: "user_in_docker_group or user_can_become_root" + msg: >- + Error: User is not in docker group and cannot become root. When download_localhost is true, at least one of these two conditions must be met. 
+ when: + - download_localhost + tags: + - localhost + - asserts + +- name: prep_download | Register docker images info + shell: "{{ image_info_command }}" # noqa 305 image_info_command contains pipe therefore requires shell + no_log: "{{ not (unsafe_show_logs|bool) }}" + register: docker_images + failed_when: false + changed_when: false + check_mode: no + when: download_container + +- name: prep_download | Create staging directory on remote node + file: + path: "{{ local_release_dir }}/images" + state: directory + recurse: yes + mode: 0755 + owner: "{{ ansible_ssh_user | default(ansible_user_id) }}" + when: + - ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: prep_download | Create local cache for files and images on control node + file: + path: "{{ download_cache_dir }}/images" + state: directory + recurse: yes + mode: 0755 + delegate_to: localhost + connection: local + delegate_facts: no + run_once: true + become: false + when: + - download_force_cache + tags: + - localhost diff --git a/kubespray/roles/download/tasks/prep_kubeadm_images.yml b/kubespray/roles/download/tasks/prep_kubeadm_images.yml new file mode 100644 index 0000000..aa21849 --- /dev/null +++ b/kubespray/roles/download/tasks/prep_kubeadm_images.yml @@ -0,0 +1,71 @@ +--- +- name: prep_kubeadm_images | Check kubeadm version matches kubernetes version + fail: + msg: "Kubeadm version {{ kubeadm_version }} does not match kubernetes version {{ kube_version }}" + when: + - not skip_downloads | default(false) + - not kubeadm_version == downloads.kubeadm.version + +- name: prep_kubeadm_images | Download kubeadm binary + include_tasks: "download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.kubeadm) }}" + when: + - not skip_downloads | default(false) + - downloads.kubeadm.enabled + +- name: prep_kubeadm_images | Create kubeadm config + template: + src: "kubeadm-images.yaml.j2" + dest: "{{ kube_config_dir }}/kubeadm-images.yaml" + mode: 0644 + when: + - not skip_kubeadm_images|default(false) + +- name: prep_kubeadm_images | Copy kubeadm binary from download dir to system path + copy: + src: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}" + dest: "{{ bin_dir }}/kubeadm" + mode: 0755 + remote_src: true + +- name: prep_kubeadm_images | Set kubeadm binary permissions + file: + path: "{{ bin_dir }}/kubeadm" + mode: "0755" + state: file + +- name: prep_kubeadm_images | Generate list of required images + shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'" + args: + executable: /bin/bash + register: kubeadm_images_raw + run_once: true + changed_when: false + when: + - not skip_kubeadm_images|default(false) + +- name: prep_kubeadm_images | Parse list of images + vars: + kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}" + set_fact: + kubeadm_image: + key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*','')).split(':')[0] }}" + value: + enabled: true + container: true + repo: "{{ item | regex_replace('^(.*):.*$','\\1') }}" + tag: "{{ item | regex_replace('^.*:(.*)$','\\1') }}" + groups: k8s_cluster + loop: "{{ kubeadm_images_list | flatten(levels=1) }}" + register: kubeadm_images_cooked + run_once: true + when: + - not skip_kubeadm_images|default(false) + +- name: prep_kubeadm_images | Convert list of images to dict for later use + set_fact: + kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict 
}}" + run_once: true + when: + - not skip_kubeadm_images|default(false) diff --git a/kubespray/roles/download/tasks/set_container_facts.yml b/kubespray/roles/download/tasks/set_container_facts.yml new file mode 100644 index 0000000..9d36c24 --- /dev/null +++ b/kubespray/roles/download/tasks/set_container_facts.yml @@ -0,0 +1,55 @@ +--- +- name: set_container_facts | Display the name of the image being processed + debug: + msg: "{{ download.repo }}" + +- name: set_container_facts | Set if containers should be pulled by digest + set_fact: + pull_by_digest: "{{ download.sha256 is defined and download.sha256 }}" + +- name: set_container_facts | Define by what name to pull the image + set_fact: + image_reponame: >- + {%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%} + +- name: set_container_facts | Define file name of image + set_fact: + image_filename: "{{ image_reponame | regex_replace('/|\0|:', '_') }}.tar" + +- name: set_container_facts | Define path of image + set_fact: + image_path_cached: "{{ download_cache_dir }}/images/{{ image_filename }}" + image_path_final: "{{ local_release_dir }}/images/{{ image_filename }}" + +- name: Set image save/load command for docker + set_fact: + image_save_command: "{{ docker_bin_dir }}/docker save {{ image_reponame }} | gzip -{{ download_compress }} > {{ image_path_final }}" + image_load_command: "{{ docker_bin_dir }}/docker load < {{ image_path_final }}" + when: container_manager == 'docker' + +- name: Set image save/load command for containerd + set_fact: + image_save_command: "{{ bin_dir }}/nerdctl -n k8s.io image save -o {{ image_path_final }} {{ image_reponame }}" + image_load_command: "{{ bin_dir }}/nerdctl -n k8s.io image load < {{ image_path_final }}" + when: container_manager == 'containerd' + +- name: Set image save/load command for crio + set_fact: + image_save_command: "{{ bin_dir }}/skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }} 2>/dev/null" + image_load_command: "{{ bin_dir }}/skopeo copy docker-archive:{{ image_path_final }} containers-storage:{{ image_reponame }} 2>/dev/null" + when: container_manager == 'crio' + +- name: Set image save/load command for docker on localhost + set_fact: + image_save_command_on_localhost: "{{ docker_bin_dir }}/docker save {{ image_reponame }} | gzip -{{ download_compress }} > {{ image_path_cached }}" + when: container_manager_on_localhost == 'docker' + +- name: Set image save/load command for containerd on localhost + set_fact: + image_save_command_on_localhost: "{{ containerd_bin_dir }}/ctr -n k8s.io image export --platform linux/{{ image_arch }} {{ image_path_cached }} {{ image_reponame }}" + when: container_manager_on_localhost == 'containerd' + +- name: Set image save/load command for crio on localhost + set_fact: + image_save_command_on_localhost: "{{ bin_dir }}/skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }} 2>/dev/null" + when: container_manager_on_localhost == 'crio' diff --git a/kubespray/roles/download/templates/kubeadm-images.yaml.j2 b/kubespray/roles/download/templates/kubeadm-images.yaml.j2 new file mode 100644 index 0000000..3a9121d --- /dev/null +++ b/kubespray/roles/download/templates/kubeadm-images.yaml.j2 @@ -0,0 +1,25 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +nodeRegistration: + criSocket: {{ cri_socket }} +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +imageRepository: {{ 
kube_image_repo }} +kubernetesVersion: {{ kube_version }} +etcd: +{% if etcd_deployment_type == "kubeadm" %} + local: + imageRepository: "{{ etcd_image_repo | regex_replace("/etcd$","") }}" + imageTag: "{{ etcd_image_tag }}" +{% else %} + external: + endpoints: +{% for endpoint in etcd_access_addresses.split(',') %} + - {{ endpoint }} +{% endfor %} +{% endif %} +dns: + type: CoreDNS + imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }} + imageTag: {{ coredns_image_tag }} diff --git a/kubespray/roles/etcd/defaults/main.yml b/kubespray/roles/etcd/defaults/main.yml new file mode 100644 index 0000000..bf38ace --- /dev/null +++ b/kubespray/roles/etcd/defaults/main.yml @@ -0,0 +1,122 @@ +--- +# Set etcd user +etcd_owner: etcd + +# Set to false to only do certificate management +etcd_cluster_setup: true +etcd_events_cluster_setup: false + +# Set to true to separate k8s events to a different etcd cluster +etcd_events_cluster_enabled: false + +etcd_backup_prefix: "/var/backups" +etcd_data_dir: "/var/lib/etcd" + +# Number of etcd backups to retain. Set to a value < 0 to retain all backups +etcd_backup_retention_count: -1 + +force_etcd_cert_refresh: true +etcd_config_dir: /etc/ssl/etcd +etcd_cert_dir: "{{ etcd_config_dir }}/ssl" +etcd_cert_dir_mode: "0700" +etcd_cert_group: root +# Note: This does not set up DNS entries. It simply adds the following DNS +# entries to the certificate +etcd_cert_alt_names: + - "etcd.kube-system.svc.{{ dns_domain }}" + - "etcd.kube-system.svc" + - "etcd.kube-system" + - "etcd" +etcd_cert_alt_ips: [] + +etcd_script_dir: "{{ bin_dir }}/etcd-scripts" + +etcd_heartbeat_interval: "250" +etcd_election_timeout: "5000" + +# etcd_snapshot_count: "10000" + +etcd_metrics: "basic" + +# Define in inventory to set a separate port for etcd to expose metrics on +# etcd_metrics_port: 2381 + +## A dictionary of extra environment variables to add to etcd.env, formatted like: +## etcd_extra_vars: +## ETCD_VAR1: "value1" +## ETCD_VAR2: "value2" +etcd_extra_vars: {} + +# Limits +# Limit memory only if <4GB memory on host. 0=unlimited +# This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %}" + +# The default storage size limit is 2G. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +# Uncomment to set CPU share for etcd +# etcd_cpu_limit: 300m + +etcd_blkio_weight: 1000 + +etcd_node_cert_hosts: "{{ groups['k8s_cluster'] }}" + +etcd_compaction_retention: "8" + +# Force clients like etcdctl to use TLS certs (different than peer security) +etcd_secure_client: true + +# Enable peer client cert authentication +etcd_peer_client_auth: true + +# Maximum number of snapshot files to retain (0 is unlimited) +# etcd_max_snapshots: 5 + +# Maximum number of wal files to retain (0 is unlimited) +# etcd_max_wals: 5 + +# Number of loop retries +etcd_retries: 4 + +## Support tls cipher suites. 
+# etcd_tls_cipher_suites: {} +# - TLS_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + +# ETCD 3.5.x issue +# https://groups.google.com/a/kubernetes.io/g/dev/c/B7gJs88XtQc/m/rSgNOzV2BwAJ?utm_medium=email&utm_source=footer +etcd_experimental_initial_corrupt_check: true + +# If this is true, debug information will be displayed but +# may contain some private data, so it is recommended to set it to false +# in the production environment. +unsafe_show_logs: false diff --git a/kubespray/roles/etcd/handlers/backup.yml b/kubespray/roles/etcd/handlers/backup.yml new file mode 100644 index 0000000..d848cdb --- /dev/null +++ b/kubespray/roles/etcd/handlers/backup.yml @@ -0,0 +1,62 @@ +--- +- name: Backup etcd data + command: /bin/true + notify: + - Refresh Time Fact + - Set Backup Directory + - Create Backup Directory + - Stat etcd v2 data directory + - Backup etcd v2 data + - Backup etcd v3 data + when: etcd_cluster_is_healthy.rc == 0 + +- name: Refresh Time Fact + setup: filter=ansible_date_time + +- name: Set Backup Directory + set_fact: + etcd_backup_directory: "{{ etcd_backup_prefix }}/etcd-{{ ansible_date_time.date }}_{{ ansible_date_time.time }}" + +- name: Create Backup Directory + file: + path: "{{ etcd_backup_directory }}" + state: directory + owner: root + group: root + mode: 0600 + +- name: Stat etcd v2 data directory + stat: + path: "{{ etcd_data_dir }}/member" + get_attributes: no + get_checksum: no + get_mime: no + register: etcd_data_dir_member + +- name: Backup etcd v2 data + when: etcd_data_dir_member.stat.exists + command: >- + {{ bin_dir }}/etcdctl backup + --data-dir {{ etcd_data_dir }} + --backup-dir {{ etcd_backup_directory }} + environment: + ETCDCTL_API: 2 + retries: 3 + register: backup_v2_command + until: backup_v2_command.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + +- name: Backup etcd v3 data + command: >- + {{ bin_dir }}/etcdctl + snapshot save {{ etcd_backup_directory }}/snapshot.db + environment: + ETCDCTL_API: 3 + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}" + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + retries: 3 + register: etcd_backup_v3_command + until: etcd_backup_v3_command.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" diff --git a/kubespray/roles/etcd/handlers/backup_cleanup.yml b/kubespray/roles/etcd/handlers/backup_cleanup.yml new file mode 100644 index 0000000..e670f46 --- /dev/null +++ b/kubespray/roles/etcd/handlers/backup_cleanup.yml @@ -0,0 +1,11 @@ +--- +- name: 
Cleanup etcd backups + command: /bin/true + notify: + - Remove old etcd backups + +- name: Remove old etcd backups + shell: + chdir: "{{ etcd_backup_prefix }}" + cmd: "find . -name 'etcd-*' -type d | sort -n | head -n -{{ etcd_backup_retention_count }} | xargs rm -rf" + when: etcd_backup_retention_count >= 0 diff --git a/kubespray/roles/etcd/handlers/main.yml b/kubespray/roles/etcd/handlers/main.yml new file mode 100644 index 0000000..ccf8f8f --- /dev/null +++ b/kubespray/roles/etcd/handlers/main.yml @@ -0,0 +1,62 @@ +--- +- name: restart etcd + command: /bin/true + notify: + - Backup etcd data + - etcd | reload systemd + - reload etcd + - wait for etcd up + - Cleanup etcd backups + +- name: restart etcd-events + command: /bin/true + notify: + - etcd | reload systemd + - reload etcd-events + - wait for etcd-events up + +- import_tasks: backup.yml + +- name: etcd | reload systemd + systemd: + daemon_reload: true + +- name: reload etcd + service: + name: etcd + state: restarted + when: is_etcd_master + +- name: reload etcd-events + service: + name: etcd-events + state: restarted + when: is_etcd_master + +- name: wait for etcd up + uri: + url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" + validate_certs: no + client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem" + client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem" + register: result + until: result.status is defined and result.status == 200 + retries: 60 + delay: 1 + +- import_tasks: backup_cleanup.yml + +- name: wait for etcd-events up + uri: + url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health" + validate_certs: no + client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem" + client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem" + register: result + until: result.status is defined and result.status == 200 + retries: 60 + delay: 1 + +- name: set etcd_secret_changed + set_fact: + etcd_secret_changed: true diff --git a/kubespray/roles/etcd/meta/main.yml b/kubespray/roles/etcd/meta/main.yml new file mode 100644 index 0000000..e996646 --- /dev/null +++ b/kubespray/roles/etcd/meta/main.yml @@ -0,0 +1,8 @@ +--- +dependencies: + - role: adduser + user: "{{ addusers.etcd }}" + when: not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) + - role: adduser + user: "{{ addusers.kube }}" + when: not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) diff --git a/kubespray/roles/etcd/tasks/check_certs.yml b/kubespray/roles/etcd/tasks/check_certs.yml new file mode 100644 index 0000000..c688c16 --- /dev/null +++ b/kubespray/roles/etcd/tasks/check_certs.yml @@ -0,0 +1,169 @@ +--- +- name: "Check_certs | Register certs that have already been generated on first etcd node" + find: + paths: "{{ etcd_cert_dir }}" + patterns: "ca.pem,node*.pem,member*.pem,admin*.pem" + get_checksum: true + delegate_to: "{{ groups['etcd'][0] }}" + register: etcdcert_master + run_once: true + +- name: "Check_certs | Set default value for 'sync_certs', 'gen_certs' and 'etcd_secret_changed' to false" + set_fact: + sync_certs: false + gen_certs: false + etcd_secret_changed: false + +- name: "Check certs | Register ca and etcd admin/member certs on etcd hosts" + stat: + path: "{{ etcd_cert_dir }}/{{ item }}" + get_attributes: no + get_checksum: yes + get_mime: no + register: etcd_member_certs + when: 
inventory_hostname in groups['etcd'] + with_items: + - ca.pem + - member-{{ inventory_hostname }}.pem + - member-{{ inventory_hostname }}-key.pem + - admin-{{ inventory_hostname }}.pem + - admin-{{ inventory_hostname }}-key.pem + +- name: "Check certs | Register ca and etcd node certs on kubernetes hosts" + stat: + path: "{{ etcd_cert_dir }}/{{ item }}" + register: etcd_node_certs + when: inventory_hostname in groups['k8s_cluster'] + with_items: + - ca.pem + - node-{{ inventory_hostname }}.pem + - node-{{ inventory_hostname }}-key.pem + +- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(1/2)" + set_fact: + gen_certs: true + when: force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list + run_once: true + with_items: "{{ expected_files }}" + vars: + expected_files: >- + ['{{ etcd_cert_dir }}/ca.pem', + {% set etcd_members = groups['etcd'] %} + {% for host in etcd_members %} + '{{ etcd_cert_dir }}/admin-{{ host }}.pem', + '{{ etcd_cert_dir }}/admin-{{ host }}-key.pem', + '{{ etcd_cert_dir }}/member-{{ host }}.pem', + '{{ etcd_cert_dir }}/member-{{ host }}-key.pem', + {% endfor %} + {% set k8s_nodes = groups['kube_control_plane'] %} + {% for host in k8s_nodes %} + '{{ etcd_cert_dir }}/node-{{ host }}.pem', + '{{ etcd_cert_dir }}/node-{{ host }}-key.pem' + {% if not loop.last %}{{','}}{% endif %} + {% endfor %}] + +- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(2/2)" + set_fact: + gen_certs: true + run_once: true + with_items: "{{ expected_files }}" + vars: + expected_files: >- + ['{{ etcd_cert_dir }}/ca.pem', + {% set etcd_members = groups['etcd'] %} + {% for host in etcd_members %} + '{{ etcd_cert_dir }}/admin-{{ host }}.pem', + '{{ etcd_cert_dir }}/admin-{{ host }}-key.pem', + '{{ etcd_cert_dir }}/member-{{ host }}.pem', + '{{ etcd_cert_dir }}/member-{{ host }}-key.pem', + {% endfor %} + {% set k8s_nodes = groups['k8s_cluster']|unique|sort %} + {% for host in k8s_nodes %} + '{{ etcd_cert_dir }}/node-{{ host }}.pem', + '{{ etcd_cert_dir }}/node-{{ host }}-key.pem' + {% if not loop.last %}{{','}}{% endif %} + {% endfor %}] + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list + +- name: "Check_certs | Set 'gen_master_certs' object to track whether member and admin certs exist on first etcd node" + set_fact: + gen_master_certs: |- + { + {% set etcd_members = groups['etcd'] -%} + {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %} + {% for host in etcd_members -%} + {% set member_cert = "%s/member-%s.pem"|format(etcd_cert_dir, host) %} + {% set member_key = "%s/member-%s-key.pem"|format(etcd_cert_dir, host) %} + {% set admin_cert = "%s/admin-%s.pem"|format(etcd_cert_dir, host) %} + {% set admin_key = "%s/admin-%s-key.pem"|format(etcd_cert_dir, host) %} + {% if force_etcd_cert_refresh -%} + "{{ host }}": True, + {% elif member_cert in existing_certs and member_key in existing_certs and admin_cert in existing_certs and admin_key in existing_certs -%} + "{{ host }}": False, + {% else -%} + "{{ host }}": True, + {% endif -%} + {% endfor %} + } + run_once: true + +- name: "Check_certs | Set 'gen_node_certs' object to track whether node certs exist on first etcd node" + set_fact: + gen_node_certs: |- 
+ { + {% set k8s_nodes = groups['k8s_cluster'] -%} + {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %} + {% for host in k8s_nodes -%} + {% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %} + {% set host_key = "%s/node-%s-key.pem"|format(etcd_cert_dir, host) %} + {% if force_etcd_cert_refresh -%} + "{{ host }}": True, + {% elif host_cert in existing_certs and host_key in existing_certs -%} + "{{ host }}": False, + {% else -%} + "{{ host }}": True, + {% endif -%} + {% endfor %} + } + run_once: true + +- name: "Check_certs | Set 'etcd_member_requires_sync' to true if ca or member/admin cert and key don't exist on etcd member or checksum doesn't match" + set_fact: + etcd_member_requires_sync: true + when: + - inventory_hostname in groups['etcd'] + - (not etcd_member_certs.results[0].stat.exists|default(false)) or + (not etcd_member_certs.results[1].stat.exists|default(false)) or + (not etcd_member_certs.results[2].stat.exists|default(false)) or + (not etcd_member_certs.results[3].stat.exists|default(false)) or + (not etcd_member_certs.results[4].stat.exists|default(false)) or + (etcd_member_certs.results[0].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[0].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_member_certs.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[1].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_member_certs.results[2].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[2].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_member_certs.results[3].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[3].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_member_certs.results[4].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[4].stat.path)|map(attribute="checksum")|first|default('')) + +- name: "Check_certs | Set 'kubernetes_host_requires_sync' to true if ca or node cert and key don't exist on kubernetes host or checksum doesn't match" + set_fact: + kubernetes_host_requires_sync: true + when: + - inventory_hostname in groups['k8s_cluster'] and + inventory_hostname not in groups['etcd'] + - (not etcd_node_certs.results[0].stat.exists|default(false)) or + (not etcd_node_certs.results[1].stat.exists|default(false)) or + (not etcd_node_certs.results[2].stat.exists|default(false)) or + (etcd_node_certs.results[0].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[0].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_node_certs.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[1].stat.path)|map(attribute="checksum")|first|default('')) or + (etcd_node_certs.results[2].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[2].stat.path)|map(attribute="checksum")|first|default('')) + +- name: "Check_certs | Set 'sync_certs' to true" + set_fact: + sync_certs: true + when: + - etcd_member_requires_sync|default(false) or + kubernetes_host_requires_sync|default(false) or + (inventory_hostname in gen_master_certs and gen_master_certs[inventory_hostname]) or + (inventory_hostname in gen_node_certs and 
gen_node_certs[inventory_hostname]) diff --git a/kubespray/roles/etcd/tasks/configure.yml b/kubespray/roles/etcd/tasks/configure.yml new file mode 100644 index 0000000..7534e41 --- /dev/null +++ b/kubespray/roles/etcd/tasks/configure.yml @@ -0,0 +1,168 @@ +--- +- name: Configure | Check if etcd cluster is healthy + shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null" + args: + executable: /bin/bash + register: etcd_cluster_is_healthy + failed_when: false + changed_when: false + check_mode: no + run_once: yes + when: is_etcd_master and etcd_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- name: Configure | Check if etcd-events cluster is healthy + shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null" + args: + executable: /bin/bash + register: etcd_events_cluster_is_healthy + failed_when: false + changed_when: false + check_mode: no + run_once: yes + when: is_etcd_master and etcd_events_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}" + +- include_tasks: refresh_config.yml + when: is_etcd_master + +- name: Configure | Copy etcd.service systemd file + template: + src: "etcd-{{ etcd_deployment_type }}.service.j2" + dest: /etc/systemd/system/etcd.service + backup: yes + mode: 0644 + when: is_etcd_master and etcd_cluster_setup + +- name: Configure | Copy etcd-events.service systemd file + template: + src: "etcd-events-{{ etcd_deployment_type }}.service.j2" + dest: /etc/systemd/system/etcd-events.service + backup: yes + mode: 0644 + when: is_etcd_master and etcd_events_cluster_setup + +- name: Configure | reload systemd + systemd: + daemon_reload: true + when: is_etcd_master + +# when scaling new etcd will fail to start +- name: Configure | Ensure etcd is running + service: + name: etcd + state: started + enabled: yes + ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}" # noqa ignore-errors + when: is_etcd_master and etcd_cluster_setup + +# when scaling new etcd will fail to start +- name: Configure | Ensure etcd-events is running + service: + name: etcd-events + state: started + enabled: yes + ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}" # noqa ignore-errors + when: is_etcd_master and etcd_events_cluster_setup + +- name: Configure | Wait for etcd cluster to be healthy + shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null" + args: + executable: /bin/bash + register: etcd_cluster_is_healthy + until: etcd_cluster_is_healthy.rc == 0 + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + check_mode: no + run_once: yes + when: + - is_etcd_master + - etcd_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ 
etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- name: Configure | Wait for etcd-events cluster to be healthy + shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null" + args: + executable: /bin/bash + register: etcd_events_cluster_is_healthy + until: etcd_events_cluster_is_healthy.rc == 0 + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + check_mode: no + run_once: yes + when: + - is_etcd_master + - etcd_events_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}" + +- name: Configure | Check if member is in etcd cluster + shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}" + register: etcd_member_in_cluster + ignore_errors: true # noqa ignore-errors + changed_when: false + check_mode: no + when: is_etcd_master and etcd_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- name: Configure | Check if member is in etcd-events cluster + shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}" + register: etcd_events_member_in_cluster + ignore_errors: true # noqa ignore-errors + changed_when: false + check_mode: no + when: is_etcd_master and etcd_events_cluster_setup + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}" + +- name: Configure | Join member(s) to etcd cluster one at a time + include_tasks: join_etcd_member.yml + with_items: "{{ groups['etcd'] }}" + when: inventory_hostname == item and etcd_cluster_setup and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0 + +- name: Configure | Join member(s) to etcd-events cluster one at a time + include_tasks: join_etcd-events_member.yml + with_items: "{{ groups['etcd'] }}" + when: inventory_hostname == item and etcd_events_cluster_setup and etcd_events_member_in_cluster.rc != 0 and etcd_events_cluster_is_healthy.rc == 0 diff --git a/kubespray/roles/etcd/tasks/gen_certs_script.yml b/kubespray/roles/etcd/tasks/gen_certs_script.yml new file mode 100644 index 0000000..eb97a82 --- /dev/null +++ b/kubespray/roles/etcd/tasks/gen_certs_script.yml @@ -0,0 +1,166 @@ +--- +- name: Gen_certs | create etcd cert dir + file: + path: "{{ etcd_cert_dir }}" + group: "{{ etcd_cert_group }}" + state: directory + owner: "{{ etcd_owner }}" + mode: "{{ etcd_cert_dir_mode }}" + recurse: yes + +- name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})" + file: + path: "{{ etcd_script_dir }}" + state: directory + owner: root + mode: 0700 + run_once: yes + when: 
inventory_hostname == groups['etcd'][0]
+
+- name: Gen_certs | write openssl config
+  template:
+    src: "openssl.conf.j2"
+    dest: "{{ etcd_config_dir }}/openssl.conf"
+    mode: 0640
+  run_once: yes
+  delegate_to: "{{ groups['etcd'][0] }}"
+  when:
+    - gen_certs|default(false)
+    - inventory_hostname == groups['etcd'][0]
+
+- name: Gen_certs | copy certs generation script
+  template:
+    src: "make-ssl-etcd.sh.j2"
+    dest: "{{ etcd_script_dir }}/make-ssl-etcd.sh"
+    mode: 0700
+  run_once: yes
+  when:
+    - gen_certs|default(false)
+    - inventory_hostname == groups['etcd'][0]
+
+- name: Gen_certs | run cert generation script for etcd and kube control plane nodes
+  command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
+  environment:
+    - MASTERS: "{% for m in groups['etcd'] %}
+        {% if gen_master_certs[m] %}
+        {{ m }}
+        {% endif %}
+        {% endfor %}"
+    - HOSTS: "{% for h in groups['kube_control_plane'] %}
+        {% if gen_node_certs[h] %}
+        {{ h }}
+        {% endif %}
+        {% endfor %}"
+  run_once: yes
+  delegate_to: "{{ groups['etcd'][0] }}"
+  when: gen_certs|default(false)
+  notify: set etcd_secret_changed
+
+- name: Gen_certs | run cert generation script for all clients
+  command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
+  environment:
+    - HOSTS: "{% for h in groups['k8s_cluster'] %}
+        {% if gen_node_certs[h] %}
+        {{ h }}
+        {% endif %}
+        {% endfor %}"
+  run_once: yes
+  delegate_to: "{{ groups['etcd'][0] }}"
+  when:
+    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+    - kube_network_plugin != "calico" or calico_datastore == "etcd"
+    - gen_certs|default(false)
+  notify: set etcd_secret_changed
+
+- name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node
+  slurp:
+    src: "{{ item }}"
+  register: etcd_master_certs
+  with_items:
+    - "{{ etcd_cert_dir }}/ca.pem"
+    - "{{ etcd_cert_dir }}/ca-key.pem"
+    - "[{% for node in groups['etcd'] %}
+        '{{ etcd_cert_dir }}/admin-{{ node }}.pem',
+        '{{ etcd_cert_dir }}/admin-{{ node }}-key.pem',
+        '{{ etcd_cert_dir }}/member-{{ node }}.pem',
+        '{{ etcd_cert_dir }}/member-{{ node }}-key.pem',
+        {% endfor %}]"
+    - "[{% for node in (groups['kube_control_plane']) %}
+        '{{ etcd_cert_dir }}/node-{{ node }}.pem',
+        '{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
+        {% endfor %}]"
+  delegate_to: "{{ groups['etcd'][0] }}"
+  when:
+    - inventory_hostname in groups['etcd']
+    - sync_certs|default(false)
+    - inventory_hostname != groups['etcd'][0]
+  notify: set etcd_secret_changed
+
+- name: Gen_certs | Write etcd member/admin and kube_control_plane client certs to other etcd nodes
+  copy:
+    dest: "{{ item.item }}"
+    content: "{{ item.content | b64decode }}"
+    group: "{{ etcd_cert_group }}"
+    owner: "{{ etcd_owner }}"
+    mode: 0640
+  with_items: "{{ etcd_master_certs.results }}"
+  when:
+    - inventory_hostname in groups['etcd']
+    - sync_certs|default(false)
+    - inventory_hostname != groups['etcd'][0]
+  loop_control:
+    label: "{{ item.item }}"
+
+- name: Gen_certs | Gather node certs from first etcd node
+  slurp:
+    src: "{{ item }}"
+  register: etcd_master_node_certs
+  with_items:
+    - "[{% for node in groups['k8s_cluster'] %}
+        '{{ etcd_cert_dir }}/node-{{ node }}.pem',
+        '{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
+        {% endfor %}]"
+  delegate_to: "{{ groups['etcd'][0] }}"
+  when:
+    - inventory_hostname in groups['etcd']
+    - inventory_hostname != groups['etcd'][0]
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + notify: set etcd_secret_changed + +- name: Gen_certs | Write node certs to other etcd nodes + copy: + dest: "{{ item.item }}" + content: "{{ item.content | b64decode }}" + group: "{{ etcd_cert_group }}" + owner: "{{ etcd_owner }}" + mode: 0640 + with_items: "{{ etcd_master_node_certs.results }}" + when: + - inventory_hostname in groups['etcd'] + - inventory_hostname != groups['etcd'][0] + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + loop_control: + label: "{{ item.item }}" + +- include_tasks: gen_nodes_certs_script.yml + when: + - inventory_hostname in groups['kube_control_plane'] and + sync_certs|default(false) and inventory_hostname not in groups['etcd'] + +- include_tasks: gen_nodes_certs_script.yml + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - inventory_hostname in groups['k8s_cluster'] and + sync_certs|default(false) and inventory_hostname not in groups['etcd'] + +- name: Gen_certs | check certificate permissions + file: + path: "{{ etcd_cert_dir }}" + group: "{{ etcd_cert_group }}" + state: directory + owner: "{{ etcd_owner }}" + mode: "{{ etcd_cert_dir_mode }}" + recurse: yes diff --git a/kubespray/roles/etcd/tasks/gen_nodes_certs_script.yml b/kubespray/roles/etcd/tasks/gen_nodes_certs_script.yml new file mode 100644 index 0000000..d176e01 --- /dev/null +++ b/kubespray/roles/etcd/tasks/gen_nodes_certs_script.yml @@ -0,0 +1,32 @@ +--- +- name: Gen_certs | Set cert names per node + set_fact: + my_etcd_node_certs: [ 'ca.pem', + 'node-{{ inventory_hostname }}.pem', + 'node-{{ inventory_hostname }}-key.pem'] + tags: + - facts + +- name: "Check_certs | Set 'sync_certs' to true on nodes" + set_fact: + sync_certs: true + with_items: + - "{{ my_etcd_node_certs }}" + +- name: Gen_certs | Gather node certs + shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0" + args: + executable: /bin/bash + warn: false + no_log: "{{ not (unsafe_show_logs|bool) }}" + register: etcd_node_certs + check_mode: no + delegate_to: "{{ groups['etcd'][0] }}" + changed_when: false + +- name: Gen_certs | Copy certs on nodes + shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}" + args: + executable: /bin/bash + no_log: "{{ not (unsafe_show_logs|bool) }}" + changed_when: false diff --git a/kubespray/roles/etcd/tasks/install_docker.yml b/kubespray/roles/etcd/tasks/install_docker.yml new file mode 100644 index 0000000..025a0ba --- /dev/null +++ b/kubespray/roles/etcd/tasks/install_docker.yml @@ -0,0 +1,45 @@ +--- +- import_tasks: install_etcdctl_docker.yml + when: etcd_cluster_setup + +- name: Get currently-deployed etcd version + shell: "{{ docker_bin_dir }}/docker ps --filter='name={{ etcd_member_name }}' --format='{{ '{{ .Image }}' }}'" + register: etcd_current_docker_image + when: etcd_cluster_setup + +- name: Get currently-deployed etcd-events version + shell: "{{ docker_bin_dir }}/docker ps --filter='name={{ etcd_member_name }}-events' --format='{{ '{{ .Image }}' }}'" + register: etcd_events_current_docker_image + when: 
etcd_events_cluster_setup + +- name: Restart etcd if necessary + command: /bin/true + notify: restart etcd + when: + - etcd_cluster_setup + - etcd_image_tag not in etcd_current_docker_image.stdout|default('') + +- name: Restart etcd-events if necessary + command: /bin/true + notify: restart etcd-events + when: + - etcd_events_cluster_setup + - etcd_image_tag not in etcd_events_current_docker_image.stdout|default('') + +- name: Install etcd launch script + template: + src: etcd.j2 + dest: "{{ bin_dir }}/etcd" + owner: 'root' + mode: 0750 + backup: yes + when: etcd_cluster_setup + +- name: Install etcd-events launch script + template: + src: etcd-events.j2 + dest: "{{ bin_dir }}/etcd-events" + owner: 'root' + mode: 0750 + backup: yes + when: etcd_events_cluster_setup diff --git a/kubespray/roles/etcd/tasks/install_etcdctl_docker.yml b/kubespray/roles/etcd/tasks/install_etcdctl_docker.yml new file mode 100644 index 0000000..74ae07f --- /dev/null +++ b/kubespray/roles/etcd/tasks/install_etcdctl_docker.yml @@ -0,0 +1,11 @@ +--- +- name: Install | Copy etcdctl binary from docker container + command: sh -c "{{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy; + {{ docker_bin_dir }}/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} && + {{ docker_bin_dir }}/docker cp etcdctl-binarycopy:/usr/local/bin/etcdctl {{ bin_dir }}/etcdctl && + {{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy" + register: etcdctl_install_result + until: etcdctl_install_result.rc == 0 + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false diff --git a/kubespray/roles/etcd/tasks/install_host.yml b/kubespray/roles/etcd/tasks/install_host.yml new file mode 100644 index 0000000..14a75b4 --- /dev/null +++ b/kubespray/roles/etcd/tasks/install_host.yml @@ -0,0 +1,41 @@ +--- +- name: Get currently-deployed etcd version + command: "{{ bin_dir }}/etcd --version" + register: etcd_current_host_version + # There's a chance this play could run before etcd is installed at all + ignore_errors: true + when: etcd_cluster_setup + +- name: Restart etcd if necessary + command: /bin/true + notify: restart etcd + when: + - etcd_cluster_setup + - etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('') + +- name: Restart etcd-events if necessary + command: /bin/true + notify: restart etcd-events + when: + - etcd_events_cluster_setup + - etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('') + +- name: install | Download etcd and etcdctl + include_tasks: "../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.etcd) }}" + when: etcd_cluster_setup + tags: + - never + - etcd + +- name: install | Copy etcd and etcdctl binary from download dir + copy: + src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}" + dest: "{{ bin_dir }}/{{ item }}" + mode: 0755 + remote_src: yes + with_items: + - etcd + - etcdctl + when: etcd_cluster_setup diff --git a/kubespray/roles/etcd/tasks/join_etcd-events_member.yml b/kubespray/roles/etcd/tasks/join_etcd-events_member.yml new file mode 100644 index 0000000..8336f1a --- /dev/null +++ b/kubespray/roles/etcd/tasks/join_etcd-events_member.yml @@ -0,0 +1,47 @@ +--- +- name: Join Member | Add member to etcd-events cluster # noqa 301 305 + shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}" + register: member_add_result + until: member_add_result.rc == 0 + retries: "{{ 
etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}" + +- include_tasks: refresh_config.yml + vars: + etcd_events_peer_addresses: >- + {% for host in groups['etcd'] -%} + {%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%} + {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2382, + {%- endif -%} + {%- if loop.last -%} + {{ etcd_member_name }}={{ etcd_events_peer_url }} + {%- endif -%} + {%- endfor -%} + +- name: Join Member | Ensure member is in etcd-events cluster + shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep {{ etcd_events_access_address }} >/dev/null" + args: + executable: /bin/bash + register: etcd_events_member_in_cluster + changed_when: false + check_mode: no + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}" + +- name: Configure | Ensure etcd-events is running + service: + name: etcd-events + state: started + enabled: yes diff --git a/kubespray/roles/etcd/tasks/join_etcd_member.yml b/kubespray/roles/etcd/tasks/join_etcd_member.yml new file mode 100644 index 0000000..2244039 --- /dev/null +++ b/kubespray/roles/etcd/tasks/join_etcd_member.yml @@ -0,0 +1,51 @@ +--- +- name: Join Member | Add member to etcd cluster # noqa 301 305 + shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}" + register: member_add_result + until: member_add_result.rc == 0 or 'Peer URLs already exists' in member_add_result.stderr + failed_when: member_add_result.rc != 0 and 'Peer URLs already exists' not in member_add_result.stderr + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- include_tasks: refresh_config.yml + vars: + etcd_peer_addresses: >- + {% for host in groups['etcd'] -%} + {%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%} + {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2380, + {%- endif -%} + {%- if loop.last -%} + {{ etcd_member_name }}={{ etcd_peer_url }} + {%- endif -%} + {%- endfor -%} + +- name: Join Member | Ensure member is in etcd cluster + shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep {{ etcd_access_address }} >/dev/null" + args: + executable: /bin/bash + register: etcd_member_in_cluster + changed_when: false + check_mode: no + retries: "{{ etcd_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + until: etcd_member_in_cluster.rc == 0 + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: 
"{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- name: Configure | Ensure etcd is running + service: + name: etcd + state: started + enabled: yes diff --git a/kubespray/roles/etcd/tasks/main.yml b/kubespray/roles/etcd/tasks/main.yml new file mode 100644 index 0000000..fb593db --- /dev/null +++ b/kubespray/roles/etcd/tasks/main.yml @@ -0,0 +1,77 @@ +--- +- include_tasks: check_certs.yml + when: cert_management == "script" + tags: + - etcd-secrets + - facts + +- include_tasks: "gen_certs_script.yml" + when: + - cert_management |d('script') == "script" + tags: + - etcd-secrets + +- include_tasks: upd_ca_trust.yml + when: + - inventory_hostname in groups['etcd']|union(groups['kube_control_plane'])|unique|sort + tags: + - etcd-secrets + +- include_tasks: upd_ca_trust.yml + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - inventory_hostname in groups['k8s_cluster'] + tags: + - etcd-secrets + +- name: "Gen_certs | Get etcd certificate serials" + command: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial" + register: "etcd_client_cert_serial_result" + changed_when: false + check_mode: no + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - inventory_hostname in groups['k8s_cluster'] + tags: + - master + - network + +- name: Set etcd_client_cert_serial + set_fact: + etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}" + when: + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + - inventory_hostname in groups['k8s_cluster'] + tags: + - master + - network + +- include_tasks: "install_{{ etcd_deployment_type }}.yml" + when: is_etcd_master + tags: + - upgrade + +- include_tasks: configure.yml + when: is_etcd_master + +- include_tasks: refresh_config.yml + when: is_etcd_master + +- name: Restart etcd if certs changed + command: /bin/true + notify: restart etcd + when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed|default(false) + +- name: Restart etcd-events if certs changed + command: /bin/true + notify: restart etcd + when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false) + +# After etcd cluster is assembled, make sure that +# initial state of the cluster is in `existing` +# state instead of `new`. 
+- include_tasks: refresh_config.yml + when: is_etcd_master diff --git a/kubespray/roles/etcd/tasks/refresh_config.yml b/kubespray/roles/etcd/tasks/refresh_config.yml new file mode 100644 index 0000000..57010fe --- /dev/null +++ b/kubespray/roles/etcd/tasks/refresh_config.yml @@ -0,0 +1,16 @@ +--- +- name: Refresh config | Create etcd config file + template: + src: etcd.env.j2 + dest: /etc/etcd.env + mode: 0640 + notify: restart etcd + when: is_etcd_master and etcd_cluster_setup + +- name: Refresh config | Create etcd-events config file + template: + src: etcd-events.env.j2 + dest: /etc/etcd-events.env + mode: 0640 + notify: restart etcd-events + when: is_etcd_master and etcd_events_cluster_setup diff --git a/kubespray/roles/etcd/tasks/upd_ca_trust.yml b/kubespray/roles/etcd/tasks/upd_ca_trust.yml new file mode 100644 index 0000000..f806d39 --- /dev/null +++ b/kubespray/roles/etcd/tasks/upd_ca_trust.yml @@ -0,0 +1,37 @@ +--- +- name: Gen_certs | target ca-certificate store file + set_fact: + ca_cert_path: |- + {% if ansible_os_family == "Debian" -%} + /usr/local/share/ca-certificates/etcd-ca.crt + {%- elif ansible_os_family == "RedHat" -%} + /etc/pki/ca-trust/source/anchors/etcd-ca.crt + {%- elif ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] -%} + /etc/ssl/certs/etcd-ca.pem + {%- elif ansible_os_family == "Suse" -%} + /etc/pki/trust/anchors/etcd-ca.pem + {%- elif ansible_os_family == "ClearLinux" -%} + /usr/share/ca-certs/etcd-ca.pem + {%- endif %} + tags: + - facts + +- name: Gen_certs | add CA to trusted CA dir + copy: + src: "{{ etcd_cert_dir }}/ca.pem" + dest: "{{ ca_cert_path }}" + remote_src: true + mode: 0640 + register: etcd_ca_cert + +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa 503 + command: update-ca-certificates + when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"] + +- name: Gen_certs | update ca-certificates (RedHat) # noqa 503 + command: update-ca-trust extract + when: etcd_ca_cert.changed and ansible_os_family == "RedHat" + +- name: Gen_certs | update ca-certificates (ClearLinux) # noqa 503 + command: clrtrust add "{{ ca_cert_path }}" + when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux" diff --git a/kubespray/roles/etcd/templates/etcd-docker.service.j2 b/kubespray/roles/etcd/templates/etcd-docker.service.j2 new file mode 100644 index 0000000..4dfbd72 --- /dev/null +++ b/kubespray/roles/etcd/templates/etcd-docker.service.j2 @@ -0,0 +1,18 @@ +[Unit] +Description=etcd docker wrapper +Wants=docker.socket +After=docker.service + +[Service] +User=root +PermissionsStartOnly=true +EnvironmentFile=-/etc/etcd.env +ExecStart={{ bin_dir }}/etcd +ExecStartPre=-{{ docker_bin_dir }}/docker rm -f {{ etcd_member_name | default("etcd") }} +ExecStop={{ docker_bin_dir }}/docker stop {{ etcd_member_name | default("etcd") }} +Restart=always +RestartSec=15s +TimeoutStartSec=30s + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/roles/etcd/templates/etcd-events-docker.service.j2 b/kubespray/roles/etcd/templates/etcd-events-docker.service.j2 new file mode 100644 index 0000000..271980a --- /dev/null +++ b/kubespray/roles/etcd/templates/etcd-events-docker.service.j2 @@ -0,0 +1,18 @@ +[Unit] +Description=etcd docker wrapper +Wants=docker.socket +After=docker.service + +[Service] +User=root +PermissionsStartOnly=true +EnvironmentFile=-/etc/etcd-events.env +ExecStart={{ bin_dir }}/etcd-events +ExecStartPre=-{{ docker_bin_dir }}/docker rm -f {{ 
etcd_member_name }}-events +ExecStop={{ docker_bin_dir }}/docker stop {{ etcd_member_name }}-events +Restart=always +RestartSec=15s +TimeoutStartSec=30s + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/roles/etcd/templates/etcd-events-host.service.j2 b/kubespray/roles/etcd/templates/etcd-events-host.service.j2 new file mode 100644 index 0000000..6e0167a --- /dev/null +++ b/kubespray/roles/etcd/templates/etcd-events-host.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=etcd +After=network.target + +[Service] +Type=notify +User=root +EnvironmentFile=/etc/etcd-events.env +ExecStart={{ bin_dir }}/etcd +NotifyAccess=all +Restart=always +RestartSec=10s +LimitNOFILE=40000 + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/roles/etcd/templates/etcd-events.env.j2 b/kubespray/roles/etcd/templates/etcd-events.env.j2 new file mode 100644 index 0000000..3abefd6 --- /dev/null +++ b/kubespray/roles/etcd/templates/etcd-events.env.j2 @@ -0,0 +1,43 @@ +ETCD_DATA_DIR={{ etcd_events_data_dir }} +ETCD_ADVERTISE_CLIENT_URLS={{ etcd_events_client_url }} +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_events_peer_url }} +ETCD_INITIAL_CLUSTER_STATE={% if etcd_events_cluster_is_healthy.rc == 0 | bool %}existing{% else %}new{% endif %} + +ETCD_METRICS={{ etcd_metrics }} +ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2383,https://127.0.0.1:2383 +ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }} +ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }} +ETCD_INITIAL_CLUSTER_TOKEN=k8s_events_etcd +ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2382 +ETCD_NAME={{ etcd_member_name }}-events +ETCD_PROXY=off +ETCD_INITIAL_CLUSTER={{ etcd_events_peer_addresses }} +ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }} +{% if etcd_snapshot_count is defined %} +ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }} +{% endif %} +{% if etcd_quota_backend_bytes is defined %} +ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }} +{% endif %} +{% if etcd_max_request_bytes is defined %} +ETCD_MAX_REQUEST_BYTES={{ etcd_max_request_bytes }} +{% endif %} + +# TLS settings +ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem +ETCD_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem +ETCD_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem +ETCD_CLIENT_CERT_AUTH={{ etcd_secure_client | lower}} + +ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem +ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem +ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem +ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }} + +{% if etcd_tls_cipher_suites is defined %} +ETCD_CIPHER_SUITES={% for tls in etcd_tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %} +{% endif %} + +{% for key, value in etcd_extra_vars.items() %} +{{ key }}={{ value }} +{% endfor %} diff --git a/kubespray/roles/etcd/templates/etcd-events.j2 b/kubespray/roles/etcd/templates/etcd-events.j2 new file mode 100644 index 0000000..b268479 --- /dev/null +++ b/kubespray/roles/etcd/templates/etcd-events.j2 @@ -0,0 +1,21 @@ +#!/bin/bash +{{ docker_bin_dir }}/docker run \ + --restart=on-failure:5 \ + --env-file=/etc/etcd-events.env \ + --net=host \ + -v /etc/ssl/certs:/etc/ssl/certs:ro \ + -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \ + -v {{ etcd_events_data_dir }}:{{ etcd_events_data_dir }}:rw \ + {% if etcd_memory_limit is defined %} + --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \ + {% endif %} + {% if etcd_cpu_limit is defined %} + 
--cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \ + {% endif %} + {% if etcd_blkio_weight is defined %} + --blkio-weight={{ etcd_blkio_weight }} \ + {% endif %} + --name={{ etcd_member_name }}-events \ + {{ etcd_image_repo }}:{{ etcd_image_tag }} \ + /usr/local/bin/etcd \ + "$@" diff --git a/kubespray/roles/etcd/templates/etcd-host.service.j2 b/kubespray/roles/etcd/templates/etcd-host.service.j2 new file mode 100644 index 0000000..6bba805 --- /dev/null +++ b/kubespray/roles/etcd/templates/etcd-host.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=etcd +After=network.target + +[Service] +Type=notify +User=root +EnvironmentFile=/etc/etcd.env +ExecStart={{ bin_dir }}/etcd +NotifyAccess=all +Restart=always +RestartSec=10s +LimitNOFILE=40000 + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/roles/etcd/templates/etcd.env.j2 b/kubespray/roles/etcd/templates/etcd.env.j2 new file mode 100644 index 0000000..18395c9 --- /dev/null +++ b/kubespray/roles/etcd/templates/etcd.env.j2 @@ -0,0 +1,68 @@ +# Environment file for etcd {{ etcd_version }} +ETCD_DATA_DIR={{ etcd_data_dir }} +ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_url }} +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_url }} +ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc == 0 | bool %}existing{% else %}new{% endif %} + +ETCD_METRICS={{ etcd_metrics }} +{% if etcd_metrics_port is defined %} +ETCD_LISTEN_METRICS_URLS=http://{{ etcd_address }}:{{ etcd_metrics_port }},http://127.0.0.1:{{ etcd_metrics_port }} +{% endif %} +ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2379,https://127.0.0.1:2379 +ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }} +ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }} +ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd +ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2380 +ETCD_NAME={{ etcd_member_name }} +ETCD_PROXY=off +ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }} +ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }} +{% if etcd_snapshot_count is defined %} +ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }} +{% endif %} +{% if etcd_quota_backend_bytes is defined %} +ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }} +{% endif %} +{% if etcd_max_request_bytes is defined %} +ETCD_MAX_REQUEST_BYTES={{ etcd_max_request_bytes }} +{% endif %} +{% if etcd_log_level is defined %} +ETCD_LOG_LEVEL={{ etcd_log_level }} +{% endif %} +{% if etcd_max_snapshots is defined %} +ETCD_MAX_SNAPSHOTS={{ etcd_max_snapshots }} +{% endif %} +{% if etcd_max_wals is defined %} +ETCD_MAX_WALS={{ etcd_max_wals }} +{% endif %} +# Flannel need etcd v2 API +ETCD_ENABLE_V2=true + +# TLS settings +ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem +ETCD_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem +ETCD_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem +ETCD_CLIENT_CERT_AUTH={{ etcd_secure_client | lower}} + +ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem +ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem +ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem +ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }} + +{% if etcd_tls_cipher_suites is defined %} +ETCD_CIPHER_SUITES={% for tls in etcd_tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %} +{% endif %} + +{% for key, value in etcd_extra_vars.items() %} +{{ key }}={{ value }} +{% endfor %} + +# CLI settings +ETCDCTL_ENDPOINTS=https://127.0.0.1:2379 +ETCDCTL_CACERT={{ etcd_cert_dir }}/ca.pem +ETCDCTL_KEY={{ etcd_cert_dir }}/admin-{{ 
inventory_hostname }}-key.pem +ETCDCTL_CERT={{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem + +# ETCD 3.5.x issue +# https://groups.google.com/a/kubernetes.io/g/dev/c/B7gJs88XtQc/m/rSgNOzV2BwAJ?utm_medium=email&utm_source=footer +ETCD_EXPERIMENTAL_INITIAL_CORRUPT_CHECK={{ etcd_experimental_initial_corrupt_check }} \ No newline at end of file diff --git a/kubespray/roles/etcd/templates/etcd.j2 b/kubespray/roles/etcd/templates/etcd.j2 new file mode 100644 index 0000000..5374c70 --- /dev/null +++ b/kubespray/roles/etcd/templates/etcd.j2 @@ -0,0 +1,21 @@ +#!/bin/bash +{{ docker_bin_dir }}/docker run \ + --restart=on-failure:5 \ + --env-file=/etc/etcd.env \ + --net=host \ + -v /etc/ssl/certs:/etc/ssl/certs:ro \ + -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \ + -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:rw \ +{% if etcd_memory_limit is defined %} + --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \ +{% endif %} +{% if etcd_cpu_limit is defined %} + --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \ +{% endif %} +{% if etcd_blkio_weight is defined %} + --blkio-weight={{ etcd_blkio_weight }} \ +{% endif %} + --name={{ etcd_member_name | default("etcd") }} \ + {{ etcd_image_repo }}:{{ etcd_image_tag }} \ + /usr/local/bin/etcd \ + "$@" diff --git a/kubespray/roles/etcd/templates/make-ssl-etcd.sh.j2 b/kubespray/roles/etcd/templates/make-ssl-etcd.sh.j2 new file mode 100644 index 0000000..d727cff --- /dev/null +++ b/kubespray/roles/etcd/templates/make-ssl-etcd.sh.j2 @@ -0,0 +1,103 @@ +#!/bin/bash + +# Author: Smana smainklh@gmail.com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o pipefail +usage() +{ + cat << EOF +Create self signed certificates + +Usage : $(basename $0) -f [-d ] + -h | --help : Show this message + -f | --config : Openssl configuration file + -d | --ssldir : Directory where the certificates will be installed + + ex : + $(basename $0) -f openssl.conf -d /srv/ssl +EOF +} + +# Options parsing +while (($#)); do + case "$1" in + -h | --help) usage; exit 0;; + -f | --config) CONFIG=${2}; shift 2;; + -d | --ssldir) SSLDIR="${2}"; shift 2;; + *) + usage + echo "ERROR : Unknown option" + exit 3 + ;; + esac +done + +if [ -z ${CONFIG} ]; then + echo "ERROR: the openssl configuration file is missing. option -f" + exit 1 +fi +if [ -z ${SSLDIR} ]; then + SSLDIR="/etc/ssl/etcd" +fi + +tmpdir=$(mktemp -d /tmp/etcd_cacert.XXXXXX) +trap 'rm -rf "${tmpdir}"' EXIT +cd "${tmpdir}" + +mkdir -p "${SSLDIR}" + +# Root CA +if [ -e "$SSLDIR/ca-key.pem" ]; then + # Reuse existing CA + cp $SSLDIR/{ca.pem,ca-key.pem} . 
+else + openssl genrsa -out ca-key.pem {{certificates_key_size}} > /dev/null 2>&1 + openssl req -x509 -new -nodes -key ca-key.pem -days {{certificates_duration}} -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1 +fi + +# ETCD member +if [ -n "$MASTERS" ]; then + for host in $MASTERS; do + cn="${host%%.*}" + # Member key + openssl genrsa -out member-${host}-key.pem {{certificates_key_size}} > /dev/null 2>&1 + openssl req -new -key member-${host}-key.pem -out member-${host}.csr -subj "/CN=etcd-member-${cn}" -config ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + + # Admin key + openssl genrsa -out admin-${host}-key.pem {{certificates_key_size}} > /dev/null 2>&1 + openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=etcd-admin-${cn}" > /dev/null 2>&1 + openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + done +fi + +# Node keys +if [ -n "$HOSTS" ]; then + for host in $HOSTS; do + cn="${host%%.*}" + openssl genrsa -out node-${host}-key.pem {{certificates_key_size}} > /dev/null 2>&1 + openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=etcd-node-${cn}" > /dev/null 2>&1 + openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + done +fi + +# Install certs +if [ -e "$SSLDIR/ca-key.pem" ]; then + # No pass existing CA + rm -f ca.pem ca-key.pem +fi + +mv *.pem ${SSLDIR}/ diff --git a/kubespray/roles/etcd/templates/openssl.conf.j2 b/kubespray/roles/etcd/templates/openssl.conf.j2 new file mode 100644 index 0000000..f6681a1 --- /dev/null +++ b/kubespray/roles/etcd/templates/openssl.conf.j2 @@ -0,0 +1,45 @@ +{% set counter = {'dns': 2,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name + +[req_distinguished_name] + +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +subjectAltName = @alt_names + +[ ssl_client ] +extendedKeyUsage = clientAuth, serverAuth +basicConstraints = CA:FALSE +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer +subjectAltName = @alt_names + +[ v3_ca ] +basicConstraints = CA:TRUE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +subjectAltName = @alt_names +authorityKeyIdentifier=keyid:always,issuer + +[alt_names] +DNS.1 = localhost +{% for host in groups['etcd'] %} +DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }} +{% endfor %} +{% if apiserver_loadbalancer_domain_name is defined %} +DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }} +{% endif %} +{% for etcd_alt_name in etcd_cert_alt_names %} +DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }} +{% endfor %} +{% for host in groups['etcd'] %} +{% if hostvars[host]['access_ip'] is defined %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }} +{% endif %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(fallback_ips[host]) }}{{ increment(counter, 'ip') }} +{% 
endfor %} +{% for cert_alt_ip in etcd_cert_alt_ips %} +IP.{{ counter["ip"] }} = {{ cert_alt_ip }}{{ increment(counter, 'ip') }} +{% endfor %} +IP.{{ counter["ip"] }} = 127.0.0.1 diff --git a/kubespray/roles/etcdctl/tasks/main.yml b/kubespray/roles/etcdctl/tasks/main.yml new file mode 100644 index 0000000..fca078c --- /dev/null +++ b/kubespray/roles/etcdctl/tasks/main.yml @@ -0,0 +1,65 @@ +--- +# To get the binary from container to host, use the etcd data directory mounted +# rw from host into the container. + +- name: Check unintentional include of this role + assert: + that: etcd_deployment_type == "kubeadm" + +- name: Check if etcdctl exist + stat: + path: "{{ bin_dir }}/etcdctl" + get_attributes: no + get_checksum: no + get_mime: no + register: stat_etcdctl + +- block: + - name: Check version + command: "{{ bin_dir }}/etcdctl version" + register: etcdctl_version + check_mode: no + changed_when: false + + - name: Remove old binary if version is not OK + file: + path: "{{ bin_dir }}/etcdctl" + state: absent + when: etcd_version.lstrip('v') not in etcdctl_version.stdout + when: stat_etcdctl.stat.exists + +- name: Check if etcdctl still exist after version check + stat: + path: "{{ bin_dir }}/etcdctl" + get_attributes: no + get_checksum: no + get_mime: no + register: stat_etcdctl + +- block: + - name: Copy etcdctl script to host + shell: "{{ docker_bin_dir }}/docker exec \"$({{ docker_bin_dir }}/docker ps -qf ancestor={{ etcd_image_repo }}:{{ etcd_image_tag }})\" cp /usr/local/bin/etcdctl {{ etcd_data_dir }}/etcdctl" + when: container_manager == "docker" + + - name: Copy etcdctl script to host + shell: "{{ bin_dir }}/crictl exec \"$({{ bin_dir }}/crictl ps -q --image {{ etcd_image_repo }}:{{ etcd_image_tag }})\" cp /usr/local/bin/etcdctl {{ etcd_data_dir }}/etcdctl" + when: container_manager in ['crio', 'containerd'] + + - name: Copy etcdctl to {{ bin_dir }} + copy: + src: "{{ etcd_data_dir }}/etcdctl" + dest: "{{ bin_dir }}" + remote_src: true + mode: 0755 + when: not stat_etcdctl.stat.exists + +- name: Remove binary in etcd data dir + file: + path: "{{ etcd_data_dir }}/etcdctl" + state: absent + +- name: Create etcdctl wrapper script + template: + src: etcdctl.sh.j2 + dest: "{{ bin_dir }}/etcdctl.sh" + mode: 0755 diff --git a/kubespray/roles/etcdctl/templates/etcdctl.sh.j2 b/kubespray/roles/etcdctl/templates/etcdctl.sh.j2 new file mode 100644 index 0000000..266bcfd --- /dev/null +++ b/kubespray/roles/etcdctl/templates/etcdctl.sh.j2 @@ -0,0 +1,8 @@ +#!/bin/bash +# {{ ansible_managed }} +# example invocation: etcdctl.sh get --keys-only --from-key "" + +etcdctl \ + --cacert {{ kube_cert_dir }}/etcd/ca.crt \ + --cert {{ kube_cert_dir }}/etcd/server.crt \ + --key {{ kube_cert_dir }}/etcd/server.key "$@" diff --git a/kubespray/roles/helm-apps/README.md b/kubespray/roles/helm-apps/README.md new file mode 100644 index 0000000..27b480c --- /dev/null +++ b/kubespray/roles/helm-apps/README.md @@ -0,0 +1,39 @@ +Role Name +========= + +This role is intended to be used to fetch and deploy Helm Charts as part of +cluster installation or upgrading with kubespray. + +Requirements +------------ + +The role needs to be executed on a host with access to the Kubernetes API, and +with the helm binary in place. 
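+
+A quick pre-flight play can confirm the binary part of those requirements before the role runs (a minimal sketch only, assuming the `bin_dir` convention used elsewhere in this changeset; it is not part of the role itself):
+
+```yaml
+- hosts: kube_control_plane[0]
+  gather_facts: no
+  tasks:
+    - name: Check that the helm binary is present
+      ansible.builtin.stat:
+        path: "{{ bin_dir }}/helm"
+      register: helm_bin
+
+    - name: Fail early if helm is missing or not executable
+      ansible.builtin.assert:
+        that:
+          - helm_bin.stat.exists
+          - helm_bin.stat.executable
+        fail_msg: "helm was not found in {{ bin_dir }}; install it before applying helm-apps"
+```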
+ +Role Variables +-------------- + +See meta/argument_specs.yml + +Playbook example: + +```yaml +--- +- hosts: kube_control_plane[0] + gather_facts: no + roles: + - name: helm-apps + releases: + - name: app + namespace: app + chart_ref: simple-app/simple-app + - name: app2 + namespace: app + chart_ref: simple-app/simple-app + wait_timeout: "10m" # override the same option in `release_common_opts` + repositories: "{{ repos }}" + - repo_name: simple-app + repo_url: "https://blog.leiwang.info/simple-app" + release_common_opts: "{{ helm_params }}" + wait_timeout: "5m" +``` diff --git a/kubespray/roles/helm-apps/meta/argument_specs.yml b/kubespray/roles/helm-apps/meta/argument_specs.yml new file mode 100644 index 0000000..d1be9a8 --- /dev/null +++ b/kubespray/roles/helm-apps/meta/argument_specs.yml @@ -0,0 +1,93 @@ +--- +argument_specs: + main: + short_description: Install a list of Helm charts. + options: + releases: + type: list + elements: dict + required: true + description: | + List of dictionaries passed as arguments to kubernetes.core.helm. + Arguments passed here will override those in `helm_settings`. For + structure of the dictionary, see the documentation for + kubernetes.core.helm ansible module. + options: + chart_ref: + type: path + required: true + chart_version: + type: str + name: + type: str + required: true + namespace: + type: str + required: true + values: + type: dict + # Possibly general options + create_namespace: + type: bool + chart_repo_url: + type: str + disable_hook: + type: bool + history_max: + type: int + purge: + type: bool + replace: + type: bool + skip_crds: + type: bool + wait: + type: bool + default: true + wait_timeout: + type: str + + repositories: + type: list + elements: dict + description: | + List of dictionaries passed as arguments to + kubernetes.core.helm_repository. + default: [] + options: + name: + type: str + required: true + password: + type: str + username: + type: str + url: + type: str + release_common_opts: + type: dict + description: | + Common arguments for every helm invocation. 
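+        Options set per-release in `releases` take precedence over these defaults.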
+ default: {} + options: + create_namespace: + type: bool + default: true + chart_repo_url: + type: str + disable_hook: + type: bool + history_max: + type: int + purge: + type: bool + replace: + type: bool + skip_crds: + type: bool + wait: + type: bool + default: true + wait_timeout: + type: str + default: "5m" diff --git a/kubespray/roles/helm-apps/tasks/main.yml b/kubespray/roles/helm-apps/tasks/main.yml new file mode 100644 index 0000000..ed55c5a --- /dev/null +++ b/kubespray/roles/helm-apps/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Add Helm repositories + kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}" + loop: "{{ repositories }}" + +- name: Update Helm repositories + kubernetes.core.helm: + state: absent + binary_path: "{{ bin_dir }}/helm" + release_name: dummy # trick needed to refresh in separate step + release_namespace: kube-system + update_repo_cache: true + when: repositories != [] + +- name: Install Helm Applications + kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}" + loop: "{{ releases }}" diff --git a/kubespray/roles/helm-apps/vars/main.yml b/kubespray/roles/helm-apps/vars/main.yml new file mode 100644 index 0000000..a7baa66 --- /dev/null +++ b/kubespray/roles/helm-apps/vars/main.yml @@ -0,0 +1,7 @@ +--- +helm_defaults: + atomic: true + binary_path: "{{ bin_dir }}/helm" + +helm_repository_defaults: + binary_path: "{{ bin_dir }}/helm" diff --git a/kubespray/roles/kubernetes-apps/ansible/defaults/main.yml b/kubespray/roles/kubernetes-apps/ansible/defaults/main.yml new file mode 100644 index 0000000..66b7673 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/defaults/main.yml @@ -0,0 +1,93 @@ +--- +# Limits for coredns +dns_memory_limit: 300Mi +dns_cpu_requests: 100m +dns_memory_requests: 70Mi +dns_min_replicas: "{{ [ 2, groups['k8s_cluster'] | length ] | min }}" +dns_nodes_per_replica: 16 +dns_cores_per_replica: 256 +dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas|int > 1 else 'false' }}" +enable_coredns_reverse_dns_lookups: true +coredns_ordinal_suffix: "" +# dns_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}] +coredns_deployment_nodeselector: "kubernetes.io/os: linux" +coredns_default_zone_cache_block: | + cache 30 + +# dns_upstream_forward_extra_opts apply to coredns forward section as well as nodelocaldns upstream target forward section +# dns_upstream_forward_extra_opts: +# policy: sequential + +# nodelocaldns +nodelocaldns_cpu_requests: 100m +nodelocaldns_memory_limit: 200Mi +nodelocaldns_memory_requests: 70Mi +nodelocaldns_ds_nodeselector: "kubernetes.io/os: linux" +nodelocaldns_prometheus_port: 9253 +nodelocaldns_secondary_prometheus_port: 9255 + +# Limits for dns-autoscaler +dns_autoscaler_cpu_requests: 20m +dns_autoscaler_memory_requests: 10Mi +dns_autoscaler_deployment_nodeselector: "kubernetes.io/os: linux" +# dns_autoscaler_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}] + +# etcd metrics +# etcd_metrics_service_labels: +# k8s-app: etcd +# app.kubernetes.io/managed-by: Kubespray +# app: kube-prometheus-stack-kube-etcd +# release: prometheus-stack + +# Netchecker +deploy_netchecker: false +netchecker_port: 31081 +agent_report_interval: 15 +netcheck_namespace: default + +# Limits for netchecker apps +netchecker_agent_cpu_limit: 30m +netchecker_agent_memory_limit: 100M +netchecker_agent_cpu_requests: 15m +netchecker_agent_memory_requests: 64M +netchecker_server_cpu_limit: 100m +netchecker_server_memory_limit: 256M 
+netchecker_server_cpu_requests: 50m +netchecker_server_memory_requests: 64M +netchecker_etcd_cpu_limit: 200m +netchecker_etcd_memory_limit: 256M +netchecker_etcd_cpu_requests: 100m +netchecker_etcd_memory_requests: 128M + +# SecurityContext when PodSecurityPolicy is enabled +netchecker_agent_user: 1000 +netchecker_server_user: 1000 +netchecker_agent_group: 1000 +netchecker_server_group: 1000 + +# Dashboard +dashboard_replicas: 1 + +# Namespace for dashboard +dashboard_namespace: kube-system + +# Limits for dashboard +dashboard_cpu_limit: 100m +dashboard_memory_limit: 256M +dashboard_cpu_requests: 50m +dashboard_memory_requests: 64M + +# Set dashboard_use_custom_certs to true if overriding dashboard_certs_secret_name with a secret that +# contains dashboard_tls_key_file and dashboard_tls_cert_file instead of using the initContainer provisioned certs +dashboard_use_custom_certs: false +dashboard_certs_secret_name: kubernetes-dashboard-certs +dashboard_tls_key_file: dashboard.key +dashboard_tls_cert_file: dashboard.crt +dashboard_master_toleration: true + +# Override dashboard default settings +dashboard_token_ttl: 900 +dashboard_skip_login: false + +# Policy Controllers +# policy_controller_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}] diff --git a/kubespray/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/kubespray/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml new file mode 100644 index 0000000..fef5246 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml @@ -0,0 +1,44 @@ +--- +- name: Kubernetes Apps | Register coredns deployment annotation `createdby` + command: "{{ kubectl }} get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'" + register: createdby_annotation_deploy + changed_when: false + check_mode: false + ignore_errors: true # noqa ignore-errors + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Register coredns service annotation `createdby` + command: "{{ kubectl }} get svc -n kube-system coredns -o jsonpath='{ .metadata.annotations.createdby }'" + register: createdby_annotation_svc + changed_when: false + check_mode: false + ignore_errors: true # noqa ignore-errors + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Delete kubeadm CoreDNS + kube: + name: "coredns" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "deploy" + state: absent + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + - createdby_annotation_deploy.stdout != 'kubespray' + +- name: Kubernetes Apps | Delete kubeadm Kube-DNS service + kube: + name: "kube-dns" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "svc" + state: absent + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + - createdby_annotation_svc.stdout != 'kubespray' diff --git a/kubespray/roles/kubernetes-apps/ansible/tasks/coredns.yml b/kubespray/roles/kubernetes-apps/ansible/tasks/coredns.yml new file mode 100644 index 0000000..d8f8547 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/tasks/coredns.yml @@ -0,0 +1,44 @@ +--- +- name: Kubernetes Apps | Lay Down CoreDNS templates + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + loop: + - { name: 
coredns, file: coredns-clusterrole.yml, type: clusterrole } + - { name: coredns, file: coredns-clusterrolebinding.yml, type: clusterrolebinding } + - { name: coredns, file: coredns-config.yml, type: configmap } + - { name: coredns, file: coredns-deployment.yml, type: deployment } + - { name: coredns, file: coredns-sa.yml, type: sa } + - { name: coredns, file: coredns-svc.yml, type: svc } + - { name: dns-autoscaler, file: dns-autoscaler.yml, type: deployment } + - { name: dns-autoscaler, file: dns-autoscaler-clusterrole.yml, type: clusterrole } + - { name: dns-autoscaler, file: dns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding } + - { name: dns-autoscaler, file: dns-autoscaler-sa.yml, type: sa } + register: coredns_manifests + vars: + clusterIP: "{{ skydns_server }}" + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - coredns + +- name: Kubernetes Apps | Lay Down Secondary CoreDNS Template + template: + src: "{{ item.src }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - { name: coredns, src: coredns-deployment.yml, file: coredns-deployment-secondary.yml, type: deployment } + - { name: coredns, src: coredns-svc.yml, file: coredns-svc-secondary.yml, type: svc } + - { name: dns-autoscaler, src: dns-autoscaler.yml, file: coredns-autoscaler-secondary.yml, type: deployment } + register: coredns_secondary_manifests + vars: + clusterIP: "{{ skydns_server_secondary }}" + coredns_ordinal_suffix: "-secondary" + when: + - dns_mode == 'coredns_dual' + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - coredns diff --git a/kubespray/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/kubespray/roles/kubernetes-apps/ansible/tasks/dashboard.yml new file mode 100644 index 0000000..480b3db --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/tasks/dashboard.yml @@ -0,0 +1,21 @@ +--- +- name: Kubernetes Apps | Lay down dashboard template + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - { file: dashboard.yml, type: deploy, name: kubernetes-dashboard } + register: manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Start dashboard + kube: + name: "{{ item.item.name }}" + namespace: "{{ dashboard_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/ansible/tasks/etcd_metrics.yml b/kubespray/roles/kubernetes-apps/ansible/tasks/etcd_metrics.yml new file mode 100644 index 0000000..548de89 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/tasks/etcd_metrics.yml @@ -0,0 +1,22 @@ +--- +- name: Kubernetes Apps | Lay down etcd_metrics templates + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - { file: etcd_metrics-endpoints.yml, type: endpoints, name: etcd-metrics } + - { file: etcd_metrics-service.yml, type: service, name: etcd-metrics } + register: manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Start etcd_metrics + kube: + name: "{{ item.item.name }}" + namespace: kube-system + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ 
item.item.file }}" + state: "latest" + with_items: "{{ manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/ansible/tasks/main.yml b/kubespray/roles/kubernetes-apps/ansible/tasks/main.yml new file mode 100644 index 0000000..4a0180e --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/tasks/main.yml @@ -0,0 +1,82 @@ +--- +- name: Kubernetes Apps | Wait for kube-apiserver + uri: + url: "{{ kube_apiserver_endpoint }}/healthz" + validate_certs: no + client_cert: "{{ kube_apiserver_client_cert }}" + client_key: "{{ kube_apiserver_client_key }}" + register: result + until: result.status == 200 + retries: 20 + delay: 1 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Cleanup DNS + import_tasks: cleanup_dns.yml + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + - coredns + - nodelocaldns + +- name: Kubernetes Apps | CoreDNS + import_tasks: "coredns.yml" + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - coredns + +- name: Kubernetes Apps | nodelocalDNS + import_tasks: "nodelocaldns.yml" + when: + - enable_nodelocaldns + - inventory_hostname == groups['kube_control_plane'] | first + tags: + - nodelocaldns + +- name: Kubernetes Apps | Start Resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ coredns_manifests.results | default({}) }}" + - "{{ coredns_secondary_manifests.results | default({}) }}" + - "{{ nodelocaldns_manifests.results | default({}) }}" + - "{{ nodelocaldns_second_manifests.results | default({}) }}" + when: + - dns_mode != 'none' + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + register: resource_result + until: resource_result is succeeded + retries: 4 + delay: 5 + tags: + - coredns + - nodelocaldns + loop_control: + label: "{{ item.item.file }}" + +- name: Kubernetes Apps | Etcd metrics endpoints + import_tasks: etcd_metrics.yml + when: etcd_metrics_port is defined and etcd_metrics_service_labels is defined + tags: + - etcd_metrics + +- name: Kubernetes Apps | Netchecker + import_tasks: netchecker.yml + when: deploy_netchecker + tags: + - netchecker + +- name: Kubernetes Apps | Dashboard + import_tasks: dashboard.yml + when: dashboard_enabled + tags: + - dashboard diff --git a/kubespray/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/kubespray/roles/kubernetes-apps/ansible/tasks/netchecker.yml new file mode 100644 index 0000000..b83fd33 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -0,0 +1,56 @@ +--- +- name: Kubernetes Apps | Check AppArmor status + command: which apparmor_parser + register: apparmor_status + when: + - inventory_hostname == groups['kube_control_plane'][0] + failed_when: false + +- name: Kubernetes Apps | Set apparmor_enabled + set_fact: + apparmor_enabled: "{{ apparmor_status.rc == 0 }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Netchecker Templates list + set_fact: + netchecker_templates: + - {file: netchecker-ns.yml, type: ns, name: netchecker-namespace} + - {file: netchecker-agent-sa.yml, type: sa, name: netchecker-agent} + - {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent} + - {file: 
netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet} + - {file: netchecker-server-sa.yml, type: sa, name: netchecker-server} + - {file: netchecker-server-clusterrole.yml, type: clusterrole, name: netchecker-server} + - {file: netchecker-server-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-server} + - {file: netchecker-server-deployment.yml, type: deployment, name: netchecker-server} + - {file: netchecker-server-svc.yml, type: svc, name: netchecker-service} + netchecker_templates_for_psp: + - {file: netchecker-agent-hostnet-psp.yml, type: podsecuritypolicy, name: netchecker-agent-hostnet-policy} + - {file: netchecker-agent-hostnet-clusterrole.yml, type: clusterrole, name: netchecker-agent} + - {file: netchecker-agent-hostnet-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-agent} + +- name: Kubernetes Apps | Append extra templates to Netchecker Templates list for PodSecurityPolicy + set_fact: + netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates }}" + when: podsecuritypolicy_enabled + +- name: Kubernetes Apps | Lay Down Netchecker Template + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: "{{ netchecker_templates }}" + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Start Netchecker Resources + kube: + name: "{{ item.item.name }}" + namespace: "{{ netcheck_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/kubespray/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml b/kubespray/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml new file mode 100644 index 0000000..b94509f --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml @@ -0,0 +1,75 @@ +--- +- name: Kubernetes Apps | set up necessary nodelocaldns parameters + set_fact: + primaryClusterIP: >- + {%- if dns_mode in ['coredns', 'coredns_dual'] -%} + {{ skydns_server }} + {%- elif dns_mode == 'manual' -%} + {{ manual_dns_server }} + {%- endif -%} + secondaryclusterIP: "{{ skydns_server_secondary }}" + when: + - enable_nodelocaldns + - inventory_hostname == groups['kube_control_plane'] | first + tags: + - nodelocaldns + - coredns + +- name: Kubernetes Apps | Lay Down nodelocaldns Template + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - { name: nodelocaldns, file: nodelocaldns-config.yml, type: configmap } + - { name: nodelocaldns, file: nodelocaldns-sa.yml, type: sa } + - { name: nodelocaldns, file: nodelocaldns-daemonset.yml, type: daemonset } + register: nodelocaldns_manifests + vars: + forwardTarget: >- + {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%} + {{ primaryClusterIP }} {{ secondaryclusterIP }} + {%- else -%} + {{ primaryClusterIP }} + {%- endif -%} + upstreamForwardTarget: >- + {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%} + {{ upstream_dns_servers|join(' ') }} + {%- else -%} + /etc/resolv.conf + {%- endif -%} + when: + - enable_nodelocaldns + - inventory_hostname == groups['kube_control_plane'] | first + tags: + - nodelocaldns + - coredns + +- name: Kubernetes Apps | Lay Down nodelocaldns-secondary Template + 
template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - { name: nodelocaldns, file: nodelocaldns-second-daemonset.yml, type: daemonset } + register: nodelocaldns_second_manifests + vars: + forwardTarget: >- + {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%} + {{ primaryClusterIP }} {{ secondaryclusterIP }} + {%- else -%} + {{ primaryClusterIP }} + {%- endif -%} + upstreamForwardTarget: >- + {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%} + {{ upstream_dns_servers|join(' ') }} + {%- else -%} + /etc/resolv.conf + {%- endif -%} + when: + - enable_nodelocaldns + - enable_nodelocaldns_secondary + - inventory_hostname == groups['kube_control_plane'] | first + tags: + - nodelocaldns + - coredns diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 new file mode 100644 index 0000000..79c4e77 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 @@ -0,0 +1,32 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + addonmanager.kubernetes.io/mode: Reconcile + name: system:coredns +rules: + - apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..af7f684 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 @@ -0,0 +1,18 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + addonmanager.kubernetes.io/mode: EnsureExists + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: + - kind: ServiceAccount + name: coredns + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 new file mode 100644 index 0000000..44eea93 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 @@ -0,0 +1,74 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +data: + Corefile: | +{% if coredns_external_zones is defined and coredns_external_zones|length > 0 %} +{% for block in coredns_external_zones %} + {{ block['zones'] | join(' ') }} { + log + errors +{% if block['rewrite'] is defined and block['rewrite']|length > 0 %} +{% for rewrite_match in block['rewrite'] %} + rewrite {{ rewrite_match }} +{% endfor %} +{% endif %} + forward . 
{{ block['nameservers'] | join(' ') }} + loadbalance + cache {{ block['cache'] | default(5) }} + reload +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% endfor %} +{% endif %} + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes {{ dns_domain }} {% if enable_coredns_reverse_dns_lookups %}in-addr.arpa ip6.arpa {% endif %}{ + pods insecure +{% if enable_coredns_k8s_endpoint_pod_names %} + endpoint_pod_names +{% endif %} +{% if enable_coredns_reverse_dns_lookups %} + fallthrough in-addr.arpa ip6.arpa +{% endif %} + } + prometheus :9153 + forward . {{ upstream_dns_servers|join(' ') if upstream_dns_servers is defined and upstream_dns_servers|length > 0 else '/etc/resolv.conf' }} { + prefer_udp + max_concurrent 1000 +{% if dns_upstream_forward_extra_opts is defined %} +{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} + {{ optname }} {{ optvalue }} +{% endfor %} +{% endif %} + } +{% if enable_coredns_k8s_external %} + k8s_external {{ coredns_k8s_external_zone }} +{% endif %} + {{ coredns_default_zone_cache_block | indent(width=8, first=False) }} + loop + reload + loadbalance +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% if dns_etchosts | default(None) %} + hosts: | + {{ dns_etchosts | indent(width=4, first=False) }} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 new file mode 100644 index 0000000..fa81069 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 @@ -0,0 +1,119 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "coredns{{ coredns_ordinal_suffix }}" + namespace: kube-system + labels: + k8s-app: "kube-dns{{ coredns_ordinal_suffix }}" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "coredns{{ coredns_ordinal_suffix }}" +spec: + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 10% + selector: + matchLabels: + k8s-app: kube-dns{{ coredns_ordinal_suffix }} + template: + metadata: + labels: + k8s-app: kube-dns{{ coredns_ordinal_suffix }} + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + createdby: 'kubespray' + spec: + nodeSelector: + {{ coredns_deployment_nodeselector }} + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% if dns_extra_tolerations is defined %} + {{ dns_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }} +{% endif %} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + k8s-app: kube-dns{{ coredns_ordinal_suffix }} + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: In + values: + - "" + containers: + - name: coredns + image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. 
Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. + limits: + memory: {{ dns_memory_limit }} + requests: + cpu: {{ dns_cpu_requests }} + memory: {{ dns_memory_requests }} + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile +{% if dns_etchosts | default(None) %} + - key: hosts + path: hosts +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 new file mode 100644 index 0000000..daebd6a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 new file mode 100644 index 0000000..0e051c3 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 @@ -0,0 +1,28 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: coredns{{ coredns_ordinal_suffix }} + namespace: kube-system + labels: + k8s-app: kube-dns{{ coredns_ordinal_suffix }} + kubernetes.io/name: "coredns{{ coredns_ordinal_suffix }}" + addonmanager.kubernetes.io/mode: Reconcile + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + createdby: 'kubespray' +spec: + selector: + k8s-app: kube-dns{{ coredns_ordinal_suffix }} + clusterIP: {{ clusterIP }} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 new file mode 100644 index 0000000..b0c3419 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 @@ -0,0 +1,339 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration to deploy release version of the Dashboard UI compatible with +# Kubernetes 1.8. 
+# +# Example usage: kubectl create -f + +{% if dashboard_namespace != "kube-system" %} +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ dashboard_namespace }} + labels: + name: {{ dashboard_namespace }} +{% endif %} + +--- +# ------------------- Dashboard Secrets ------------------- # +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: {{ dashboard_namespace }} +type: Opaque + +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: {{ dashboard_namespace }} +type: Opaque +data: + csrf: "" + +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: {{ dashboard_namespace }} +type: Opaque + +--- +# ------------------- Dashboard ConfigMap ------------------- # +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: {{ dashboard_namespace }} + +--- +# ------------------- Dashboard Service Account ------------------- # + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} + +--- +# ------------------- Dashboard Role & Role Binding ------------------- # +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics. 
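+  # (scoped by resourceNames below to the heapster and dashboard-metrics-scraper services only)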
+ - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} + +--- +# ------------------- Dashboard Deployment ------------------- # + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} +spec: + replicas: {{ dashboard_replicas }} + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + priorityClassName: system-cluster-critical + containers: + - name: kubernetes-dashboard + image: {{ dashboard_image_repo }}:{{ dashboard_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ dashboard_cpu_limit }} + memory: {{ dashboard_memory_limit }} + requests: + cpu: {{ dashboard_cpu_requests }} + memory: {{ dashboard_memory_requests }} + ports: + - containerPort: 8443 + protocol: TCP + args: + - --namespace={{ dashboard_namespace }} +{% if dashboard_use_custom_certs %} + - --tls-key-file={{ dashboard_tls_key_file }} + - --tls-cert-file={{ dashboard_tls_cert_file }} +{% else %} + - --auto-generate-certificates +{% endif %} +{% if dashboard_skip_login %} + - --enable-skip-login +{% endif %} + - --authentication-mode=token + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + - --token-ttl={{ dashboard_token_ttl }} + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: {{ dashboard_certs_secret_name }} + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard +{% if dashboard_master_toleration %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% endif %} + +--- +# ------------------- Dashboard Service ------------------- # + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ dashboard_namespace }} +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + k8s-app: kubernetes-dashboard + +--- +# ------------------- Metrics Scrapper Service Account ------------------- # + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + +--- + +# ------------------- Metrics Scrapper Service ------------------- # +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-metrics-scraper + name: dashboard-metrics-scraper + namespace: {{ dashboard_namespace }} +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: kubernetes-metrics-scraper + +--- + +# ------------------- Metrics Scrapper Deployment ------------------- # +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-metrics-scraper + name: kubernetes-metrics-scraper + namespace: {{ dashboard_namespace }} +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-metrics-scraper + template: + metadata: + labels: + k8s-app: kubernetes-metrics-scraper + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + priorityClassName: system-cluster-critical + containers: + - name: kubernetes-metrics-scraper + image: {{ dashboard_metrics_scraper_repo }}:{{ dashboard_metrics_scraper_tag }} + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + serviceAccountName: kubernetes-dashboard + volumes: + - name: tmp-volume + emptyDir: {} +{% if dashboard_master_toleration %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrole.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrole.yml.j2 new file mode 100644 index 0000000..ef642ce --- /dev/null +++ 
b/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrole.yml.j2 @@ -0,0 +1,34 @@ +--- +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:dns-autoscaler + labels: + addonmanager.kubernetes.io/mode: Reconcile +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrolebinding.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..da1a0a9 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler-clusterrolebinding.yml.j2 @@ -0,0 +1,29 @@ +--- +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:dns-autoscaler + labels: + addonmanager.kubernetes.io/mode: Reconcile +subjects: + - kind: ServiceAccount + name: dns-autoscaler + namespace: kube-system +roleRef: + kind: ClusterRole + name: system:dns-autoscaler + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler-sa.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler-sa.yml.j2 new file mode 100644 index 0000000..3ce9b51 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler-sa.yml.j2 @@ -0,0 +1,22 @@ +--- +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
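+# ServiceAccount consumed by the dns-autoscaler Deployment defined later in this changeset (serviceAccountName: dns-autoscaler)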
+ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: dns-autoscaler + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 new file mode 100644 index 0000000..6ea1651 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 @@ -0,0 +1,87 @@ +--- +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dns-autoscaler{{ coredns_ordinal_suffix }} + namespace: kube-system + labels: + k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }} + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }} + template: + metadata: + labels: + k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }} + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + nodeSelector: + {{ dns_autoscaler_deployment_nodeselector}} + priorityClassName: system-cluster-critical + securityContext: + supplementalGroups: [ 65534 ] + fsGroup: 65534 + nodeSelector: + kubernetes.io/os: linux + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane +{% if dns_autoscaler_extra_tolerations is defined %} + {{ dns_autoscaler_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }} +{% endif %} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }} + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: In + values: + - "" + containers: + - name: autoscaler + image: "{{ dnsautoscaler_image_repo }}:{{ dnsautoscaler_image_tag }}" + resources: + requests: + cpu: {{ dns_autoscaler_cpu_requests }} + memory: {{ dns_autoscaler_memory_requests }} + readinessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --default-params={"linear":{"preventSinglePointFailure":{{ dns_prevent_single_point_failure }},"coresPerReplica":{{ dns_cores_per_replica }},"nodesPerReplica":{{ dns_nodes_per_replica }},"min":{{ dns_min_replicas }}}} + - --logtostderr=true + - --v=2 + - --configmap=dns-autoscaler{{ coredns_ordinal_suffix }} + - --target=Deployment/coredns{{ coredns_ordinal_suffix }} + serviceAccountName: dns-autoscaler diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/etcd_metrics-endpoints.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/etcd_metrics-endpoints.yml.j2 new file mode 100644 index 0000000..18f515d --- /dev/null +++ 
b/kubespray/roles/kubernetes-apps/ansible/templates/etcd_metrics-endpoints.yml.j2 @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Endpoints +metadata: + name: etcd-metrics + namespace: kube-system + labels: + k8s-app: etcd + app.kubernetes.io/managed-by: Kubespray +subsets: +{% for etcd_metrics_address, etcd_host in etcd_metrics_addresses.split(',') | zip(etcd_hosts) %} + - addresses: + - ip: {{ etcd_metrics_address | urlsplit('hostname') }} + targetRef: + kind: Node + name: {{ etcd_host }} + ports: + - name: http-metrics + port: {{ etcd_metrics_address | urlsplit('port') }} + protocol: TCP +{% endfor %} diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/etcd_metrics-service.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/etcd_metrics-service.yml.j2 new file mode 100644 index 0000000..5bd9254 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/etcd_metrics-service.yml.j2 @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: etcd-metrics + namespace: kube-system + labels: + {{ etcd_metrics_service_labels | to_yaml(indent=2, width=1337) | indent(width=4) }} +spec: + ports: + - name: http-metrics + protocol: TCP + port: {{ etcd_metrics_port }} + # targetPort: diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 new file mode 100644 index 0000000..47dbf70 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 @@ -0,0 +1,56 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: netchecker-agent + name: netchecker-agent + namespace: {{ netcheck_namespace }} +spec: + selector: + matchLabels: + app: netchecker-agent + template: + metadata: + name: netchecker-agent + labels: + app: netchecker-agent + spec: + priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + tolerations: + - effect: NoSchedule + operator: Exists + nodeSelector: + kubernetes.io/os: linux + containers: + - name: netchecker-agent + image: "{{ netcheck_agent_image_repo }}:{{ netcheck_agent_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + args: + - "-v=5" + - "-alsologtostderr=true" + - "-serverendpoint=netchecker-service:8081" + - "-reportinterval={{ agent_report_interval }}" + resources: + limits: + cpu: {{ netchecker_agent_cpu_limit }} + memory: {{ netchecker_agent_memory_limit }} + requests: + cpu: {{ netchecker_agent_cpu_requests }} + memory: {{ netchecker_agent_memory_requests }} + securityContext: + runAsUser: {{ netchecker_agent_user | default('0') }} + runAsGroup: {{ netchecker_agent_group | default('0') }} + serviceAccountName: netchecker-agent + updateStrategy: + rollingUpdate: + maxUnavailable: 100% + type: RollingUpdate diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrole.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrole.yml.j2 new file mode 100644 index 0000000..0e23150 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrole.yml.j2 @@ -0,0 +1,14 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp:netchecker-agent-hostnet + namespace: {{ netcheck_namespace }} +rules: + - apiGroups: + - 
policy + resourceNames: + - netchecker-agent-hostnet + resources: + - podsecuritypolicies + verbs: + - use diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..cf44515 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2 @@ -0,0 +1,13 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp:netchecker-agent-hostnet + namespace: {{ netcheck_namespace }} +subjects: + - kind: ServiceAccount + name: netchecker-agent + namespace: {{ netcheck_namespace }} +roleRef: + kind: ClusterRole + name: psp:netchecker-agent-hostnet + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 new file mode 100644 index 0000000..8b2e51a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: netchecker-agent-hostnet + name: netchecker-agent-hostnet + namespace: {{ netcheck_namespace }} +spec: + selector: + matchLabels: + app: netchecker-agent-hostnet + template: + metadata: + name: netchecker-agent-hostnet + labels: + app: netchecker-agent-hostnet + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + kubernetes.io/os: linux + priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + tolerations: + - effect: NoSchedule + operator: Exists + containers: + - name: netchecker-agent + image: "{{ netcheck_agent_image_repo }}:{{ netcheck_agent_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + args: + - "-v=5" + - "-alsologtostderr=true" + - "-serverendpoint=netchecker-service:8081" + - "-reportinterval={{ agent_report_interval }}" + resources: + limits: + cpu: {{ netchecker_agent_cpu_limit }} + memory: {{ netchecker_agent_memory_limit }} + requests: + cpu: {{ netchecker_agent_cpu_requests }} + memory: {{ netchecker_agent_memory_requests }} + securityContext: + runAsUser: {{ netchecker_agent_user | default('0') }} + runAsGroup: {{ netchecker_agent_group | default('0') }} + serviceAccountName: netchecker-agent + updateStrategy: + rollingUpdate: + maxUnavailable: 100% + type: RollingUpdate diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-psp.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-psp.yml.j2 new file mode 100644 index 0000000..21b397d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-psp.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: netchecker-agent-hostnet + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 
'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-sa.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-sa.yml.j2 new file mode 100644 index 0000000..c544043 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-agent-sa.yml.j2 @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: netchecker-agent + namespace: {{ netcheck_namespace }} diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-ns.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-ns.yml.j2 new file mode 100644 index 0000000..3dd87aa --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-ns.yml.j2 @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: "{{ netcheck_namespace }}" + labels: + name: "{{ netcheck_namespace }}" diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 new file mode 100644 index 0000000..290dec3 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 @@ -0,0 +1,9 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: netchecker-server + namespace: {{ netcheck_namespace }} +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["list", "get"] diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..55301b7 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 @@ -0,0 +1,13 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: netchecker-server + namespace: {{ netcheck_namespace }} +subjects: + - kind: ServiceAccount + name: netchecker-server + namespace: {{ netcheck_namespace }} +roleRef: + kind: ClusterRole + name: netchecker-server + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 new file mode 100644 index 0000000..edda5c5 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 @@ -0,0 +1,83 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: netchecker-server + namespace: {{ netcheck_namespace }} + labels: + app: netchecker-server +spec: + replicas: 1 + selector: + matchLabels: + app: netchecker-server + template: + metadata: + name: netchecker-server + labels: + app: netchecker-server + spec: + priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} 
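+      # The server pod pairs the netchecker-server container with a local etcd side-car; etcd keeps its data in the "etcd-data" emptyDir volume defined below, so collected check results are not persisted across pod rescheduling.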
+ volumes: + - name: etcd-data + emptyDir: {} + containers: + - name: netchecker-server + image: "{{ netcheck_server_image_repo }}:{{ netcheck_server_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ netchecker_server_cpu_limit }} + memory: {{ netchecker_server_memory_limit }} + requests: + cpu: {{ netchecker_server_cpu_requests }} + memory: {{ netchecker_server_memory_requests }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + runAsUser: {{ netchecker_server_user | default('0') }} + runAsGroup: {{ netchecker_server_group | default('0') }} + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + ports: + - containerPort: 8081 + args: + - -v=5 + - -logtostderr + - -kubeproxyinit=false + - -endpoint=0.0.0.0:8081 + - -etcd-endpoints=http://127.0.0.1:2379 + - name: etcd + image: "{{ etcd_image_repo }}:{{ netcheck_etcd_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - etcd + - --listen-client-urls=http://127.0.0.1:2379 + - --advertise-client-urls=http://127.0.0.1:2379 + - --data-dir=/var/lib/etcd + - --enable-v2 + - --force-new-cluster + volumeMounts: + - mountPath: /var/lib/etcd + name: etcd-data + resources: + limits: + cpu: {{ netchecker_etcd_cpu_limit }} + memory: {{ netchecker_etcd_memory_limit }} + requests: + cpu: {{ netchecker_etcd_cpu_requests }} + memory: {{ netchecker_etcd_memory_requests }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + runAsUser: {{ netchecker_server_user | default('0') }} + runAsGroup: {{ netchecker_server_group | default('0') }} + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + tolerations: + - effect: NoSchedule + operator: Exists + serviceAccountName: netchecker-server diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2 new file mode 100644 index 0000000..e3ec07f --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2 @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: netchecker-server + namespace: {{ netcheck_namespace }} diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-svc.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-svc.yml.j2 new file mode 100644 index 0000000..dc38946 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/netchecker-server-svc.yml.j2 @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: netchecker-service + namespace: {{ netcheck_namespace }} +spec: + selector: + app: netchecker-server + ports: + - + protocol: TCP + port: 8081 + targetPort: 8081 + nodePort: {{ netchecker_port }} + type: NodePort diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 new file mode 100644 index 0000000..231c8ba --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 @@ -0,0 +1,182 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nodelocaldns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists + +data: + Corefile: | +{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %} +{% for block in nodelocaldns_external_zones %} + {{ block['zones'] | join(' ') }} { + errors + cache {{ block['cache'] | 
default(30) }} + reload +{% if block['rewrite'] is defined and block['rewrite']|length > 0 %} +{% for rewrite_match in block['rewrite'] %} + rewrite {{ rewrite_match }} +{% endfor %} +{% endif %} + loop + bind {{ nodelocaldns_ip }} + forward . {{ block['nameservers'] | join(' ') }} + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} + log +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% endfor %} +{% endif %} + {{ dns_domain }}:53 { + errors + cache { + success 9984 30 + denial 9984 5 + } + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} + health {{ nodelocaldns_ip }}:{{ nodelocaldns_health_port }} +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } + in-addr.arpa:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} + } + ip6.arpa:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} + } + .:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} { +{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} + {{ optname }} {{ optvalue }} +{% endfor %} + }{% endif %} + + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% if enable_nodelocaldns_secondary %} + Corefile-second: | +{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %} +{% for block in nodelocaldns_external_zones %} + {{ block['zones'] | join(' ') }} { + errors + cache {{ block['cache'] | default(30) }} + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ block['nameservers'] | join(' ') }} + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} + log +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% endfor %} +{% endif %} + {{ dns_domain }}:53 { + errors + cache { + success 9984 30 + denial 9984 5 + } + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} + health {{ nodelocaldns_ip }}:{{ nodelocaldns_second_health_port }} +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } + in-addr.arpa:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} + } + ip6.arpa:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . 
{{ forwardTarget }} { + force_tcp + } + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} + } + .:53 { + errors + cache 30 + reload + loop + bind {{ nodelocaldns_ip }} + forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} { +{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} + {{ optname }} {{ optvalue }} +{% endfor %} + }{% endif %} + + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} +{% if dns_etchosts | default(None) %} + hosts /etc/coredns/hosts { + fallthrough + } +{% endif %} + } +{% endif %} +{% if dns_etchosts | default(None) %} + hosts: | + {{ dns_etchosts | indent(width=4, first=False) }} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-daemonset.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-daemonset.yml.j2 new file mode 100644 index 0000000..7c63e28 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-daemonset.yml.j2 @@ -0,0 +1,115 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nodelocaldns + namespace: kube-system + labels: + k8s-app: kube-dns + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: nodelocaldns + template: + metadata: + labels: + k8s-app: nodelocaldns + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '{{ nodelocaldns_prometheus_port }}' + spec: + nodeSelector: + {{ nodelocaldns_ds_nodeselector }} + priorityClassName: system-cluster-critical + serviceAccountName: nodelocaldns + hostNetwork: true + dnsPolicy: Default # Don't use cluster DNS. + tolerations: + - effect: NoSchedule + operator: "Exists" + - effect: NoExecute + operator: "Exists" + containers: + - name: node-cache + image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}" + resources: + limits: + memory: {{ nodelocaldns_memory_limit }} + requests: + cpu: {{ nodelocaldns_cpu_requests }} + memory: {{ nodelocaldns_memory_requests }} + args: + - -localip + - {{ nodelocaldns_ip }} + - -conf + - /etc/coredns/Corefile + - -upstreamsvc + - coredns +{% if enable_nodelocaldns_secondary %} + - -skipteardown +{% else %} + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9253 + name: metrics + protocol: TCP +{% endif %} + securityContext: + privileged: true +{% if nodelocaldns_bind_metrics_host_ip %} + env: + - name: MY_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP +{% endif %} + livenessProbe: + httpGet: + host: {{ nodelocaldns_ip }} + path: /health + port: {{ nodelocaldns_health_port }} + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + readinessProbe: + httpGet: + host: {{ nodelocaldns_ip }} + path: /health + port: {{ nodelocaldns_health_port }} + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + - name: xtables-lock + mountPath: /run/xtables.lock + volumes: + - name: config-volume + configMap: + name: nodelocaldns + items: + - key: Corefile + path: Corefile +{% if dns_etchosts | default(None) %} + - key: hosts + path: hosts +{% endif %} + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": 
https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-sa.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-sa.yml.j2 new file mode 100644 index 0000000..bd962d8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-sa.yml.j2 @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nodelocaldns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile diff --git a/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-second-daemonset.yml.j2 b/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-second-daemonset.yml.j2 new file mode 100644 index 0000000..037bf44 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ansible/templates/nodelocaldns-second-daemonset.yml.j2 @@ -0,0 +1,103 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nodelocaldns-second + namespace: kube-system + labels: + k8s-app: kube-dns + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: nodelocaldns-second + template: + metadata: + labels: + k8s-app: nodelocaldns-second + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '{{ nodelocaldns_secondary_prometheus_port }}' + spec: + nodeSelector: + {{ nodelocaldns_ds_nodeselector }} + priorityClassName: system-cluster-critical + serviceAccountName: nodelocaldns + hostNetwork: true + dnsPolicy: Default # Don't use cluster DNS. + tolerations: + - effect: NoSchedule + operator: "Exists" + - effect: NoExecute + operator: "Exists" + containers: + - name: node-cache + image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}" + resources: + limits: + memory: {{ nodelocaldns_memory_limit }} + requests: + cpu: {{ nodelocaldns_cpu_requests }} + memory: {{ nodelocaldns_memory_requests }} + args: [ "-localip", "{{ nodelocaldns_ip }}", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns", "-skipteardown" ] + securityContext: + privileged: true +{% if nodelocaldns_bind_metrics_host_ip %} + env: + - name: MY_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP +{% endif %} + livenessProbe: + httpGet: + host: {{ nodelocaldns_ip }} + path: /health + port: {{ nodelocaldns_health_port }} + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + readinessProbe: + httpGet: + host: {{ nodelocaldns_ip }} + path: /health + port: {{ nodelocaldns_health_port }} + scheme: HTTP + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 10 + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + - name: xtables-lock + mountPath: /run/xtables.lock + lifecycle: + preStop: + exec: + command: + - sh + - -c + - sleep {{ nodelocaldns_secondary_skew_seconds }} && kill -9 1 + volumes: + - name: config-volume + configMap: + name: nodelocaldns + items: + - key: Corefile-second + path: Corefile +{% if dns_etchosts | default(None) %} + - key: hosts + path: hosts +{% endif %} + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Implement a time skew between the main nodelocaldns and this secondary. 
+ # Since the two nodelocaldns instances share the :53 port, we want to keep + # at least one running at any time even if the manifests are replaced simultaneously + terminationGracePeriodSeconds: {{ nodelocaldns_secondary_skew_seconds }} + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate diff --git a/kubespray/roles/kubernetes-apps/argocd/defaults/main.yml b/kubespray/roles/kubernetes-apps/argocd/defaults/main.yml new file mode 100644 index 0000000..64f8a36 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/argocd/defaults/main.yml @@ -0,0 +1,5 @@ +--- +argocd_enabled: false +argocd_version: v2.5.5 +argocd_namespace: argocd +# argocd_admin_password: diff --git a/kubespray/roles/kubernetes-apps/argocd/tasks/main.yml b/kubespray/roles/kubernetes-apps/argocd/tasks/main.yml new file mode 100644 index 0000000..a6a4450 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/argocd/tasks/main.yml @@ -0,0 +1,79 @@ +--- +- name: Kubernetes Apps | Install yq + become: yes + get_url: + url: "https://github.com/mikefarah/yq/releases/download/v4.30.6/yq_linux_{{ host_architecture }}" + dest: "{{ bin_dir }}/yq" + mode: '0755' + +- name: Kubernetes Apps | Set ArgoCD template list + set_fact: + argocd_templates: + - name: namespace + file: argocd-namespace.yml + - name: install + file: argocd-install.yml + namespace: "{{ argocd_namespace }}" + url: "https://raw.githubusercontent.com/argoproj/argo-cd/{{ argocd_version }}/manifests/install.yaml" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +- name: Kubernetes Apps | Download ArgoCD remote manifests + become: yes + get_url: + url: "{{ item.url }}" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}" + loop_control: + label: "{{ item.file }}" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +- name: Kubernetes Apps | Set ArgoCD namespace for remote manifests + become: yes + command: | + {{ bin_dir }}/yq eval-all -i '.metadata.namespace="{{ argocd_namespace }}"' {{ kube_config_dir }}/{{ item.file }} + with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}" + loop_control: + label: "{{ item.file }}" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +- name: Kubernetes Apps | Create ArgoCD manifests from templates + become: yes + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: "{{ argocd_templates | selectattr('url', 'undefined') | list }}" + loop_control: + label: "{{ item.file }}" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +- name: Kubernetes Apps | Install ArgoCD + become: yes + kube: + name: ArgoCD + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.file }}" + state: latest + with_items: "{{ argocd_templates }}" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +# https://github.com/argoproj/argo-cd/blob/master/docs/faq.md#i-forgot-the-admin-password-how-do-i-reset-it +- name: Kubernetes Apps | Set ArgoCD custom admin password + become: yes + shell: | + {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n {{ argocd_namespace }} patch secret argocd-secret -p \ + '{ + "stringData": { + "admin.password": "{{ argocd_admin_password | password_hash('bcrypt') }}", + "admin.passwordMtime": "'$(date +%FT%T%Z)'" + } + }' + when: + - argocd_admin_password is defined + - "inventory_hostname == 
groups['kube_control_plane'][0]" diff --git a/kubespray/roles/kubernetes-apps/argocd/templates/argocd-namespace.yml.j2 b/kubespray/roles/kubernetes-apps/argocd/templates/argocd-namespace.yml.j2 new file mode 100644 index 0000000..99962f1 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/argocd/templates/argocd-namespace.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{argocd_namespace}} + labels: + app: argocd diff --git a/kubespray/roles/kubernetes-apps/cloud_controller/oci/defaults/main.yml b/kubespray/roles/kubernetes-apps/cloud_controller/oci/defaults/main.yml new file mode 100644 index 0000000..9d7ddf0 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cloud_controller/oci/defaults/main.yml @@ -0,0 +1,6 @@ +--- + +oci_security_list_management: All +oci_use_instance_principals: false +oci_cloud_controller_version: 0.7.0 +oci_cloud_controller_pull_source: iad.ocir.io/oracle/cloud-provider-oci diff --git a/kubespray/roles/kubernetes-apps/cloud_controller/oci/tasks/credentials-check.yml b/kubespray/roles/kubernetes-apps/cloud_controller/oci/tasks/credentials-check.yml new file mode 100644 index 0000000..9eb8794 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cloud_controller/oci/tasks/credentials-check.yml @@ -0,0 +1,67 @@ +--- + +- name: "OCI Cloud Controller | Credentials Check | oci_private_key" + fail: + msg: "oci_private_key is missing" + when: + - not oci_use_instance_principals + - oci_private_key is not defined or not oci_private_key + +- name: "OCI Cloud Controller | Credentials Check | oci_region_id" + fail: + msg: "oci_region_id is missing" + when: + - not oci_use_instance_principals + - oci_region_id is not defined or not oci_region_id + +- name: "OCI Cloud Controller | Credentials Check | oci_tenancy_id" + fail: + msg: "oci_tenancy_id is missing" + when: + - not oci_use_instance_principals + - oci_tenancy_id is not defined or not oci_tenancy_id + +- name: "OCI Cloud Controller | Credentials Check | oci_user_id" + fail: + msg: "oci_user_id is missing" + when: + - not oci_use_instance_principals + - oci_user_id is not defined or not oci_user_id + +- name: "OCI Cloud Controller | Credentials Check | oci_user_fingerprint" + fail: + msg: "oci_user_fingerprint is missing" + when: + - not oci_use_instance_principals + - oci_user_fingerprint is not defined or not oci_user_fingerprint + +- name: "OCI Cloud Controller | Credentials Check | oci_compartment_id" + fail: + msg: "oci_compartment_id is missing. This is the compartment in which the cluster resides" + when: + - oci_compartment_id is not defined or not oci_compartment_id + +- name: "OCI Cloud Controller | Credentials Check | oci_vnc_id" + fail: + msg: "oci_vnc_id is missing. This is the Virtual Cloud Network in which the cluster resides" + when: + - oci_vnc_id is not defined or not oci_vnc_id + +- name: "OCI Cloud Controller | Credentials Check | oci_subnet1_id" + fail: + msg: "oci_subnet1_id is missing. This is the first subnet to which load balancers will be added" + when: + - oci_subnet1_id is not defined or not oci_subnet1_id + +- name: "OCI Cloud Controller | Credentials Check | oci_subnet2_id" + fail: + msg: "oci_subnet2_id is missing. 
Two subnets are required for load balancer high availability" + when: + - oci_cloud_controller_version is version_compare('0.7.0', '<') + - oci_subnet2_id is not defined or not oci_subnet2_id + +- name: "OCI Cloud Controller | Credentials Check | oci_security_list_management" + fail: + msg: "oci_security_list_management is missing, or not defined correctly. Valid options are (All, Frontend, None)." + when: + - oci_security_list_management is not defined or oci_security_list_management not in ["All", "Frontend", "None"] diff --git a/kubespray/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml b/kubespray/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml new file mode 100644 index 0000000..2224ae5 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml @@ -0,0 +1,34 @@ +--- + +- include: credentials-check.yml + +- name: "OCI Cloud Controller | Generate Cloud Provider Configuration" + template: + src: controller-manager-config.yml.j2 + dest: "{{ kube_config_dir }}/controller-manager-config.yml" + mode: 0644 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: "OCI Cloud Controller | Slurp Configuration" + slurp: + src: "{{ kube_config_dir }}/controller-manager-config.yml" + register: controller_manager_config + +- name: "OCI Cloud Controller | Encode Configuration" + set_fact: + controller_manager_config_base64: "{{ controller_manager_config.content }}" + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: "OCI Cloud Controller | Generate Manifests" + template: + src: oci-cloud-provider.yml.j2 + dest: "{{ kube_config_dir }}/oci-cloud-provider.yml" + mode: 0644 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: "OCI Cloud Controller | Apply Manifests" + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/oci-cloud-provider.yml" + state: latest + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 b/kubespray/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 new file mode 100644 index 0000000..b8dcc60 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 @@ -0,0 +1,90 @@ +{% macro private_key() %}{{ oci_private_key }}{% endmacro %} + +{% if oci_use_instance_principals %} + # (https://docs.us-phoenix-1.oraclecloud.com/Content/Identity/Tasks/callingservicesfrominstances.htm). 
+ # Ensure you have set up the following OCI policies and your kubernetes nodes are running within them + # allow dynamic-group [your dynamic group name] to read instance-family in compartment [your compartment name] + # allow dynamic-group [your dynamic group name] to use virtual-network-family in compartment [your compartment name] + # allow dynamic-group [your dynamic group name] to manage load-balancers in compartment [your compartment name] +useInstancePrincipals: true +{% else %} +useInstancePrincipals: false +{% endif %} + +auth: + +{% if oci_use_instance_principals %} + # This key is put here too for backwards compatibility + useInstancePrincipals: true +{% else %} + useInstancePrincipals: false + + region: {{ oci_region_id }} + tenancy: {{ oci_tenancy_id }} + user: {{ oci_user_id }} + key: | + {{ oci_private_key }} + + {% if oci_private_key_passphrase is defined %} + passphrase: {{ oci_private_key_passphrase }} + {% endif %} + + + fingerprint: {{ oci_user_fingerprint }} +{% endif %} + +# compartment configures Compartment within which the cluster resides. +compartment: {{ oci_compartment_id }} + +# vcn configures the Virtual Cloud Network (VCN) within which the cluster resides. +vcn: {{ oci_vnc_id }} + +loadBalancer: + # subnet1 configures one of two subnets to which load balancers will be added. + # OCI load balancers require two subnets to ensure high availability. + subnet1: {{ oci_subnet1_id }} +{% if oci_subnet2_id is defined %} + # subnet2 configures the second of two subnets to which load balancers will be + # added. OCI load balancers require two subnets to ensure high availability. + subnet2: {{ oci_subnet2_id }} +{% endif %} + # SecurityListManagementMode configures how security lists are managed by the CCM. + # "All" (default): Manage all required security list rules for load balancer services. + # "Frontend": Manage only security list rules for ingress to the load + # balancer. Requires that the user has set up a rule that + # allows inbound traffic to the appropriate ports for kube + # proxy health port, node port ranges, and health check port ranges. + # E.g. 10.82.0.0/16 30000-32000. + # "None": Disables all security list management. Requires that the + # user has set up a rule that allows inbound traffic to the + # appropriate ports for kube proxy health port, node port + # ranges, and health check port ranges. E.g. 10.82.0.0/16 30000-32000. + # Additionally requires the user to manage rules to allow + # inbound traffic to load balancers. + securityListManagementMode: {{ oci_security_list_management }} + +{% if oci_security_lists is defined and oci_security_lists|length > 0 %} + # Optional specification of which security lists to modify per subnet. This does not apply if security list management is off. 
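+  # For example (placeholder OCIDs only):
+  #   ocid1.subnet.oc1..exampleuniqueid: ocid1.securitylist.oc1..exampleuniqueid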
+ securityLists: +{% for subnet_ocid, list_ocid in oci_security_lists.items() %} + {{ subnet_ocid }}: {{ list_ocid }} +{% endfor %} +{% endif %} + +{% if oci_rate_limit is defined and oci_rate_limit|length > 0 %} +# Optional rate limit controls for accessing OCI API +rateLimiter: +{% if oci_rate_limit.rate_limit_qps_read %} + rateLimitQPSRead: {{ oci_rate_limit.rate_limit_qps_read }} +{% endif %} +{% if oci_rate_limit.rate_limit_qps_write %} + rateLimitQPSWrite: {{ oci_rate_limit.rate_limit_qps_write }} +{% endif %} +{% if oci_rate_limit.rate_limit_bucket_read %} + rateLimitBucketRead: {{ oci_rate_limit.rate_limit_bucket_read }} +{% endif %} +{% if oci_rate_limit.rate_limit_bucket_write %} + rateLimitBucketWrite: {{ oci_rate_limit.rate_limit_bucket_write }} +{% endif %} +{% endif %} + diff --git a/kubespray/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 b/kubespray/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 new file mode 100644 index 0000000..bacd1e9 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 @@ -0,0 +1,73 @@ +apiVersion: v1 +data: + cloud-provider.yaml: {{ controller_manager_config_base64 }} +kind: Secret +metadata: + name: oci-cloud-controller-manager + namespace: kube-system +type: Opaque + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: oci-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: oci-cloud-controller-manager +spec: + selector: + matchLabels: + component: oci-cloud-controller-manager + tier: control-plane + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + component: oci-cloud-controller-manager + tier: control-plane + spec: +{% if oci_cloud_controller_pull_secret is defined %} + imagePullSecrets: + - name: {{oci_cloud_controller_pull_secret}} +{% endif %} + serviceAccountName: cloud-controller-manager + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + volumes: + - name: cfg + secret: + secretName: oci-cloud-controller-manager + - name: kubernetes + hostPath: + path: /etc/kubernetes + containers: + - name: oci-cloud-controller-manager + image: {{oci_cloud_controller_pull_source}}:{{oci_cloud_controller_version}} + command: ["/usr/local/bin/oci-cloud-controller-manager"] + args: + - --cloud-config=/etc/oci/cloud-provider.yaml + - --cloud-provider=oci + - --leader-elect-resource-lock=configmaps + - -v=2 + volumeMounts: + - name: cfg + mountPath: /etc/oci + readOnly: true + - name: kubernetes + mountPath: /etc/kubernetes + readOnly: true + diff --git a/kubespray/roles/kubernetes-apps/cluster_roles/defaults/main.yml b/kubespray/roles/kubernetes-apps/cluster_roles/defaults/main.yml new file mode 100644 index 0000000..f26583d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cluster_roles/defaults/main.yml @@ -0,0 +1,65 @@ +--- + +podsecuritypolicy_restricted_spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 
'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + runAsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + +podsecuritypolicy_privileged_spec: + privileged: true + allowPrivilegeEscalation: true + allowedCapabilities: + - '*' + volumes: + - '*' + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + hostIPC: true + hostPID: true + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + runAsGroup: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false + # This will fail if allowed-unsafe-sysctls is not set accordingly in kubelet flags + allowedUnsafeSysctls: + - '*' diff --git a/kubespray/roles/kubernetes-apps/cluster_roles/files/k8s-cluster-critical-pc.yml b/kubespray/roles/kubernetes-apps/cluster_roles/files/k8s-cluster-critical-pc.yml new file mode 100644 index 0000000..479fb57 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cluster_roles/files/k8s-cluster-critical-pc.yml @@ -0,0 +1,8 @@ +--- +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: k8s-cluster-critical +value: 1000000000 +globalDefault: false +description: "This priority class should only be used by the pods installed using kubespray." diff --git a/kubespray/roles/kubernetes-apps/cluster_roles/files/oci-rbac.yml b/kubespray/roles/kubernetes-apps/cluster_roles/files/oci-rbac.yml new file mode 100644 index 0000000..5e3b82b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cluster_roles/files/oci-rbac.yml @@ -0,0 +1,124 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloud-controller-manager +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + +- apiGroups: + - "" + resources: + - services + verbs: + - list + - watch + - patch + +- apiGroups: + - "" + resources: + - services/status + verbs: + - update + +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +# For leader election +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + +- apiGroups: + - "" + resources: + - endpoints + resourceNames: + - "cloud-controller-manager" + verbs: + - get + - list + - watch + - update + +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - "cloud-controller-manager" + verbs: + - get + - update + +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + +# For the PVL +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - list + - watch + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: oci-cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager +subjects: +- kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/kubespray/roles/kubernetes-apps/cluster_roles/tasks/main.yml new file mode 100644 index 0000000..ddbddba --- /dev/null +++ 
b/kubespray/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -0,0 +1,109 @@ +--- +- name: Kubernetes Apps | Wait for kube-apiserver + uri: + url: "{{ kube_apiserver_endpoint }}/healthz" + validate_certs: no + client_cert: "{{ kube_apiserver_client_cert }}" + client_key: "{{ kube_apiserver_client_key }}" + register: result + until: result.status == 200 + retries: 10 + delay: 6 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes + template: + src: "node-crb.yml.j2" + dest: "{{ kube_config_dir }}/node-crb.yml" + mode: 0640 + register: node_crb_manifest + when: + - rbac_enabled + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Apply workaround to allow all nodes with cert O=system:nodes to register + kube: + name: "kubespray:system:node" + kubectl: "{{ bin_dir }}/kubectl" + resource: "clusterrolebinding" + filename: "{{ kube_config_dir }}/node-crb.yml" + state: latest + register: result + until: result is succeeded + retries: 10 + delay: 6 + when: + - rbac_enabled + - node_crb_manifest.changed + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet + template: + src: "node-webhook-cr.yml.j2" + dest: "{{ kube_config_dir }}/node-webhook-cr.yml" + mode: 0640 + register: node_webhook_cr_manifest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + - inventory_hostname == groups['kube_control_plane'][0] + tags: node-webhook + +- name: Apply webhook ClusterRole + kube: + name: "system:node-webhook" + kubectl: "{{ bin_dir }}/kubectl" + resource: "clusterrole" + filename: "{{ kube_config_dir }}/node-webhook-cr.yml" + state: latest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + - node_webhook_cr_manifest.changed + - inventory_hostname == groups['kube_control_plane'][0] + tags: node-webhook + +- name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole + template: + src: "node-webhook-crb.yml.j2" + dest: "{{ kube_config_dir }}/node-webhook-crb.yml" + mode: 0640 + register: node_webhook_crb_manifest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + - inventory_hostname == groups['kube_control_plane'][0] + tags: node-webhook + +- name: Grant system:nodes the webhook ClusterRole + kube: + name: "system:node-webhook" + kubectl: "{{ bin_dir }}/kubectl" + resource: "clusterrolebinding" + filename: "{{ kube_config_dir }}/node-webhook-crb.yml" + state: latest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + - node_webhook_crb_manifest.changed + - inventory_hostname == groups['kube_control_plane'][0] + tags: node-webhook + +- include_tasks: oci.yml + tags: oci + when: + - cloud_provider is defined + - cloud_provider == 'oci' + +- name: PriorityClass | Copy k8s-cluster-critical-pc.yml file + copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml mode=0640 + when: inventory_hostname == groups['kube_control_plane']|last + +- name: PriorityClass | Create k8s-cluster-critical + kube: + name: k8s-cluster-critical + kubectl: "{{ bin_dir }}/kubectl" + resource: "PriorityClass" + filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml" + state: latest + when: inventory_hostname == groups['kube_control_plane']|last diff --git a/kubespray/roles/kubernetes-apps/cluster_roles/tasks/oci.yml b/kubespray/roles/kubernetes-apps/cluster_roles/tasks/oci.yml new file mode 
100644 index 0000000..eb07463 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cluster_roles/tasks/oci.yml @@ -0,0 +1,19 @@ +--- +- name: Copy OCI RBAC Manifest + copy: + src: "oci-rbac.yml" + dest: "{{ kube_config_dir }}/oci-rbac.yml" + mode: 0640 + when: + - cloud_provider is defined + - cloud_provider == 'oci' + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Apply OCI RBAC + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/oci-rbac.yml" + when: + - cloud_provider is defined + - cloud_provider == 'oci' + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 b/kubespray/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 new file mode 100644 index 0000000..f2e115a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: "kube-system" diff --git a/kubespray/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 b/kubespray/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 new file mode 100644 index 0000000..9a4a3c4 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: kubespray:system:node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:nodes diff --git a/kubespray/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 b/kubespray/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 new file mode 100644 index 0000000..bf9aaf7 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:node-webhook +rules: + - apiGroups: + - "" + resources: + - nodes/proxy + - nodes/stats + - nodes/log + - nodes/spec + - nodes/metrics + verbs: + - "*" diff --git a/kubespray/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 b/kubespray/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 new file mode 100644 index 0000000..68aed5c --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:node-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-webhook +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:nodes diff --git a/kubespray/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 b/kubespray/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 new file mode 100644 index 0000000..99da046 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole 
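+# Grants the vSphere cloud provider read access to Node objects and permission to record Events; the binding below attaches it to the vsphere-cloud-provider service account in kube-system.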
+metadata: + name: system:vsphere-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:vsphere-cloud-provider +roleRef: + kind: ClusterRole + name: system:vsphere-cloud-provider + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: vsphere-cloud-provider + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/container_engine_accelerator/meta/main.yml b/kubespray/roles/kubernetes-apps/container_engine_accelerator/meta/main.yml new file mode 100644 index 0000000..c82c5d8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_engine_accelerator/meta/main.yml @@ -0,0 +1,8 @@ +--- +dependencies: + - role: kubernetes-apps/container_engine_accelerator/nvidia_gpu + when: nvidia_accelerator_enabled + tags: + - apps + - nvidia_gpu + - container_engine_accelerator diff --git a/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/defaults/main.yml b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/defaults/main.yml new file mode 100644 index 0000000..6e870e4 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/defaults/main.yml @@ -0,0 +1,14 @@ +--- +nvidia_accelerator_enabled: false +nvidia_driver_version: "390.87" +nvidia_gpu_tesla_base_url: https://us.download.nvidia.com/tesla/ +nvidia_gpu_gtx_base_url: http://us.download.nvidia.com/XFree86/Linux-x86_64/ +nvidia_gpu_flavor: tesla +nvidia_url_end: "{{ nvidia_driver_version }}/NVIDIA-Linux-x86_64-{{ nvidia_driver_version }}.run" +nvidia_driver_install_container: false +nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +nvidia_driver_install_ubuntu_container: registry.k8s.io/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +nvidia_driver_install_supported: false +nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" +nvidia_gpu_nodes: [] +nvidia_gpu_device_plugin_memory: 30Mi diff --git a/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml new file mode 100644 index 0000000..62ecaf9 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml @@ -0,0 +1,55 @@ +--- + +- name: Container Engine Acceleration Nvidia GPU| gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + skip: true + +- name: Container Engine Acceleration Nvidia GPU | Set fact of download url Tesla + set_fact: + nvidia_driver_download_url_default: "{{ nvidia_gpu_tesla_base_url }}{{ nvidia_url_end }}" + when: nvidia_gpu_flavor|lower == "tesla" + +- name: Container Engine Acceleration Nvidia GPU | Set fact of download url GTX + set_fact: + nvidia_driver_download_url_default: "{{ 
nvidia_gpu_gtx_base_url }}{{ nvidia_url_end }}" + when: nvidia_gpu_flavor|lower == "gtx" + +- name: Container Engine Acceleration Nvidia GPU | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/container_engine_accelerator" + owner: root + group: root + mode: 0755 + recurse: true + +- name: Container Engine Acceleration Nvidia GPU | Create manifests for nvidia accelerators + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.file }}" + mode: 0644 + with_items: + - { name: nvidia-driver-install-daemonset, file: nvidia-driver-install-daemonset.yml, type: daemonset } + - { name: k8s-device-plugin-nvidia-daemonset, file: k8s-device-plugin-nvidia-daemonset.yml, type: daemonset } + register: container_engine_accelerator_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container + +- name: Container Engine Acceleration Nvidia GPU | Apply manifests for nvidia accelerators + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ container_engine_accelerator_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container and nvidia_driver_install_supported diff --git a/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2 b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2 new file mode 100644 index 0000000..c5a7f51 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2 @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nvidia-gpu-device-plugin + namespace: kube-system + labels: + k8s-app: nvidia-gpu-device-plugin + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: nvidia-gpu-device-plugin + template: + metadata: + labels: + k8s-app: nvidia-gpu-device-plugin + spec: + priorityClassName: system-node-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "nvidia.com/gpu" + operator: Exists + tolerations: + - operator: "Exists" + effect: "NoExecute" + - operator: "Exists" + effect: "NoSchedule" + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + hostPID: true + volumes: + - name: device-plugin + hostPath: + path: /var/lib/kubelet/device-plugins + - name: dev + hostPath: + path: /dev + containers: + - image: "{{ nvidia_gpu_device_plugin_container }}" + command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"] + name: nvidia-gpu-device-plugin + resources: + requests: + cpu: 50m + memory: {{ nvidia_gpu_device_plugin_memory }} + limits: + cpu: 50m + memory: {{ nvidia_gpu_device_plugin_memory }} + securityContext: + privileged: true + volumeMounts: + - name: device-plugin + mountPath: /device-plugin + - name: dev + mountPath: /dev + updateStrategy: + type: RollingUpdate diff --git a/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 new file 
mode 100644 index 0000000..ea097ed --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 @@ -0,0 +1,82 @@ +# Copyright 2017 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nvidia-driver-installer + namespace: kube-system +spec: + selector: + matchLabels: + name: nvidia-driver-installer + template: + metadata: + labels: + name: nvidia-driver-installer + spec: + priorityClassName: system-node-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "nvidia.com/gpu" + operator: Exists + tolerations: + - key: "nvidia.com/gpu" + effect: "NoSchedule" + operator: "Exists" + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + hostPID: true + volumes: + - name: dev + hostPath: + path: /dev + - name: nvidia-install-dir-host + hostPath: + path: /home/kubernetes/bin/nvidia + - name: root-mount + hostPath: + path: / + initContainers: + - image: "{{ nvidia_driver_install_container }}" + name: nvidia-driver-installer + resources: + requests: + cpu: 0.15 + securityContext: + privileged: true + env: + - name: NVIDIA_INSTALL_DIR_HOST + value: /home/kubernetes/bin/nvidia + - name: NVIDIA_INSTALL_DIR_CONTAINER + value: /usr/local/nvidia + - name: ROOT_MOUNT_DIR + value: /root + - name: NVIDIA_DRIVER_VERSION + value: "{{ nvidia_driver_version }}" + - name: NVIDIA_DRIVER_DOWNLOAD_URL + value: "{{ nvidia_driver_download_url_default }}" + volumeMounts: + - name: nvidia-install-dir-host + mountPath: /usr/local/nvidia + - name: dev + mountPath: /dev + - name: root-mount + mountPath: /root + containers: + - image: "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}" + name: pause diff --git a/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/centos-7.yml b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/centos-7.yml new file mode 100644 index 0000000..b1ea65b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/centos-7.yml @@ -0,0 +1,3 @@ +--- +nvidia_driver_install_container: "{{ nvidia_driver_install_centos_container }}" +nvidia_driver_install_supported: true diff --git a/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-16.yml b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-16.yml new file mode 100644 index 0000000..f1bfdfc --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-16.yml @@ -0,0 +1,3 @@ +--- +nvidia_driver_install_container: "{{ nvidia_driver_install_ubuntu_container }}" +nvidia_driver_install_supported: true diff --git a/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-18.yml b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-18.yml new file mode 100644 
index 0000000..f1bfdfc --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-18.yml @@ -0,0 +1,3 @@ +--- +nvidia_driver_install_container: "{{ nvidia_driver_install_ubuntu_container }}" +nvidia_driver_install_supported: true diff --git a/kubespray/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml b/kubespray/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml new file mode 100644 index 0000000..46384d2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: crun | Copy runtime class manifest + template: + src: runtimeclass-crun.yml + dest: "{{ kube_config_dir }}/runtimeclass-crun.yml" + mode: "0664" + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: crun | Apply manifests + kube: + name: "runtimeclass-crun" + kubectl: "{{ bin_dir }}/kubectl" + resource: "runtimeclass" + filename: "{{ kube_config_dir }}/runtimeclass-crun.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/container_runtimes/crun/templates/runtimeclass-crun.yml b/kubespray/roles/kubernetes-apps/container_runtimes/crun/templates/runtimeclass-crun.yml new file mode 100644 index 0000000..99d97e6 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_runtimes/crun/templates/runtimeclass-crun.yml @@ -0,0 +1,6 @@ +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: crun +handler: crun diff --git a/kubespray/roles/kubernetes-apps/container_runtimes/gvisor/tasks/main.yaml b/kubespray/roles/kubernetes-apps/container_runtimes/gvisor/tasks/main.yaml new file mode 100644 index 0000000..b5b881e --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_runtimes/gvisor/tasks/main.yaml @@ -0,0 +1,34 @@ +--- +- name: gVisor | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/gvisor" + owner: root + group: root + mode: 0755 + recurse: true + +- name: gVisor | Templates List + set_fact: + gvisor_templates: + - { name: runtimeclass-gvisor, file: runtimeclass-gvisor.yml, type: runtimeclass } + +- name: gVisor | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/gvisor/{{ item.file }}" + mode: 0644 + with_items: "{{ gvisor_templates }}" + register: gvisor_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: gVisor | Apply manifests + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/gvisor/{{ item.item.file }}" + state: "latest" + with_items: "{{ gvisor_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/container_runtimes/gvisor/templates/runtimeclass-gvisor.yml.j2 b/kubespray/roles/kubernetes-apps/container_runtimes/gvisor/templates/runtimeclass-gvisor.yml.j2 new file mode 100644 index 0000000..64465fa --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_runtimes/gvisor/templates/runtimeclass-gvisor.yml.j2 @@ -0,0 +1,6 @@ +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: gvisor +handler: runsc diff --git a/kubespray/roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml b/kubespray/roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml new file mode 100644 index 0000000..6eacb79 --- /dev/null +++ 
b/kubespray/roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml @@ -0,0 +1,5 @@ +--- + +kata_containers_qemu_overhead: true +kata_containers_qemu_overhead_fixed_cpu: 250m +kata_containers_qemu_overhead_fixed_memory: 160Mi diff --git a/kubespray/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml b/kubespray/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml new file mode 100644 index 0000000..a07c7c2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml @@ -0,0 +1,35 @@ +--- + +- name: Kata Containers | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/kata_containers" + owner: root + group: root + mode: 0755 + recurse: true + +- name: Kata Containers | Templates list + set_fact: + kata_containers_templates: + - { name: runtimeclass-kata-qemu, file: runtimeclass-kata-qemu.yml, type: runtimeclass } + +- name: Kata Containers | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/kata_containers/{{ item.file }}" + mode: 0644 + with_items: "{{ kata_containers_templates }}" + register: kata_containers_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kata Containers | Apply manifests + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/kata_containers/{{ item.item.file }}" + state: "latest" + with_items: "{{ kata_containers_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/container_runtimes/kata_containers/templates/runtimeclass-kata-qemu.yml.j2 b/kubespray/roles/kubernetes-apps/container_runtimes/kata_containers/templates/runtimeclass-kata-qemu.yml.j2 new file mode 100644 index 0000000..2240cdb --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_runtimes/kata_containers/templates/runtimeclass-kata-qemu.yml.j2 @@ -0,0 +1,12 @@ +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: kata-qemu +handler: kata-qemu +{% if kata_containers_qemu_overhead %} +overhead: + podFixed: + cpu: {{ kata_containers_qemu_overhead_fixed_cpu }} + memory: {{ kata_containers_qemu_overhead_fixed_memory }} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/container_runtimes/meta/main.yml b/kubespray/roles/kubernetes-apps/container_runtimes/meta/main.yml new file mode 100644 index 0000000..8584117 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_runtimes/meta/main.yml @@ -0,0 +1,31 @@ +--- +dependencies: + - role: kubernetes-apps/container_runtimes/kata_containers + when: kata_containers_enabled + tags: + - apps + - kata-containers + - container-runtimes + + - role: kubernetes-apps/container_runtimes/gvisor + when: gvisor_enabled + tags: + - apps + - gvisor + - container-runtimes + + - role: kubernetes-apps/container_runtimes/crun + when: crun_enabled + tags: + - apps + - crun + - container-runtimes + + - role: kubernetes-apps/container_runtimes/youki + when: + - youki_enabled + - container_manager == 'crio' + tags: + - apps + - youki + - container-runtimes diff --git a/kubespray/roles/kubernetes-apps/container_runtimes/youki/tasks/main.yaml b/kubespray/roles/kubernetes-apps/container_runtimes/youki/tasks/main.yaml new file mode 100644 index 0000000..6da025f --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_runtimes/youki/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: youki | 
Copy runtime class manifest + template: + src: runtimeclass-youki.yml + dest: "{{ kube_config_dir }}/runtimeclass-youki.yml" + mode: "0664" + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: youki | Apply manifests + kube: + name: "runtimeclass-youki" + kubectl: "{{ bin_dir }}/kubectl" + resource: "runtimeclass" + filename: "{{ kube_config_dir }}/runtimeclass-youki.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/container_runtimes/youki/templates/runtimeclass-youki.yml b/kubespray/roles/kubernetes-apps/container_runtimes/youki/templates/runtimeclass-youki.yml new file mode 100644 index 0000000..b68bd06 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/container_runtimes/youki/templates/runtimeclass-youki.yml @@ -0,0 +1,6 @@ +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: youki +handler: youki diff --git a/kubespray/roles/kubernetes-apps/csi_driver/OWNERS b/kubespray/roles/kubernetes-apps/csi_driver/OWNERS new file mode 100644 index 0000000..6cfbaa8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +reviewers: + - alijahnas + - luckySB diff --git a/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/defaults/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/defaults/main.yml new file mode 100644 index 0000000..33df37c --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/defaults/main.yml @@ -0,0 +1,11 @@ +--- +aws_ebs_csi_enable_volume_scheduling: true +aws_ebs_csi_enable_volume_snapshot: false +aws_ebs_csi_enable_volume_resizing: false +aws_ebs_csi_controller_replicas: 1 +aws_ebs_csi_plugin_image_tag: latest + +# Add annotations to ebs_csi_controller.
Useful if using kube2iam for role assumption +# aws_ebs_csi_annotations: +# - key: iam.amazonaws.com/role +# value: your-ebs-role-arn diff --git a/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml new file mode 100644 index 0000000..5570dcc --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- name: AWS CSI Driver | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: aws-ebs-csi-driver, file: aws-ebs-csi-driver.yml} + - {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice-rbac.yml} + - {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice.yml} + - {name: aws-ebs-csi-nodeservice, file: aws-ebs-csi-nodeservice.yml} + register: aws_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: AWS CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ aws_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice-rbac.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice-rbac.yml.j2 new file mode 100644 index 0000000..87bfa31 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice-rbac.yml.j2 @@ -0,0 +1,180 @@ +# Controller Service +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ebs-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +# The permissions in this ClusterRole are tightly coupled with the version of csi-attacher used. More information about this can be found in kubernetes-csi/external-attacher. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ebs-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +{% if aws_ebs_csi_enable_volume_snapshot %} +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ebs-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +{% endif %} + +{% if aws_ebs_csi_enable_volume_resizing %} +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-csi-resizer-binding +subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ebs-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice.yml.j2 new file mode 100644 index 0000000..ffce40b 
--- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice.yml.j2 @@ -0,0 +1,132 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: ebs-csi-controller + namespace: kube-system +spec: + replicas: {{ aws_ebs_csi_controller_replicas }} + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + labels: + app: ebs-csi-controller + app.kubernetes.io/name: aws-ebs-csi-driver +{% if aws_ebs_csi_annotations is defined %} + annotations: +{% for annotation in aws_ebs_csi_annotations %} + {{ annotation.key }}: {{ annotation.value }} +{% endfor %} +{% endif %} + spec: + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: ebs-csi-controller-sa + priorityClassName: system-cluster-critical + containers: + - name: ebs-plugin + image: {{ aws_ebs_csi_plugin_image_repo }}:{{ aws_ebs_csi_plugin_image_tag }} + args: + - --endpoint=$(CSI_ENDPOINT) +{% if aws_ebs_csi_extra_volume_tags is defined %} + - --extra-volume-tags={{ aws_ebs_csi_extra_volume_tags }} +{% endif %} + - --logtostderr + - --v=5 + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: aws-secret + key: key_id + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws-secret + key: access_key + optional: true + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 5 + - name: csi-provisioner + image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }} + args: + - --csi-address=$(ADDRESS) + - --v=5 +{% if aws_ebs_csi_enable_volume_scheduling %} + - --feature-gates=Topology=true +{% endif %} + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-attacher + image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }} + args: + - --csi-address=$(ADDRESS) + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ +{% if aws_ebs_csi_enable_volume_snapshot %} + - name: csi-snapshotter + image: {{ csi_snapshotter_image_repo }}:{{ csi_snapshotter_image_tag }} + args: + - --csi-address=$(ADDRESS) + - --timeout=15s + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ +{% endif %} +{% if aws_ebs_csi_enable_volume_resizing %} + - name: csi-resizer + image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --csi-address=$(ADDRESS) + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ +{% endif %} + - name: liveness-probe + image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }} + args: + - --csi-address=/csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + volumes: + - name: socket-dir + emptyDir: {} + diff --git 
a/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-driver.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-driver.yml.j2 new file mode 100644 index 0000000..99c6c5b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-driver.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: ebs.csi.aws.com +spec: + attachRequired: true + podInfoOnMount: false diff --git a/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-nodeservice.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-nodeservice.yml.j2 new file mode 100644 index 0000000..1dc1925 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-nodeservice.yml.j2 @@ -0,0 +1,101 @@ +--- +# Node Service +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ebs-csi-node + namespace: kube-system +spec: + selector: + matchLabels: + app: ebs-csi-node + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + labels: + app: ebs-csi-node + app.kubernetes.io/name: aws-ebs-csi-driver + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + priorityClassName: system-node-critical + containers: + - name: ebs-plugin + securityContext: + privileged: true + image: {{ aws_ebs_csi_plugin_image_repo }}:{{ aws_ebs_csi_plugin_image_tag }} + args: + - --endpoint=$(CSI_ENDPOINT) +{% if aws_ebs_csi_extra_volume_tags is defined %} + - --extra-volume-tags={{ aws_ebs_csi_extra_volume_tags }} +{% endif %} + - --logtostderr + - --v=5 + env: + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + volumeMounts: + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + - name: plugin-dir + mountPath: /csi + - name: device-dir + mountPath: /dev + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 5 + - name: node-driver-registrar + image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=5 + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock"] + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: liveness-probe + image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }} + args: + - --csi-address=/csi/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: /csi + volumes: + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/ebs.csi.aws.com/ + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: device-dir + hostPath: + path: /dev + type: Directory diff --git a/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/defaults/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/defaults/main.yml new file mode 100644 index 0000000..341cc97 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/defaults/main.yml @@ -0,0 +1,6 @@ 
+--- +azure_csi_use_instance_metadata: true +azure_csi_controller_replicas: 2 +azure_csi_plugin_image_tag: latest +azure_csi_controller_affinity: {} +azure_csi_node_affinity: {} diff --git a/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/tasks/azure-credential-check.yml b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/tasks/azure-credential-check.yml new file mode 100644 index 0000000..0a858ee --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/tasks/azure-credential-check.yml @@ -0,0 +1,54 @@ +--- +- name: Azure CSI Driver | check azure_csi_tenant_id value + fail: + msg: "azure_csi_tenant_id is missing" + when: azure_csi_tenant_id is not defined or not azure_csi_tenant_id + +- name: Azure CSI Driver | check azure_csi_subscription_id value + fail: + msg: "azure_csi_subscription_id is missing" + when: azure_csi_subscription_id is not defined or not azure_csi_subscription_id + +- name: Azure CSI Driver | check azure_csi_aad_client_id value + fail: + msg: "azure_csi_aad_client_id is missing" + when: azure_csi_aad_client_id is not defined or not azure_csi_aad_client_id + +- name: Azure CSI Driver | check azure_csi_aad_client_secret value + fail: + msg: "azure_csi_aad_client_secret is missing" + when: azure_csi_aad_client_secret is not defined or not azure_csi_aad_client_secret + +- name: Azure CSI Driver | check azure_csi_resource_group value + fail: + msg: "azure_csi_resource_group is missing" + when: azure_csi_resource_group is not defined or not azure_csi_resource_group + +- name: Azure CSI Driver | check azure_csi_location value + fail: + msg: "azure_csi_location is missing" + when: azure_csi_location is not defined or not azure_csi_location + +- name: Azure CSI Driver | check azure_csi_subnet_name value + fail: + msg: "azure_csi_subnet_name is missing" + when: azure_csi_subnet_name is not defined or not azure_csi_subnet_name + +- name: Azure CSI Driver | check azure_csi_security_group_name value + fail: + msg: "azure_csi_security_group_name is missing" + when: azure_csi_security_group_name is not defined or not azure_csi_security_group_name + +- name: Azure CSI Driver | check azure_csi_vnet_name value + fail: + msg: "azure_csi_vnet_name is missing" + when: azure_csi_vnet_name is not defined or not azure_csi_vnet_name + +- name: Azure CSI Driver | check azure_csi_vnet_resource_group value + fail: + msg: "azure_csi_vnet_resource_group is missing" + when: azure_csi_vnet_resource_group is not defined or not azure_csi_vnet_resource_group + +- name: "Azure CSI Driver | check azure_csi_use_instance_metadata is a bool" + assert: + that: azure_csi_use_instance_metadata | type_debug == 'bool' diff --git a/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml new file mode 100644 index 0000000..67ce865 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml @@ -0,0 +1,44 @@ +--- +- include_tasks: azure-credential-check.yml + +- name: Azure CSI Driver | Write Azure CSI cloud-config + template: + src: "azure-csi-cloud-config.j2" + dest: "{{ kube_config_dir }}/azure_csi_cloud_config" + group: "{{ kube_cert_group }}" + mode: 0640 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Azure CSI Driver | Get base64 cloud-config + slurp: + src: "{{ kube_config_dir }}/azure_csi_cloud_config" + register: cloud_config_secret + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Azure CSI Driver | Generate Manifests + template: 
+ src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: azure-csi-azuredisk-driver, file: azure-csi-azuredisk-driver.yml} + - {name: azure-csi-cloud-config-secret, file: azure-csi-cloud-config-secret.yml} + - {name: azure-csi-azuredisk-controller, file: azure-csi-azuredisk-controller-rbac.yml} + - {name: azure-csi-azuredisk-controller, file: azure-csi-azuredisk-controller.yml} + - {name: azure-csi-azuredisk-node-rbac, file: azure-csi-azuredisk-node-rbac.yml} + - {name: azure-csi-azuredisk-node, file: azure-csi-azuredisk-node.yml} + register: azure_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Azure CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ azure_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller-rbac.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller-rbac.yml.j2 new file mode 100644 index 0000000..16f4c98 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller-rbac.yml.j2 @@ -0,0 +1,230 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: 
["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-cluster-driver-registrar-role +rules: + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csidrivers"] + verbs: ["create", "delete"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-driver-registrar-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-cluster-driver-registrar-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-resizer-role +subjects: + - kind: ServiceAccount + name: 
csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller.yml.j2 new file mode 100644 index 0000000..36d38ac --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-controller.yml.j2 @@ -0,0 +1,179 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-controller + namespace: kube-system +spec: + replicas: {{ azure_csi_controller_replicas }} + selector: + matchLabels: + app: csi-azuredisk-controller + template: + metadata: + labels: + app: csi-azuredisk-controller + spec: + hostNetwork: true + serviceAccountName: csi-azuredisk-controller-sa + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + tolerations: + - key: "node-role.kubernetes.io/master" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + effect: "NoSchedule" +{% if azure_csi_controller_affinity %} + affinity: + {{ azure_csi_controller_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} + containers: + - name: csi-provisioner + image: {{ azure_csi_image_repo }}/csi-provisioner:{{ azure_csi_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--feature-gates=Topology=true" + - "--csi-address=$(ADDRESS)" + - "--v=2" + - "--timeout=15s" + - "--leader-election" + - "--worker-threads=40" + - "--extra-create-metadata=true" + - "--strict-topology=true" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-attacher + image: {{ azure_csi_image_repo }}/csi-attacher:{{ azure_csi_attacher_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "-v=2" + - "-csi-address=$(ADDRESS)" + - "-timeout=600s" + - "-leader-election" + - "-worker-threads=500" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-snapshotter + image: {{ azure_csi_image_repo }}/csi-snapshotter:{{ azure_csi_snapshotter_image_tag }} + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" + - "-v=2" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: {{ azure_csi_image_repo }}/csi-resizer:{{ azure_csi_resizer_image_tag }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - '-handle-volume-inuse-error=false' + - "-timeout=60s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - 
name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: {{ azure_csi_image_repo }}/livenessprobe:{{ azure_csi_livenessprobe_image_tag }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29602 + - --v=2 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: {{ azure_csi_plugin_image_repo }}/azuredisk-csi:{{ azure_csi_plugin_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29604" + - "--disable-avset-nodes=true" + - "--drivername=disk.csi.azure.com" + - "--cloud-config-secret-name=cloud-config" + - "--cloud-config-secret-namespace=kube-system" + ports: + - containerPort: 29602 + name: healthz + protocol: TCP + - containerPort: 29604 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + value: "/etc/kubernetes/azure.json" + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + readOnly: true + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + secret: + secretName: cloud-config diff --git a/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-driver.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-driver.yml.j2 new file mode 100644 index 0000000..c7cba34 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-driver.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: disk.csi.azure.com +spec: + attachRequired: true + podInfoOnMount: true + volumeLifecycleModes: # added in Kubernetes 1.16 + - Persistent diff --git a/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node-rbac.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node-rbac.yml.j2 new file mode 100644 index 0000000..d55ea0d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node-rbac.yml.j2 @@ -0,0 +1,30 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-azuredisk-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-azuredisk-node-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node.yml.j2 new file mode 100644 index 0000000..4d80319 --- /dev/null +++ 
b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-node.yml.j2 @@ -0,0 +1,168 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-azuredisk-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-azuredisk-node + template: + metadata: + labels: + app: csi-azuredisk-node + spec: + hostNetwork: true + dnsPolicy: Default + serviceAccountName: csi-azuredisk-node-sa + nodeSelector: + kubernetes.io/os: linux +{% if azure_csi_node_affinity %} + affinity: + {{ azure_csi_node_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} + priorityClassName: system-node-critical + tolerations: + - operator: Exists + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: {{ azure_csi_image_repo }}/livenessprobe:{{ azure_csi_livenessprobe_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29603 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: {{ azure_csi_image_repo }}/csi-node-driver-registrar:{{ azure_csi_node_registrar_image_tag }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: azuredisk + image: {{ azure_csi_plugin_image_repo }}/azuredisk-csi:{{ azure_csi_plugin_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--metrics-address=0.0.0.0:29605" + - "--enable-perf-optimization=true" + - "--drivername=disk.csi.azure.com" + - "--volume-attach-limit=-1" + - "--cloud-config-secret-name=cloud-config" + - "--cloud-config-secret-namespace=kube-system" + ports: + - containerPort: 29603 + name: healthz + protocol: TCP + - containerPort: 29605 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + value: "/etc/kubernetes/azure.json" + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /dev + name: device-dir + - mountPath: /sys/bus/scsi/devices + name: sys-devices-dir + - mountPath: /sys/class/scsi_host/ + name: scsi-host-dir + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/disk.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: 
DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - secret: + defaultMode: 0644 + secretName: cloud-config + name: azure-cred + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /sys/bus/scsi/devices + type: Directory + name: sys-devices-dir + - hostPath: + path: /sys/class/scsi_host/ + type: Directory + name: scsi-host-dir diff --git a/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config-secret.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config-secret.yml.j2 new file mode 100644 index 0000000..f259cec --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config-secret.yml.j2 @@ -0,0 +1,7 @@ +kind: Secret +apiVersion: v1 +metadata: + name: cloud-config + namespace: kube-system +data: + azure.json: {{ cloud_config_secret.content }} diff --git a/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config.j2 b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config.j2 new file mode 100644 index 0000000..d3932f5 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config.j2 @@ -0,0 +1,14 @@ +{ + "cloud":"AzurePublicCloud", + "tenantId": "{{ azure_csi_tenant_id }}", + "subscriptionId": "{{ azure_csi_subscription_id }}", + "aadClientId": "{{ azure_csi_aad_client_id }}", + "aadClientSecret": "{{ azure_csi_aad_client_secret }}", + "location": "{{ azure_csi_location }}", + "resourceGroup": "{{ azure_csi_resource_group }}", + "vnetName": "{{ azure_csi_vnet_name }}", + "vnetResourceGroup": "{{ azure_csi_vnet_resource_group }}", + "subnetName": "{{ azure_csi_subnet_name }}", + "securityGroupName": "{{ azure_csi_security_group_name }}", + "useInstanceMetadata": {{ azure_csi_use_instance_metadata }} +} diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml new file mode 100644 index 0000000..6a13e86 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml @@ -0,0 +1,30 @@ +--- +# To access Cinder, the CSI controller will need credentials to access +# the OpenStack APIs. By default, these values will be +# read from the environment. +cinder_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +cinder_username: "{{ lookup('env','OS_USERNAME') }}" +cinder_password: "{{ lookup('env','OS_PASSWORD') }}" +cinder_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID') }}" +cinder_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME') }}" +cinder_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET') }}" +cinder_region: "{{ lookup('env','OS_REGION_NAME') }}" +cinder_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}" +cinder_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}" +cinder_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" +cinder_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" +cinder_cacert: "{{ lookup('env','OS_CACERT') }}" + +# For now, only Cinder v3 is supported in the Cinder CSI driver +cinder_blockstorage_version: "v3" +cinder_csi_controller_replicas: 1 + +# Optional. Set to true to rescan the block device and verify its size before expanding +# the filesystem.
+# Not all hypervisors have a /sys/class/block/XXX/device/rescan location, therefore if +# you enable this option and your hypervisor doesn't support this, you'll get a warning +# log on resize events. It is recommended to disable this option in this case. +# Defaults to false +# cinder_csi_rescan_on_resize: true + +cinder_tolerations: [] diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml b/kubespray/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml new file mode 100644 index 0000000..cb65f42 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml @@ -0,0 +1,59 @@ +--- +- name: Cinder CSI Driver | check cinder_auth_url value + fail: + msg: "cinder_auth_url is missing" + when: cinder_auth_url is not defined or not cinder_auth_url + +- name: Cinder CSI Driver | check cinder_username or cinder_application_credential_name value + fail: + msg: "you must either set cinder_username or cinder_application_credential_name" + when: + - cinder_username is not defined or not cinder_username + - cinder_application_credential_name is not defined or not cinder_application_credential_name + +- name: Cinder CSI Driver | check cinder_application_credential_id value + fail: + msg: "cinder_application_credential_id is missing" + when: + - cinder_application_credential_name is defined + - cinder_application_credential_name|length > 0 + - cinder_application_credential_id is not defined or not cinder_application_credential_id + +- name: Cinder CSI Driver | check cinder_application_credential_secret value + fail: + msg: "cinder_application_credential_secret is missing" + when: + - cinder_application_credential_name is defined + - cinder_application_credential_name|length > 0 + - cinder_application_credential_secret is not defined or not cinder_application_credential_secret + +- name: Cinder CSI Driver | check cinder_password value + fail: + msg: "cinder_password is missing" + when: + - cinder_username is defined + - cinder_username|length > 0 + - cinder_application_credential_name is not defined or not cinder_application_credential_name + - cinder_application_credential_secret is not defined or not cinder_application_credential_secret + - cinder_password is not defined or not cinder_password + +- name: Cinder CSI Driver | check cinder_region value + fail: + msg: "cinder_region is missing" + when: cinder_region is not defined or not cinder_region + +- name: Cinder CSI Driver | check cinder_tenant_id value + fail: + msg: "one of cinder_tenant_id or cinder_tenant_name must be specified" + when: + - cinder_tenant_id is not defined or not cinder_tenant_id + - cinder_tenant_name is not defined or not cinder_tenant_name + - cinder_application_credential_name is not defined or not cinder_application_credential_name + +- name: Cinder CSI Driver | check cinder_domain_id value + fail: + msg: "one of cinder_domain_id or cinder_domain_name must be specified" + when: + - cinder_domain_id is not defined or not cinder_domain_id + - cinder_domain_name is not defined or not cinder_domain_name + - cinder_application_credential_name is not defined or not cinder_application_credential_name diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-write-cacert.yml b/kubespray/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-write-cacert.yml new file mode 100644 index 0000000..c6d14a2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-write-cacert.yml @@ -0,0 +1,11
@@ +--- +# include to workaround mitogen issue +# https://github.com/dw/mitogen/issues/663 + +- name: Cinder CSI Driver | Write cacert file + copy: + src: "{{ cinder_cacert }}" + dest: "{{ kube_config_dir }}/cinder-cacert.pem" + group: "{{ kube_cert_group }}" + mode: 0640 + delegate_to: "{{ delegate_host_to_write_cacert }}" diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml new file mode 100644 index 0000000..7d5affe --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml @@ -0,0 +1,56 @@ +--- +- include_tasks: cinder-credential-check.yml + +- name: Cinder CSI Driver | Write cacert file + include_tasks: cinder-write-cacert.yml + run_once: true + loop: "{{ groups['k8s_cluster'] }}" + loop_control: + loop_var: delegate_host_to_write_cacert + when: + - inventory_hostname in groups['k8s_cluster'] + - cinder_cacert is defined + - cinder_cacert | length > 0 + +- name: Cinder CSI Driver | Write Cinder cloud-config + template: + src: "cinder-csi-cloud-config.j2" + dest: "{{ kube_config_dir }}/cinder_cloud_config" + group: "{{ kube_cert_group }}" + mode: 0640 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Cinder CSI Driver | Get base64 cloud-config + slurp: + src: "{{ kube_config_dir }}/cinder_cloud_config" + register: cloud_config_secret + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Cinder CSI Driver | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: cinder-csi-driver, file: cinder-csi-driver.yml} + - {name: cinder-csi-cloud-config-secret, file: cinder-csi-cloud-config-secret.yml} + - {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin-rbac.yml} + - {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin.yml} + - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin-rbac.yml} + - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin.yml} + - {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml} + register: cinder_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Cinder CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ cinder_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config-secret.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config-secret.yml.j2 new file mode 100644 index 0000000..cb3cba6 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config-secret.yml.j2 @@ -0,0 +1,10 @@ +# This YAML file contains secret objects, +# which are necessary to run csi cinder plugin. 
+ +kind: Secret +apiVersion: v1 +metadata: + name: cloud-config + namespace: kube-system +data: + cloud.conf: {{ cloud_config_secret.content }} diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config.j2 b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config.j2 new file mode 100644 index 0000000..1a83f7d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config.j2 @@ -0,0 +1,44 @@ +[Global] +auth-url="{{ cinder_auth_url }}" +{% if cinder_application_credential_id|length == 0 and cinder_application_credential_name|length == 0 %} +username="{{ cinder_username }}" +password="{{ cinder_password }}" +{% endif %} +{% if cinder_application_credential_id|length > 0 %} +application-credential-id={{ cinder_application_credential_id }} +{% endif %} +{% if cinder_application_credential_name|length > 0 %} +application-credential-name={{ cinder_application_credential_name }} +{% endif %} +{% if cinder_application_credential_secret|length > 0 %} +application-credential-secret={{ cinder_application_credential_secret }} +{% endif %} +region="{{ cinder_region }}" +{% if cinder_tenant_id|length > 0 %} +tenant-id="{{ cinder_tenant_id }}" +{% endif %} +{% if cinder_tenant_name|length > 0 %} +tenant-name="{{ cinder_tenant_name }}" +{% endif %} +{% if cinder_domain_name|length > 0 %} +domain-name="{{ cinder_domain_name }}" +{% elif cinder_domain_id|length > 0 %} +domain-id ="{{ cinder_domain_id }}" +{% endif %} +{% if cinder_cacert|length > 0 %} +ca-file="{{ kube_config_dir }}/cinder-cacert.pem" +{% endif %} + +[BlockStorage] +{% if cinder_blockstorage_version is defined %} +bs-version={{ cinder_blockstorage_version }} +{% endif %} +{% if cinder_csi_ignore_volume_az is defined %} +ignore-volume-az={{ cinder_csi_ignore_volume_az | bool }} +{% endif %} +{% if node_volume_attach_limit is defined and node_volume_attach_limit != "" %} +node-volume-attach-limit="{{ node_volume_attach_limit }}" +{% endif %} +{% if cinder_csi_rescan_on_resize is defined %} +rescan-on-resize={{ cinder_csi_rescan_on_resize | bool }} +{% endif %} \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 new file mode 100644 index 0000000..d40053a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 @@ -0,0 +1,179 @@ +# This YAML file contains RBAC API objects, +# which are necessary to run csi controller plugin + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-cinder-controller-sa + namespace: kube-system + +--- +# external attacher +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-attacher-binding 
+subjects: + - kind: ServiceAccount + name: csi-cinder-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- +# external Provisioner +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-cinder-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- +# external snapshotter +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: csi-cinder-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-snapshotter-role + apiGroup: rbac.authorization.k8s.io +--- + +# External Resizer +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-role +rules: + # The following rule should be uncommented for plugins that require secrets + # for provisioning. 
+ # - apiGroups: [""] + # resources: ["secrets"] + # verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-binding +subjects: + - kind: ServiceAccount + name: csi-cinder-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-resizer-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 new file mode 100644 index 0000000..6bd671a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 @@ -0,0 +1,156 @@ +# This YAML file contains CSI Controller Plugin Sidecars +# external-attacher, external-provisioner, external-snapshotter + +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-cinder-controllerplugin + namespace: kube-system +spec: + replicas: {{ cinder_csi_controller_replicas }} + selector: + matchLabels: + app: csi-cinder-controllerplugin + template: + metadata: + labels: + app: csi-cinder-controllerplugin + spec: + serviceAccountName: csi-cinder-controller-sa + containers: + - name: csi-attacher + image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + - "--timeout=3m" +{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} + - --leader-election=true +{% endif %} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-provisioner + image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + - "--timeout=3m" + - "--default-fstype=ext4" + - "--extra-create-metadata" +{% if cinder_topology is defined and cinder_topology %} + - --feature-gates=Topology=true +{% endif %} +{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} + - "--leader-election=true" +{% endif %} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-snapshotter + image: {{ csi_snapshotter_image_repo }}:{{ csi_snapshotter_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + - "--timeout=3m" + - "--extra-create-metadata" +{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} + - --leader-election=true +{% endif %} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - name: csi-resizer + image: {{ 
csi_resizer_image_repo }}:{{ csi_resizer_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + - "--timeout=3m" + - "--handle-volume-inuse-error=false" +{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} + - --leader-election=true +{% endif %} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: liveness-probe + image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - name: cinder-csi-plugin + image: {{ cinder_csi_plugin_image_repo }}:{{ cinder_csi_plugin_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - /bin/cinder-csi-plugin + - "--endpoint=$(CSI_ENDPOINT)" + - "--cloud-config=$(CLOUD_CONFIG)" + - "--cluster=$(CLUSTER_NAME)" + env: + - name: CSI_ENDPOINT + value: unix://csi/csi.sock + - name: CLOUD_CONFIG + value: /etc/config/cloud.conf + - name: CLUSTER_NAME + value: kubernetes + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 10 + periodSeconds: 60 + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: secret-cinderplugin + mountPath: /etc/config + readOnly: true + - name: ca-certs + mountPath: /etc/ssl/certs + readOnly: true +{% if cinder_cacert is defined and cinder_cacert != "" %} + - name: cinder-cacert + mountPath: {{ kube_config_dir }}/cinder-cacert.pem + readOnly: true +{% endif %} + volumes: + - name: socket-dir + emptyDir: + - name: secret-cinderplugin + secret: + secretName: cloud-config + - name: ca-certs + hostPath: + path: /etc/ssl/certs + type: DirectoryOrCreate +{% if cinder_cacert is defined and cinder_cacert != "" %} + - name: cinder-cacert + hostPath: + path: {{ kube_config_dir }}/cinder-cacert.pem + type: FileOrCreate +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2 new file mode 100644 index 0000000..5b681e4 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2 @@ -0,0 +1,10 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: cinder.csi.openstack.org +spec: + attachRequired: true + podInfoOnMount: true + volumeLifecycleModes: + - Persistent + - Ephemeral diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin-rbac.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin-rbac.yml.j2 new file mode 100644 index 0000000..db58963 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin-rbac.yml.j2 @@ -0,0 +1,38 @@ +# This YAML defines all API objects to create RBAC roles for csi node plugin. 
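+# The node plugin needs far fewer permissions than the controller: it only
+# publishes events and reads/updates snapshot objects. Attach, provision,
+# resize and snapshot requests are handled by the controller plugin under
+# csi-cinder-controller-sa.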
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-cinder-node-sa + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-nodeplugin-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-nodeplugin-binding +subjects: + - kind: ServiceAccount + name: csi-cinder-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-nodeplugin-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 new file mode 100644 index 0000000..3cdf9bb --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 @@ -0,0 +1,130 @@ +# This YAML file contains driver-registrar & csi driver nodeplugin API objects, +# which are necessary to run csi nodeplugin for cinder. + +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-cinder-nodeplugin + namespace: kube-system +spec: + selector: + matchLabels: + app: csi-cinder-nodeplugin + template: + metadata: + labels: + app: csi-cinder-nodeplugin + spec: + serviceAccountName: csi-cinder-node-sa + hostNetwork: true + containers: + - name: node-driver-registrar + image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/cinder.csi.openstack.org/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: liveness-probe + image: {{ csi_livenessprobe_image_repo }}:{{ csi_livenessprobe_image_tag }} + args: + - "--csi-address=/csi/csi.sock" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: cinder-csi-plugin + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + image: {{ cinder_csi_plugin_image_repo }}:{{ cinder_csi_plugin_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - /bin/cinder-csi-plugin + - "--endpoint=$(CSI_ENDPOINT)" + - "--cloud-config=$(CLOUD_CONFIG)" + env: + - name: CSI_ENDPOINT + value: unix://csi/csi.sock + - name: CLOUD_CONFIG + value: /etc/config/cloud.conf + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + - name: pods-probe-dir + mountPath: /dev + mountPropagation: "HostToContainer" + - name: secret-cinderplugin + mountPath: 
/etc/config + readOnly: true + - name: ca-certs + mountPath: /etc/ssl/certs + readOnly: true +{% if cinder_cacert is defined and cinder_cacert != "" %} + - name: cinder-cacert + mountPath: {{ kube_config_dir }}/cinder-cacert.pem + readOnly: true +{% endif %} + volumes: + - name: socket-dir + hostPath: + path: /var/lib/kubelet/plugins/cinder.csi.openstack.org + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: pods-probe-dir + hostPath: + path: /dev + type: Directory + - name: secret-cinderplugin + secret: + secretName: cloud-config + - name: ca-certs + hostPath: + path: /etc/ssl/certs + type: DirectoryOrCreate +{% if cinder_cacert is defined and cinder_cacert != "" %} + - name: cinder-cacert + hostPath: + path: {{ kube_config_dir }}/cinder-cacert.pem + type: FileOrCreate +{% endif %} +{% if cinder_tolerations %} + tolerations: + {{ cinder_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2 new file mode 100644 index 0000000..391d3b3 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2 @@ -0,0 +1,14 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cinder-csi-pdb + namespace: kube-system +spec: +{% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} + minAvailable: 1 +{% else %} + minAvailable: 0 +{% endif %} + selector: + matchLabels: + app: csi-cinder-controllerplugin diff --git a/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml new file mode 100644 index 0000000..4790931 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- name: CSI CRD | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: volumesnapshotclasses, file: volumesnapshotclasses.yml} + - {name: volumesnapshotcontents, file: volumesnapshotcontents.yml} + - {name: volumesnapshots, file: volumesnapshots.yml} + register: csi_crd_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: CSI CRD | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + wait: true + with_items: + - "{{ csi_crd_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotclasses.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotclasses.yml.j2 new file mode 100644 index 0000000..47e5fd3 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotclasses.yml.j2 @@ -0,0 +1,116 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: 
volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. 
A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotcontents.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotcontents.yml.j2 new file mode 100644 index 0000000..c611221 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshotcontents.yml.j2 @@ -0,0 +1,305 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. 
+ jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. 
+ type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. 
+ format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. 
+ jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. 
Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. 
NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshots.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshots.yml.j2 new file mode 100644 index 0000000..1b41ff8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/csi_crd/templates/volumesnapshots.yml.j2 @@ -0,0 +1,231 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. 
+ jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. 
If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. 
+ format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml new file mode 100644 index 0000000..1ee662e --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml @@ -0,0 +1,2 @@ +--- +gcp_pd_csi_controller_replicas: 1 diff --git a/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml new file mode 100644 index 0000000..59a99f7 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: GCP PD CSI Driver | Check if cloud-sa.json exists + fail: + msg: "Credentials file cloud-sa.json is mandatory" + when: gcp_pd_csi_sa_cred_file is not defined or not gcp_pd_csi_sa_cred_file + +- name: GCP PD CSI Driver | Copy GCP credentials file + copy: + src: "{{ gcp_pd_csi_sa_cred_file }}" + dest: "{{ kube_config_dir }}/cloud-sa.json" + group: "{{ kube_cert_group }}" + mode: 0640 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: GCP PD CSI Driver | Get base64 cloud-sa.json + slurp: + src: "{{ kube_config_dir }}/cloud-sa.json" + register: gcp_cred_secret + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: GCP PD CSI Driver | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: gcp-pd-csi-cred-secret, file: gcp-pd-csi-cred-secret.yml} + - {name: gcp-pd-csi-setup, file: gcp-pd-csi-setup.yml} + - {name: gcp-pd-csi-controller, file: gcp-pd-csi-controller.yml} + - {name: gcp-pd-csi-node, file: gcp-pd-csi-node.yml} + register: gcp_pd_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: GCP PD CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ gcp_pd_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 new file mode 100644 index 0000000..4762093 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 @@ -0,0 +1,75 @@ +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-gce-pd-controller + namespace: kube-system +spec: + serviceName: "csi-gce-pd" + replicas: {{ gcp_pd_csi_controller_replicas }} + selector: + matchLabels: + app: gcp-compute-persistent-disk-csi-driver + template: + metadata: + labels: + app: gcp-compute-persistent-disk-csi-driver + spec: + # Host network must be used for interaction with Workload Identity in GKE + # since it replaces GCE Metadata Server with GKE Metadata Server. 
Remove + # this requirement when issue is resolved and before any exposure of + # metrics ports + hostNetwork: true + serviceAccountName: csi-gce-pd-controller-sa + priorityClassName: csi-gce-pd-controller + containers: + - name: csi-provisioner + image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }} + args: + - "--v=5" + - "--csi-address=/csi/csi.sock" + - "--feature-gates=Topology=true" + - "--default-fstype=ext4" + # - "--run-controller-service=false" # disable the controller service of the CSI driver + # - "--run-node-service=false" # disable the node service of the CSI driver + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-attacher + image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }} + args: + - "--v=5" + - "--csi-address=/csi/csi.sock" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-resizer + image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }} + args: + - "--v=5" + - "--csi-address=/csi/csi.sock" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: gce-pd-driver + # Don't change base image without changing pdImagePlaceholder in + # test/k8s-integration/main.go + image: {{ gcp_pd_csi_plugin_image_repo }}:{{ gcp_pd_csi_plugin_image_tag }} + args: + - "--v=5" + - "--endpoint=unix:/csi/csi.sock" + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/etc/cloud-sa/cloud-sa.json" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: cloud-sa-volume + readOnly: true + mountPath: "/etc/cloud-sa" + volumes: + - name: socket-dir + emptyDir: {} + - name: cloud-sa-volume + secret: + secretName: cloud-sa + volumeClaimTemplates: [] diff --git a/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-cred-secret.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-cred-secret.yml.j2 new file mode 100644 index 0000000..f8291a4 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-cred-secret.yml.j2 @@ -0,0 +1,8 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: cloud-sa + namespace: kube-system +data: + cloud-sa.json: {{ gcp_cred_secret.content }} diff --git a/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 new file mode 100644 index 0000000..204ff97 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 @@ -0,0 +1,111 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-gce-pd-node + namespace: kube-system +spec: + selector: + matchLabels: + app: gcp-compute-persistent-disk-csi-driver + template: + metadata: + labels: + app: gcp-compute-persistent-disk-csi-driver + spec: + # Host network must be used for interaction with Workload Identity in GKE + # since it replaces GCE Metadata Server with GKE Metadata Server. Remove + # this requirement when issue is resolved and before any exposure of + # metrics ports. 
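# Note (illustrative, not part of this patch): the GCP PD tasks above expect
# gcp_pd_csi_sa_cred_file to point at a GCP service-account key on the Ansible control
# host; it is copied to {{ kube_config_dir }}/cloud-sa.json and published as the
# cloud-sa Secret. A minimal inventory sketch; only the two variables below are defined
# in this diff, any enable flag such as gcp_pd_csi_enabled is an assumption:
#   gcp_pd_csi_controller_replicas: 1
#   gcp_pd_csi_sa_cred_file: "credentials/gcp/cloud-sa.json"   # placeholder path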
+ hostNetwork: true + priorityClassName: csi-gce-pd-node + serviceAccountName: csi-gce-pd-node-sa + containers: + - name: csi-driver-registrar + image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }} + args: + - "--v=5" + - "--csi-address=/csi/csi.sock" + - "--kubelet-registration-path=/var/lib/kubelet/plugins/pd.csi.storage.gke.io/csi.sock" + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/pd.csi.storage.gke.io /registration/pd.csi.storage.gke.io-reg.sock"] + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: gce-pd-driver + securityContext: + privileged: true + # Don't change base image without changing pdImagePlaceholder in + # test/k8s-integration/main.go + image: {{ gcp_pd_csi_plugin_image_repo }}:{{ gcp_pd_csi_plugin_image_tag }} + args: + - "--v=5" + - "--endpoint=unix:/csi/csi.sock" + volumeMounts: + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + - name: plugin-dir + mountPath: /csi + - name: device-dir + mountPath: /dev + # The following mounts are required to trigger host udevadm from + # container + - name: udev-rules-etc + mountPath: /etc/udev + - name: udev-rules-lib + mountPath: /lib/udev + - name: udev-socket + mountPath: /run/udev + - name: sys + mountPath: /sys + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/pd.csi.storage.gke.io/ + type: DirectoryOrCreate + - name: device-dir + hostPath: + path: /dev + type: Directory + # The following mounts are required to trigger host udevadm from + # container + - name: udev-rules-etc + hostPath: + path: /etc/udev + type: Directory + - name: udev-rules-lib + hostPath: + path: /lib/udev + type: Directory + - name: udev-socket + hostPath: + path: /run/udev + type: Directory + - name: sys + hostPath: + path: /sys + type: Directory + # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + # See "special case". This will tolerate everything. Node component should + # be scheduled on all nodes. 
+ tolerations: + - operator: Exists diff --git a/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-setup.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-setup.yml.j2 new file mode 100644 index 0000000..4c693b3 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-setup.yml.j2 @@ -0,0 +1,200 @@ +##### Node Service Account, Roles, RoleBindings +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-gce-pd-node-sa + namespace: kube-system + +--- +##### Controller Service Account, Roles, Rolebindings +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-gce-pd-controller-sa + namespace: kube-system + +--- +# xref: https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-controller-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-gce-pd-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- +# xref: https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-controller-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-gce-pd-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: csi-gce-pd-controller +value: 900000000 +globalDefault: false +description: "This priority class should be used for the GCE PD CSI driver controller deployment only." + +--- + +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: csi-gce-pd-node +value: 900001000 +globalDefault: false +description: "This priority class should be used for the GCE PD CSI driver node deployment only." + +--- + +# Resizer must be able to work with PVCs, PVs, SCs. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-resizer-binding +subjects: + - kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-gce-pd-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: csi-gce-pd-node-psp +spec: + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + privileged: true + volumes: + - '*' + hostNetwork: true + allowedHostPaths: + - pathPrefix: "/var/lib/kubelet/plugins_registry/" + - pathPrefix: "/var/lib/kubelet" + - pathPrefix: "/var/lib/kubelet/plugins/pd.csi.storage.gke.io/" + - pathPrefix: "/dev" + - pathPrefix: "/etc/udev" + - pathPrefix: "/lib/udev" + - pathPrefix: "/run/udev" + - pathPrefix: "/sys" +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-gce-pd-node-deploy +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - csi-gce-pd-node-psp +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csi-gce-pd-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: csi-gce-pd-node-deploy +subjects: +- kind: ServiceAccount + name: csi-gce-pd-node-sa + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml new file mode 100644 index 0000000..657b300 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml @@ -0,0 +1,16 @@ +--- +upcloud_csi_controller_replicas: 1 +upcloud_csi_provisioner_image_tag: "v3.1.0" +upcloud_csi_attacher_image_tag: "v3.4.0" +upcloud_csi_resizer_image_tag: "v1.4.0" +upcloud_csi_plugin_image_tag: "v0.3.3" +upcloud_csi_node_image_tag: "v2.5.0" +upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME') }}" +upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD') }}" +upcloud_tolerations: [] +upcloud_csi_enable_volume_snapshot: false +upcloud_csi_snapshot_controller_replicas: 2 +upcloud_csi_snapshotter_image_tag: "v4.2.1" +upcloud_csi_snapshot_controller_image_tag: "v4.2.1" +upcloud_csi_snapshot_validation_webhook_image_tag: "v4.2.1" +upcloud_cacert: "{{ lookup('env','OS_CACERT') }}" \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml new file mode 100644 index 0000000..f37daba --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml @@ -0,0 +1,40 @@ +--- +- name: UpCloud CSI Driver | Check if UPCLOUD_USERNAME exists + fail: + msg: "UpCloud username is missing. 
Env UPCLOUD_USERNAME is mandatory" + when: upcloud_username is not defined or not upcloud_username + +- name: UpCloud CSI Driver | Check if UPCLOUD_PASSWORD exists + fail: + msg: "UpCloud password is missing. Env UPCLOUD_PASSWORD is mandatory" + when: + - upcloud_username is defined + - upcloud_username|length > 0 + - upcloud_password is not defined or not upcloud_password + +- name: UpCloud CSI Driver | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: upcloud-csi-cred-secret, file: upcloud-csi-cred-secret.yml} + - {name: upcloud-csi-setup, file: upcloud-csi-setup.yml} + - {name: upcloud-csi-controller, file: upcloud-csi-controller.yml} + - {name: upcloud-csi-node, file: upcloud-csi-node.yml} + - {name: upcloud-csi-driver, file: upcloud-csi-driver.yml} + register: upcloud_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: UpCloud CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ upcloud_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-controller.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-controller.yml.j2 new file mode 100644 index 0000000..0d52837 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-controller.yml.j2 @@ -0,0 +1,93 @@ +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-upcloud-controller + namespace: kube-system +spec: + serviceName: "csi-upcloud" + replicas: {{ upcloud_csi_controller_replicas }} + selector: + matchLabels: + app: csi-upcloud-controller + template: + metadata: + labels: + app: csi-upcloud-controller + role: csi-upcloud + spec: + priorityClassName: system-cluster-critical + serviceAccount: csi-upcloud-controller-sa + containers: + - name: csi-provisioner + image: registry.k8s.io/sig-storage/csi-provisioner:{{ upcloud_csi_provisioner_image_tag }} + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + - "--timeout=600s" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "Always" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-attacher + image: registry.k8s.io/sig-storage/csi-attacher:{{ upcloud_csi_attacher_image_tag }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--timeout=120s" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "Always" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-resizer + image: registry.k8s.io/sig-storage/csi-resizer:{{ upcloud_csi_resizer_image_tag }} + args: + - "--v=5" + - "--timeout=120s" + - "--csi-address=$(ADDRESS)" + - "--handle-volume-inuse-error=true" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "Always" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-upcloud-plugin + image: ghcr.io/upcloudltd/upcloud-csi:{{ upcloud_csi_plugin_image_tag }} + args: + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodehost=$(NODE_ID)" + - "--username=$(UPCLOUD_USERNAME)" + - 
"--password=$(UPCLOUD_PASSWORD)" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: UPCLOUD_USERNAME + valueFrom: + secretKeyRef: + name: upcloud + key: username + - name: UPCLOUD_PASSWORD + valueFrom: + secretKeyRef: + name: upcloud + key: password + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: "Always" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + imagePullSecrets: + - name: regcred + volumes: + - name: socket-dir + emptyDir: {} \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-cred-secret.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-cred-secret.yml.j2 new file mode 100644 index 0000000..5e91d88 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-cred-secret.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: upcloud + namespace: kube-system +stringData: + username: {{ upcloud_username }} + password: {{ upcloud_password }} diff --git a/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-driver.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-driver.yml.j2 new file mode 100644 index 0000000..363394a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-driver.yml.j2 @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: storage.csi.upcloud.com +spec: + attachRequired: true + podInfoOnMount: true + fsGroupPolicy: File \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-node.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-node.yml.j2 new file mode 100644 index 0000000..7173c6b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-node.yml.j2 @@ -0,0 +1,101 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-upcloud-node + namespace: kube-system +spec: + selector: + matchLabels: + app: csi-upcloud-node + template: + metadata: + labels: + app: csi-upcloud-node + role: csi-upcloud + spec: + priorityClassName: system-node-critical + serviceAccount: csi-upcloud-node-sa + hostNetwork: true + containers: + - name: csi-node-driver-registrar + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:{{ upcloud_csi_node_image_tag }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/storage.csi.upcloud.com/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /csi/ + - name: registration-dir + mountPath: /registration/ + - name: csi-upcloud-plugin + image: ghcr.io/upcloudltd/upcloud-csi:{{ upcloud_csi_plugin_image_tag }} + args: + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodehost=$(NODE_ID)" + - "--username=$(UPCLOUD_USERNAME)" + - "--password=$(UPCLOUD_PASSWORD)" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: UPCLOUD_USERNAME + valueFrom: + secretKeyRef: + name: upcloud + key: username + - name: UPCLOUD_PASSWORD + valueFrom: + secretKeyRef: + name: upcloud + key: password + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: "Always" + 
securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet + # needed so that any mounts setup inside this container are + # propagated back to the host machine. + mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + imagePullSecrets: + - name: regcred + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/storage.csi.upcloud.com + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: device-dir + hostPath: + path: /dev +{% if upcloud_tolerations %} + tolerations: + {{ upcloud_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-setup.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-setup.yml.j2 new file mode 100644 index 0000000..3bc0bd5 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-setup.yml.j2 @@ -0,0 +1,185 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: csi-upcloud-controller-sa + namespace: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-upcloud-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-node-driver-registrar-role + namespace: kube-system +rules: + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "get", "list", "watch", "create", "update", "patch" ] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-node-driver-registrar-binding +subjects: + - kind: ServiceAccount + name: csi-upcloud-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-upcloud-node-driver-registrar-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-provisioner-role +rules: + - apiGroups: [ "" ] + resources: [ "secrets" ] + verbs: [ "get", "list" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "create", "delete" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims" ] + verbs: [ "get", "list", "watch", "update" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "csinodes" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "list", "watch", "create", "update", "patch" ] + - apiGroups: [ "" ] + resources: [ "nodes" ] + verbs: [ "get", "list", "watch" ] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-upcloud-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-upcloud-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- +# Attacher must be able to work with PVs, nodes and VolumeAttachments +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-attacher-role +rules: + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", 
"list", "watch", "update", "patch" ] + - apiGroups: [ "" ] + resources: [ "nodes" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "csinodes" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "volumeattachments" ] + verbs: [ "get", "list", "watch", "update", "patch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "volumeattachments/status" ] + verbs: [ "get", "list", "watch", "update", "patch" ] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-attacher-binding +subjects: + - kind: ServiceAccount + name: csi-upcloud-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-upcloud-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- +# Provisioner must be able to work with endpoints and leases in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kube-system + name: csi-upcloud-provisioner-cfg-role +rules: +- apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-role-cfg-binding + namespace: kube-system +subjects: + - kind: ServiceAccount + name: csi-upcloud-controller-sa + namespace: kube-system +roleRef: + kind: Role + name: csi-upcloud-provisioner-cfg-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-resizer-role +rules: + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "update", "patch" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims/status" ] + verbs: [ "update", "patch" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "list", "watch", "create", "update", "patch" ] + - apiGroups: [ "" ] + resources: [ "pods" ] + verbs: [ "watch", "list" ] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-upcloud-resizer-binding +subjects: + - kind: ServiceAccount + name: csi-upcloud-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-upcloud-resizer-role + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml new file mode 100644 index 0000000..0a4d02d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml @@ -0,0 +1,37 @@ +--- +external_vsphere_vcenter_port: "443" +external_vsphere_insecure: "true" +external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" +external_vsphere_version: "7.0u1" + +vsphere_syncer_image_tag: "v2.5.1" +vsphere_csi_attacher_image_tag: "v3.4.0" +vsphere_csi_controller: "v2.5.1" +vsphere_csi_liveness_probe_image_tag: "v2.6.0" +vsphere_csi_provisioner_image_tag: "v3.1.0" +vsphere_csi_snapshotter_image_tag: "v5.0.1" +vsphere_csi_node_driver_registrar_image_tag: "v2.5.0" +vsphere_csi_driver_image_tag: "v2.5.1" +vsphere_csi_resizer_tag: "v1.4.0" + +# Set to kube-system for backward compatibility, should be 
changed to vmware-system-csi in the long run +vsphere_csi_namespace: "kube-system" + +vsphere_csi_controller_replicas: 1 + +csi_endpoint: '{% if external_vsphere_version >= "7.0u1" %}/csi{% else %}/var/lib/csi/sockets/pluginproxy{% endif %}' + +vsphere_csi_aggressive_node_drain: False +vsphere_csi_aggressive_node_unreachable_timeout: 300 +vsphere_csi_aggressive_node_not_ready_timeout: 300 + +vsphere_csi_node_affinity: {} + +# If this is true, debug information will be displayed but +# may contain some private data, so it is recommended to set it to false +# in the production environment. +unsafe_show_logs: false + +# https://github.com/kubernetes-sigs/vsphere-csi-driver/blob/master/docs/book/features/volume_snapshot.md#how-to-enable-volume-snapshot--restore-feature-in-vsphere-csi- +# according to the above link, we can control the block-volume-snapshot parameter +vsphere_csi_block_volume_snapshot: false \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml new file mode 100644 index 0000000..5983fa0 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml @@ -0,0 +1,54 @@ +--- +- include_tasks: vsphere-credentials-check.yml + +- name: vSphere CSI Driver | Generate CSI cloud-config + template: + src: "{{ item }}.j2" + dest: "{{ kube_config_dir }}/{{ item }}" + mode: 0640 + with_items: + - vsphere-csi-cloud-config + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: vSphere CSI Driver | Generate Manifests + template: + src: "{{ item }}.j2" + dest: "{{ kube_config_dir }}/{{ item }}" + mode: 0644 + with_items: + - vsphere-csi-namespace.yml + - vsphere-csi-driver.yml + - vsphere-csi-controller-rbac.yml + - vsphere-csi-node-rbac.yml + - vsphere-csi-controller-config.yml + - vsphere-csi-controller-deployment.yml + - vsphere-csi-controller-service.yml + - vsphere-csi-node.yml + register: vsphere_csi_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: vSphere CSI Driver | Generate a CSI secret manifest + command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml" + register: vsphere_csi_secret_manifest + when: inventory_hostname == groups['kube_control_plane'][0] + no_log: "{{ not (unsafe_show_logs|bool) }}" + +- name: vSphere CSI Driver | Apply a CSI secret manifest + command: + cmd: "{{ kubectl }} apply -f -" + stdin: "{{ vsphere_csi_secret_manifest.stdout }}" + when: inventory_hostname == groups['kube_control_plane'][0] + no_log: "{{ not (unsafe_show_logs|bool) }}" + +- name: vSphere CSI Driver | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item }}" + state: "latest" + with_items: + - "{{ vsphere_csi_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item }}" diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/tasks/vsphere-credentials-check.yml b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/tasks/vsphere-credentials-check.yml new file mode 100644 index 0000000..3504f60 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/tasks/vsphere-credentials-check.yml @@ -0,0 +1,38 @@ +--- +- name: External vSphere Cloud Provider | check external_vsphere_vcenter_ip value +
fail: + msg: "external_vsphere_vcenter_ip is missing" + when: external_vsphere_vcenter_ip is not defined or not external_vsphere_vcenter_ip + +- name: External vSphere Cloud Provider | check external_vsphere_vcenter_port value + fail: + msg: "external_vsphere_vcenter_port is missing" + when: external_vsphere_vcenter_port is not defined or not external_vsphere_vcenter_port + +- name: External vSphere Cloud Provider | check external_vsphere_insecure value + fail: + msg: "external_vsphere_insecure is missing" + when: external_vsphere_insecure is not defined or not external_vsphere_insecure + +- name: External vSphere Cloud Provider | check external_vsphere_user value + fail: + msg: "external_vsphere_user is missing" + when: external_vsphere_user is not defined or not external_vsphere_user + +- name: External vSphere Cloud Provider | check external_vsphere_password value + fail: + msg: "external_vsphere_password is missing" + when: + - external_vsphere_password is not defined or not external_vsphere_password + +- name: External vSphere Cloud Provider | check external_vsphere_datacenter value + fail: + msg: "external_vsphere_datacenter is missing" + when: + - external_vsphere_datacenter is not defined or not external_vsphere_datacenter + +- name: External vSphere Cloud Provider | check external_vsphere_kubernetes_cluster_id value + fail: + msg: "external_vsphere_kubernetes_cluster_id is missing" + when: + - external_vsphere_kubernetes_cluster_id is not defined or not external_vsphere_kubernetes_cluster_id diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-cloud-config.j2 b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-cloud-config.j2 new file mode 100644 index 0000000..ee5033a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-cloud-config.j2 @@ -0,0 +1,9 @@ +[Global] +cluster-id = "{{ external_vsphere_kubernetes_cluster_id }}" + +[VirtualCenter "{{ external_vsphere_vcenter_ip }}"] +insecure-flag = "{{ external_vsphere_insecure }}" +user = "{{ external_vsphere_user }}" +password = "{{ external_vsphere_password }}" +port = "{{ external_vsphere_vcenter_port }}" +datacenters = "{{ external_vsphere_datacenter }}" diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 new file mode 100644 index 0000000..d7ee521 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 @@ -0,0 +1,24 @@ +apiVersion: v1 +data: + "csi-migration": "false" +{% if external_vsphere_version >= "7.0" %} + "csi-auth-check": "true" +{% else %} + "csi-auth-check": "false" +{% endif %} + "online-volume-extend": "true" + "trigger-csi-fullsync": "false" + "async-query-volume": "true" + "improved-csi-idempotency": "true" + "improved-volume-topology": "true" + "block-volume-snapshot": "{{ vsphere_csi_block_volume_snapshot }}" + "csi-windows-support": "false" +{% if vsphere_csi_controller is version('v2.5.0', '>=') %} + "use-csinode-id": "true" + "pv-to-backingdiskobjectid-mapping": "false" + "cnsmgr-suspend-create-volume": "false" +{% endif %} +kind: ConfigMap +metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: "{{ vsphere_csi_namespace }}" diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-deployment.yml.j2 
b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-deployment.yml.j2 new file mode 100644 index 0000000..1c1de2f --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-deployment.yml.j2 @@ -0,0 +1,220 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-controller + namespace: "{{ vsphere_csi_namespace }}" +spec: + replicas: {{ vsphere_csi_controller_replicas }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 0 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - vsphere-csi-controller + topologyKey: "kubernetes.io/hostname" + serviceAccountName: vsphere-csi-controller + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - operator: "Exists" + key: node-role.kubernetes.io/master + effect: NoSchedule + - operator: "Exists" + key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% if vsphere_csi_aggressive_node_drain %} + # set below toleration if you need an aggressive pod eviction in case when + # node becomes not-ready or unreachable. Default is 300 seconds if not specified. + - key: node.kubernetes.io/not-ready + operator: Exists + effect: NoExecute + tolerationSeconds: {{ vsphere_csi_aggressive_node_not_ready_timeout }} + - key: node.kubernetes.io/unreachable + operator: Exists + effect: NoExecute + tolerationSeconds: {{ vsphere_csi_aggressive_node_unreachable_timeout }} +{% endif %} + dnsPolicy: "Default" + containers: + - name: csi-attacher + image: {{ kube_image_repo }}/sig-storage/csi-attacher:{{ vsphere_csi_attacher_image_tag }} + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir +{% if external_vsphere_version >= "7.0" %} + - name: csi-resizer + image: {{ kube_image_repo }}/sig-storage/csi-resizer:{{ vsphere_csi_resizer_tag }} + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--handle-volume-inuse-error=false" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--leader-election" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir +{% endif %} + - name: vsphere-csi-controller + image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/driver:{{ vsphere_csi_controller }} + args: + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace={{ vsphere_csi_namespace }}" + - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}" + - "--use-gocsi=false" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: CSI_ENDPOINT + value: unix://{{ csi_endpoint }}/csi.sock + - name: X_CSI_MODE + value: "controller" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + - name: VSPHERE_CSI_CONFIG + value: "/etc/cloud/csi-vsphere.conf" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION +{% if external_vsphere_version >= "7.0u1" %} + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" +{% endif %} + volumeMounts: + - mountPath: 
/etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: {{ csi_endpoint }} + name: socket-dir + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + - name: prometheus + containerPort: 2112 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 5 + failureThreshold: 3 + - name: liveness-probe + image: {{ kube_image_repo }}/sig-storage/livenessprobe:{{ vsphere_csi_liveness_probe_image_tag }} + args: + - "--v=4" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: {{ csi_endpoint }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: {{ csi_endpoint }} + - name: vsphere-syncer + image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/syncer:{{ vsphere_syncer_image_tag }} + args: + - "--leader-election" + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace={{ vsphere_csi_namespace }}" + - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP + env: + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VSPHERE_CSI_CONFIG + value: "/etc/cloud/csi-vsphere.conf" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION +{% if external_vsphere_version >= "7.0u1" %} + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" +{% endif %} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - name: csi-provisioner + image: {{ kube_image_repo }}/sig-storage/csi-provisioner:{{ vsphere_csi_provisioner_image_tag }} + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" +{% if vsphere_csi_controller is version('v2.2.0', '>=') %} + - "--kube-api-qps=100" + - "--kube-api-burst=100" +{% endif %} + - "--leader-election" + - "--default-fstype=ext4" + # needed only for topology aware setup + #- "--feature-gates=Topology=true" + #- "--strict-topology" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir +{% if vsphere_csi_controller is version('v2.5.0', '>=') %} + - name: csi-snapshotter + image: {{ kube_image_repo }}/sig-storage/csi-snapshotter:{{ vsphere_csi_snapshotter_image_tag }} + args: + - "--v=4" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir +{% endif %} + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - name: socket-dir + emptyDir: {} diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 new file mode 100644 index 0000000..fd614f9 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 @@ -0,0 +1,86 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-controller + namespace: "{{ vsphere_csi_namespace }}" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-role +rules: + - apiGroups: [""] + resources: ["nodes", "pods", "configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", 
"watch", "update"] +{% if external_vsphere_version >= "7.0" %} + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] +{% if external_vsphere_version >= "7.0u1" %} + verbs: ["patch"] +{% else %} + verbs: ["update", "patch"] +{% endif %} +{% endif %} + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +{% if vsphere_csi_controller is version('v2.0.0', '>=') %} + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +{% endif %} + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses","csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "patch", "update"] + - apiGroups: ["cns.vmware.com"] + resources: ["triggercsifullsyncs"] + verbs: ["create", "get", "update", "watch", "list"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsvspherevolumemigrations"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "create", "update"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsvolumeoperationrequests"] + verbs: ["create", "get", "list", "update", "delete"] + - apiGroups: [ "cns.vmware.com" ] + resources: [ "csinodetopologies" ] + verbs: ["get", "update", "watch", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshots" ] + verbs: [ "get", "list" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotclasses" ] + verbs: [ "watch", "get", "list" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotcontents" ] + verbs: [ "create", "get", "list", "watch", "update", "delete", "patch" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotcontents/status" ] + verbs: [ "update", "patch" ] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-binding +subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: "{{ vsphere_csi_namespace }}" +roleRef: + kind: ClusterRole + name: vsphere-csi-controller-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2 new file mode 100644 index 0000000..75967ba --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2 @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: "{{ vsphere_csi_namespace }}" + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-driver.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-driver.yml.j2 new file mode 100644 index 0000000..ad3260e --- /dev/null +++ 
b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-driver.yml.j2 @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: csi.vsphere.vmware.com +spec: + attachRequired: true + podInfoOnMount: false diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-namespace.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-namespace.yml.j2 new file mode 100644 index 0000000..0a28bda --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-namespace.yml.j2 @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: "{{ vsphere_csi_namespace }}" \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node-rbac.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node-rbac.yml.j2 new file mode 100644 index 0000000..42896e1 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node-rbac.yml.j2 @@ -0,0 +1,55 @@ +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-node + namespace: "{{ vsphere_csi_namespace }}" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-node-cluster-role +rules: + - apiGroups: ["cns.vmware.com"] + resources: ["csinodetopologies"] + verbs: ["create", "watch", "get", "patch" ] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-node-cluster-role-binding +subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: "{{ vsphere_csi_namespace }}" +roleRef: + kind: ClusterRole + name: vsphere-csi-node-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-node-role + namespace: "{{ vsphere_csi_namespace }}" +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-node-binding + namespace: "{{ vsphere_csi_namespace }}" +subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: "{{ vsphere_csi_namespace }}" +roleRef: + kind: Role + name: vsphere-csi-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2 b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2 new file mode 100644 index 0000000..1a8370d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2 @@ -0,0 +1,157 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: vsphere-csi-node + namespace: "{{ vsphere_csi_namespace }}" +spec: + selector: + matchLabels: + app: vsphere-csi-node + updateStrategy: + type: "RollingUpdate" + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + nodeSelector: + kubernetes.io/os: linux +{% if vsphere_csi_node_affinity %} + affinity: + {{ vsphere_csi_node_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} + serviceAccountName: vsphere-csi-node + hostNetwork: true + dnsPolicy: "ClusterFirstWithHostNet" + containers: + - name: node-driver-registrar + image: {{ kube_image_repo }}/sig-storage/csi-node-driver-registrar:{{ vsphere_csi_node_driver_registrar_image_tag 
}} +{% if external_vsphere_version < "7.0u1" %} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] +{% endif %} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + - name: vsphere-csi-node + image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/driver:{{ vsphere_csi_driver_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace={{ vsphere_csi_namespace }}" + - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}" + - "--use-gocsi=false" + imagePullPolicy: "Always" + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" # Maximum number of volumes that controller can publish to the node. If value is not set or zero Kubernetes decide how many volumes can be published by the controller to the node. + - name: X_CSI_MODE + value: "node" + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_DEBUG + value: "true" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet + # needed so that any mounts setup inside this container are + # propagated back to the host machine. 
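# Note (illustrative, not part of this patch): the csi-vsphere.conf rendered earlier
# needs a handful of inventory values before the role will run; the names below are
# taken from vsphere-credentials-check.yml and defaults/main.yml, the values are
# placeholders, and keeping the password in Ansible Vault is an assumption:
#   external_vsphere_vcenter_ip: "vcenter.example.local"
#   external_vsphere_user: "administrator@vsphere.local"
#   external_vsphere_password: "{{ vault_vsphere_password }}"
#   external_vsphere_datacenter: "DC1"
#   # external_vsphere_vcenter_port, external_vsphere_insecure, external_vsphere_version
#   # and external_vsphere_kubernetes_cluster_id fall back to the role defaults.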
+ mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + - name: blocks-dir + mountPath: /sys/block + - name: sys-devices-dir + mountPath: /sys/devices + ports: + - containerPort: 9808 + name: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 5 + failureThreshold: 3 + - name: liveness-probe + image: {{ kube_image_repo }}/sig-storage/livenessprobe:{{ vsphere_csi_liveness_probe_image_tag }} + args: +{% if external_vsphere_version >= "7.0u1" %} + - "--v=4" +{% endif %} + - "--csi-address=/csi/csi.sock" + volumeMounts: + - name: plugin-dir + mountPath: /csi + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: device-dir + hostPath: + path: /dev + - name: blocks-dir + hostPath: + path: /sys/block + type: Directory + - name: sys-devices-dir + hostPath: + path: /sys/devices + type: Directory + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/defaults/main.yml b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/defaults/main.yml new file mode 100644 index 0000000..5d9ba29 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/defaults/main.yml @@ -0,0 +1,14 @@ +--- +external_hcloud_cloud: + hcloud_api_token: "" + token_secret_name: hcloud + + service_account_name: cloud-controller-manager + + controller_image_tag: "latest" + ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset + ## Format: + ## external_hcloud_cloud.controller_extra_args: + ## arg1: "value1" + ## arg2: "value2" + controller_extra_args: {} diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml new file mode 100644 index 0000000..e09f99d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: External Hcloud Cloud Controller | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + group: "{{ kube_cert_group }}" + mode: 0640 + with_items: + - {name: external-hcloud-cloud-secret, file: external-hcloud-cloud-secret.yml} + - {name: external-hcloud-cloud-service-account, file: external-hcloud-cloud-service-account.yml} + - {name: external-hcloud-cloud-role-bindings, file: external-hcloud-cloud-role-bindings.yml} + - {name: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds' }}", file: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks.yml' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds.yml' }}"} + + register: external_hcloud_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + tags: external-hcloud + +- name: External Hcloud Cloud Controller | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ external_hcloud_manifests.results }}" + when: + - 
inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + tags: external-hcloud diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds-with-networks.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds-with-networks.yml.j2 new file mode 100644 index 0000000..c64a566 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds-with-networks.yml.j2 @@ -0,0 +1,72 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: hcloud-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: hcloud-cloud-controller-manger +spec: + selector: + matchLabels: + app: hcloud-cloud-controller-manager + template: + metadata: + labels: + app: hcloud-cloud-controller-manager + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + serviceAccountName: {{ external_hcloud_cloud.service_account_name }} + dnsPolicy: Default + tolerations: + - key: "node.cloudprovider.kubernetes.io/uninitialized" + value: "true" + effect: "NoSchedule" + - key: "CriticalAddonsOnly" + operator: "Exists" + - key: "node-role.kubernetes.io/master" + effect: NoSchedule + operator: Exists + - key: "node-role.kubernetes.io/control-plane" + effect: NoSchedule + operator: Exists + - key: "node.kubernetes.io/not-ready" + effect: "NoSchedule" + hostNetwork: true + containers: + - image: {{ docker_image_repo }}/hetznercloud/hcloud-cloud-controller-manager:{{ external_hcloud_cloud.controller_image_tag }} + name: hcloud-cloud-controller-manager + command: + - "/bin/hcloud-cloud-controller-manager" + - "--cloud-provider=hcloud" + - "--leader-elect=false" + - "--allow-untagged-cloud" + - "--allocate-node-cidrs=true" + - "--cluster-cidr={{ kube_pods_subnet }}" +{% if external_hcloud_cloud.controller_extra_args is defined %} + + args: +{% for key, value in external_hcloud_cloud.controller_extra_args.items() %} + - "{{ '--' + key + '=' + value }}" +{% endfor %} +{% endif %} + resources: + requests: + cpu: 100m + memory: 50Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: HCLOUD_TOKEN + valueFrom: + secretKeyRef: + name: {{ external_hcloud_cloud.token_secret_name }} + key: token + - name: HCLOUD_NETWORK + valueFrom: + secretKeyRef: + name: {{ external_hcloud_cloud.token_secret_name }} + key: network diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds.yml.j2 new file mode 100644 index 0000000..95473cd --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-controller-manager-ds.yml.j2 @@ -0,0 +1,63 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: hcloud-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: hcloud-cloud-controller-manger +spec: + selector: + matchLabels: + app: hcloud-cloud-controller-manager + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: hcloud-cloud-controller-manager + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + serviceAccountName: {{ external_hcloud_cloud.service_account_name }} + dnsPolicy: Default + 
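# Note (illustrative, not part of this patch): this role is only pulled in by the meta
# dependencies below when cloud_provider is "external" and external_cloud_provider is
# "hcloud". A minimal sketch of the variables it consumes; the token value is a
# placeholder, and with_networks is referenced by the tasks and the secret template but
# has no default in this diff, so it apparently has to be provided explicitly:
#   cloud_provider: external
#   external_cloud_provider: hcloud
#   external_hcloud_cloud:
#     hcloud_api_token: "{{ vault_hcloud_api_token }}"
#     with_networks: false   # network_id must also be set when this is true
#     controller_image_tag: "latest"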
tolerations: + - key: "node.cloudprovider.kubernetes.io/uninitialized" + value: "true" + effect: "NoSchedule" + - key: "CriticalAddonsOnly" + operator: "Exists" + - key: "node-role.kubernetes.io/master" + effect: NoSchedule + - key: "node-role.kubernetes.io/control-plane" + effect: NoSchedule + - key: "node.kubernetes.io/not-ready" + effect: "NoSchedule" + containers: + - image: {{ docker_image_repo }}/hetznercloud/hcloud-cloud-controller-manager:{{ external_hcloud_cloud.controller_image_tag }} + name: hcloud-cloud-controller-manager + command: + - "/bin/hcloud-cloud-controller-manager" + - "--cloud-provider=hcloud" + - "--leader-elect=false" + - "--allow-untagged-cloud" +{% if external_hcloud_cloud.controller_extra_args is defined %} + args: +{% for key, value in external_hcloud_cloud.controller_extra_args.items() %} + - "{{ '--' + key + '=' + value }}" +{% endfor %} +{% endif %} + resources: + requests: + cpu: 100m + memory: 50Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: HCLOUD_TOKEN + valueFrom: + secretKeyRef: + name: {{ external_hcloud_cloud.token_secret_name }} + key: token diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-role-bindings.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-role-bindings.yml.j2 new file mode 100644 index 0000000..270c947 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-role-bindings.yml.j2 @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: {{ external_hcloud_cloud.service_account_name }} + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 new file mode 100644 index 0000000..c2ea894 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: "{{ external_hcloud_cloud.token_secret_name }}" + namespace: kube-system +data: + token: "{{ external_hcloud_cloud.hcloud_api_token | b64encode }}" +{% if external_hcloud_cloud.with_networks %} + network: "{{ network_id|b64encode }}" +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-service-account.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-service-account.yml.j2 new file mode 100644 index 0000000..93277dd --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-service-account.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ external_hcloud_cloud.service_account_name }} + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/meta/main.yml b/kubespray/roles/kubernetes-apps/external_cloud_controller/meta/main.yml new file mode 100644 index 0000000..6e8c235 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/meta/main.yml @@ -0,0 
+1,32 @@ +--- +dependencies: + - role: kubernetes-apps/external_cloud_controller/openstack + when: + - cloud_provider is defined + - cloud_provider == "external" + - external_cloud_provider is defined + - external_cloud_provider == "openstack" + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - external-cloud-controller + - external-openstack + - role: kubernetes-apps/external_cloud_controller/vsphere + when: + - cloud_provider is defined + - cloud_provider == "external" + - external_cloud_provider is defined + - external_cloud_provider == "vsphere" + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - external-cloud-controller + - external-vsphere + - role: kubernetes-apps/external_cloud_controller/hcloud + when: + - cloud_provider is defined + - cloud_provider == "external" + - external_cloud_provider is defined + - external_cloud_provider == "hcloud" + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - external-cloud-controller + - external-hcloud diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/OWNERS b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/OWNERS new file mode 100644 index 0000000..6cfbaa8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +reviewers: + - alijahnas + - luckySB diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml new file mode 100644 index 0000000..71af4b4 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml @@ -0,0 +1,24 @@ +--- +# The external cloud controller will need credentials to access +# the OpenStack APIs. By default these values will be +# read from the environment. 
+external_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +external_openstack_username: "{{ lookup('env','OS_USERNAME') }}" +external_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" +external_openstack_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID') }}" +external_openstack_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME') }}" +external_openstack_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET') }}" +external_openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" +external_openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}" +external_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}" +external_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" +external_openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" +external_openstack_cacert: "{{ lookup('env','OS_CACERT') }}" + +## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset +## Format: +## external_openstack_cloud_controller_extra_args: +## arg1: "value1" +## arg2: "value2" +external_openstack_cloud_controller_extra_args: {} +external_openstack_cloud_controller_image_tag: "v1.25.3" diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml new file mode 100644 index 0000000..ac3810c --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml @@ -0,0 +1,48 @@ +--- +- include_tasks: openstack-credential-check.yml + tags: external-openstack + +- name: External OpenStack Cloud Controller | Get base64 cacert + slurp: + src: "{{ external_openstack_cacert }}" + register: external_openstack_cacert_b64 + when: + - inventory_hostname == groups['kube_control_plane'][0] + - external_openstack_cacert is defined + - external_openstack_cacert | length > 0 + tags: external-openstack + +- name: External OpenStack Cloud Controller | Get base64 cloud-config + set_fact: + external_openstack_cloud_config_secret: "{{ lookup('template', 'external-openstack-cloud-config.j2') | b64encode }}" + when: inventory_hostname == groups['kube_control_plane'][0] + tags: external-openstack + +- name: External OpenStack Cloud Controller | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + group: "{{ kube_cert_group }}" + mode: 0640 + with_items: + - {name: external-openstack-cloud-config-secret, file: external-openstack-cloud-config-secret.yml} + - {name: external-openstack-cloud-controller-manager-roles, file: external-openstack-cloud-controller-manager-roles.yml} + - {name: external-openstack-cloud-controller-manager-role-bindings, file: external-openstack-cloud-controller-manager-role-bindings.yml} + - {name: external-openstack-cloud-controller-manager-ds, file: external-openstack-cloud-controller-manager-ds.yml} + register: external_openstack_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + tags: external-openstack + +- name: External OpenStack Cloud Controller | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ external_openstack_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + 
loop_control: + label: "{{ item.item.file }}" + tags: external-openstack diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml new file mode 100644 index 0000000..9abc927 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml @@ -0,0 +1,66 @@ +--- +- name: External OpenStack Cloud Controller | check external_openstack_auth_url value + fail: + msg: "external_openstack_auth_url is missing" + when: external_openstack_auth_url is not defined or not external_openstack_auth_url + + +- name: External OpenStack Cloud Controller | check external_openstack_username or external_openstack_application_credential_name value + fail: + msg: "you must either set external_openstack_username or external_openstack_application_credential_name" + when: + - external_openstack_username is not defined or not external_openstack_username + - external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name + + +- name: External OpenStack Cloud Controller | check external_openstack_application_credential_id value + fail: + msg: "external_openstack_application_credential_id is missing" + when: + - external_openstack_application_credential_name is defined + - external_openstack_application_credential_name|length > 0 + - external_openstack_application_credential_id is not defined or not external_openstack_application_credential_id + + +- name: External OpenStack Cloud Controller | check external_openstack_application_credential_secret value + fail: + msg: "external_openstack_application_credential_secret is missing" + when: + - external_openstack_application_credential_name is defined + - external_openstack_application_credential_name|length > 0 + - external_openstack_application_credential_secret is not defined or not external_openstack_application_credential_secret + + +- name: External OpenStack Cloud Controller | check external_openstack_password value + fail: + msg: "external_openstack_password is missing" + when: + - external_openstack_username is defined + - external_openstack_username|length > 0 + - external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name + - external_openstack_application_credential_secret is not defined or not external_openstack_application_credential_secret + - external_openstack_password is not defined or not external_openstack_password + + +- name: External OpenStack Cloud Controller | check external_openstack_region value + fail: + msg: "external_openstack_region is missing" + when: external_openstack_region is not defined or not external_openstack_region + + +- name: External OpenStack Cloud Controller | check external_openstack_tenant_id value + fail: + msg: "one of external_openstack_tenant_id or external_openstack_tenant_name must be specified" + when: + - external_openstack_tenant_id is not defined or not external_openstack_tenant_id + - external_openstack_tenant_name is not defined or not external_openstack_tenant_name + - external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name + + +- name: External OpenStack Cloud Controller | check external_openstack_domain_id value + fail: + msg: "one of external_openstack_domain_id or external_openstack_domain_name must be specified" + when: + 
- external_openstack_domain_id is not defined or not external_openstack_domain_id + - external_openstack_domain_name is not defined or not external_openstack_domain_name + - external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config-secret.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config-secret.yml.j2 new file mode 100644 index 0000000..2a6f6a8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config-secret.yml.j2 @@ -0,0 +1,13 @@ +# This YAML file contains secret objects, +# which are necessary to run external openstack cloud controller. + +kind: Secret +apiVersion: v1 +metadata: + name: external-openstack-cloud-config + namespace: kube-system +data: + cloud.conf: {{ external_openstack_cloud_config_secret }} +{% if external_openstack_cacert_b64.content is defined %} + ca.cert: {{ external_openstack_cacert_b64.content }} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config.j2 new file mode 100644 index 0000000..adb08ae --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config.j2 @@ -0,0 +1,87 @@ +[Global] +auth-url="{{ external_openstack_auth_url }}" +{% if external_openstack_application_credential_id == "" and external_openstack_application_credential_name == "" %} +username="{{ external_openstack_username }}" +password="{{ external_openstack_password }}" +{% endif %} +{% if external_openstack_application_credential_id is defined and external_openstack_application_credential_id != "" %} +application-credential-id={{ external_openstack_application_credential_id }} +{% endif %} +{% if external_openstack_application_credential_name is defined and external_openstack_application_credential_name != "" %} +application-credential-name={{ external_openstack_application_credential_name }} +{% endif %} +{% if external_openstack_application_credential_secret is defined and external_openstack_application_credential_secret != "" %} +application-credential-secret={{ external_openstack_application_credential_secret }} +{% endif %} +region="{{ external_openstack_region }}" +{% if external_openstack_tenant_id is defined and external_openstack_tenant_id != "" %} +tenant-id="{{ external_openstack_tenant_id }}" +{% endif %} +{% if external_openstack_tenant_name is defined and external_openstack_tenant_name != "" %} +tenant-name="{{ external_openstack_tenant_name }}" +{% endif %} +{% if external_openstack_domain_name is defined and external_openstack_domain_name != "" %} +domain-name="{{ external_openstack_domain_name }}" +{% elif external_openstack_domain_id is defined and external_openstack_domain_id != "" %} +domain-id ="{{ external_openstack_domain_id }}" +{% endif %} +{% if external_openstack_cacert is defined and external_openstack_cacert != "" %} +ca-file="{{ kube_config_dir }}/external-openstack-cacert.pem" +{% endif %} + +[LoadBalancer] +create-monitor={{ external_openstack_lbaas_create_monitor }} +monitor-delay={{ external_openstack_lbaas_monitor_delay }} +monitor-timeout={{ external_openstack_lbaas_monitor_timeout }} 
+monitor-max-retries={{ external_openstack_lbaas_monitor_max_retries }} +{% if external_openstack_lbaas_method is defined %} +lb-method={{ external_openstack_lbaas_method }} +{% endif %} +{% if external_openstack_lbaas_network_id is defined %} +network-id={{ external_openstack_lbaas_network_id }} +{% endif %} +{% if external_openstack_lbaas_subnet_id is defined %} +subnet-id={{ external_openstack_lbaas_subnet_id }} +{% endif %} +{% if external_openstack_lbaas_floating_network_id is defined %} +floating-network-id={{ external_openstack_lbaas_floating_network_id }} +{% endif %} +{% if external_openstack_lbaas_floating_subnet_id is defined %} +floating-subnet-id={{ external_openstack_lbaas_floating_subnet_id }} +{% endif %} +{% if external_openstack_lbaas_manage_security_groups is defined %} +manage-security-groups={{ external_openstack_lbaas_manage_security_groups }} +{% endif %} +{% if external_openstack_lbaas_internal_lb is defined %} +internal-lb={{ external_openstack_lbaas_internal_lb }} +{% endif %} +{% if external_openstack_lbaas_provider is defined %} +lb-provider={{ external_openstack_lbaas_provider }} +use-octavia={{ external_openstack_lbaas_use_octavia }} +{% else %} +lb-provider=octavia +use-octavia=true +{% endif %} +{% if external_openstack_enable_ingress_hostname is defined %} +enable-ingress-hostname={{ external_openstack_enable_ingress_hostname | bool }} +{% endif %} +{% if external_openstack_ingress_hostname_suffix is defined %} +ingress-hostname-suffix={{ external_openstack_ingress_hostname_suffix | string | lower }} +{% endif %} +{% if external_openstack_max_shared_lb is defined %} +max-shared-lb={{ external_openstack_max_shared_lb }} +{% endif %} + +[Networking] +ipv6-support-disabled={{ external_openstack_network_ipv6_disabled | string | lower }} +{% for network_name in external_openstack_network_internal_networks %} +internal-network-name="{{ network_name }}" +{% endfor %} +{% for network_name in external_openstack_network_public_networks %} +public-network-name="{{ network_name }}" +{% endfor %} + +[Metadata] +{% if external_openstack_metadata_search_order is defined %} +search-order="{{ external_openstack_metadata_search_order }}" +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 new file mode 100644 index 0000000..4596f92 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 @@ -0,0 +1,96 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: openstack-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: openstack-cloud-controller-manager +spec: + selector: + matchLabels: + k8s-app: openstack-cloud-controller-manager + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + k8s-app: openstack-cloud-controller-manager + spec: + nodeSelector: + node-role.kubernetes.io/control-plane: "" + securityContext: + runAsUser: 999 + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + serviceAccountName: 
cloud-controller-manager + containers: + - name: openstack-cloud-controller-manager + image: {{ docker_image_repo }}/k8scloudprovider/openstack-cloud-controller-manager:{{ external_openstack_cloud_controller_image_tag }} + args: + - /bin/openstack-cloud-controller-manager + - --v=1 + - --cloud-config=$(CLOUD_CONFIG) + - --cloud-provider=openstack + - --cluster-name={{ cluster_name }} + - --use-service-account-credentials=true + - --bind-address=127.0.0.1 +{% for key, value in external_openstack_cloud_controller_extra_args.items() %} + - "{{ '--' + key + '=' + value }}" +{% endfor %} + volumeMounts: + - mountPath: /etc/kubernetes/pki + name: k8s-certs + readOnly: true + - mountPath: /etc/ssl/certs + name: ca-certs + readOnly: true + - mountPath: /etc/config/cloud.conf + name: cloud-config-volume + readOnly: true + subPath: cloud.conf + - mountPath: {{ kube_config_dir }}/external-openstack-cacert.pem + name: cloud-config-volume + readOnly: true + subPath: ca.cert +{% if kubelet_flexvolumes_plugins_dir is defined %} + - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + name: flexvolume-dir +{% endif %} + resources: + requests: + cpu: 200m + env: + - name: CLOUD_CONFIG + value: /etc/config/cloud.conf + hostNetwork: true + volumes: +{% if kubelet_flexvolumes_plugins_dir is defined %} + - hostPath: + path: "{{ kubelet_flexvolumes_plugins_dir }}" + type: DirectoryOrCreate + name: flexvolume-dir +{% endif %} + - hostPath: + path: /etc/kubernetes/pki + type: DirectoryOrCreate + name: k8s-certs + - hostPath: + path: /etc/ssl/certs + type: DirectoryOrCreate + name: ca-certs + - name: cloud-config-volume + secret: + secretName: external-openstack-cloud-config diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-role-bindings.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-role-bindings.yml.j2 new file mode 100644 index 0000000..bbdf336 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-role-bindings.yml.j2 @@ -0,0 +1,16 @@ +apiVersion: v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system +kind: List +metadata: {} diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-roles.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-roles.yml.j2 new file mode 100644 index 0000000..2ab3a5b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-roles.yml.j2 @@ -0,0 +1,109 @@ +apiVersion: v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + 
verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - '*' + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - list + - get + - watch + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +kind: List +metadata: {} diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml new file mode 100644 index 0000000..91b126e --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml @@ -0,0 +1,14 @@ +--- +external_vsphere_vcenter_port: "443" +external_vsphere_insecure: "true" + +## A dictionary of extra arguments to add to the vsphere cloud controller manager daemonset +## Format: +## external_vsphere_cloud_controller_extra_args: +## arg1: "value1" +## arg2: "value2" +external_vsphere_cloud_controller_extra_args: {} +external_vsphere_cloud_controller_image_tag: "latest" + +external_vsphere_user: "{{ lookup('env','VSPHERE_USER') }}" +external_vsphere_password: "{{ lookup('env','VSPHERE_PASSWORD') }}" diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml new file mode 100644 index 0000000..9c25c72 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml @@ -0,0 +1,48 @@ +--- +- include_tasks: vsphere-credentials-check.yml + +- name: External vSphere Cloud Controller | Generate CPI cloud-config + template: + src: "{{ item }}.j2" + dest: "{{ kube_config_dir }}/{{ item }}" + mode: 0640 + with_items: + - external-vsphere-cpi-cloud-config + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: External vSphere Cloud Controller | Generate Manifests + template: + src: "{{ item }}.j2" + dest: "{{ kube_config_dir }}/{{ item }}" + mode: 0644 + with_items: + - external-vsphere-cpi-cloud-config-secret.yml + - external-vsphere-cloud-controller-manager-roles.yml + - external-vsphere-cloud-controller-manager-role-bindings.yml + - external-vsphere-cloud-controller-manager-ds.yml + register: external_vsphere_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: External vSphere Cloud Provider Interface | Create a CPI configMap manifest + command: "{{ bin_dir }}/kubectl create configmap cloud-config --from-file=vsphere.conf={{ kube_config_dir }}/external-vsphere-cpi-cloud-config -n kube-system --dry-run --save-config -o yaml" + register: external_vsphere_configmap_manifest + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: External vSphere Cloud Provider Interface | Apply a CPI configMap manifest + command: + cmd: "{{ bin_dir }}/kubectl apply -f -" + stdin: "{{ external_vsphere_configmap_manifest.stdout }}" + when: inventory_hostname 
== groups['kube_control_plane'][0] + +- name: External vSphere Cloud Controller | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item }}" + state: "latest" + with_items: + - "{{ external_vsphere_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item }}" diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/vsphere-credentials-check.yml b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/vsphere-credentials-check.yml new file mode 100644 index 0000000..b6c12b8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/vsphere-credentials-check.yml @@ -0,0 +1,32 @@ +--- +- name: External vSphere Cloud Provider | check external_vsphere_vcenter_ip value + fail: + msg: "external_vsphere_vcenter_ip is missing" + when: external_vsphere_vcenter_ip is not defined or not external_vsphere_vcenter_ip + +- name: External vSphere Cloud Provider | check external_vsphere_vcenter_port value + fail: + msg: "external_vsphere_vcenter_port is missing" + when: external_vsphere_vcenter_port is not defined or not external_vsphere_vcenter_port + +- name: External vSphere Cloud Provider | check external_vsphere_insecure value + fail: + msg: "external_vsphere_insecure is missing" + when: external_vsphere_insecure is not defined or not external_vsphere_insecure + +- name: External vSphere Cloud Provider | check external_vsphere_user value + fail: + msg: "external_vsphere_user is missing" + when: external_vsphere_user is not defined or not external_vsphere_user + +- name: External vSphere Cloud Provider | check external_vsphere_password value + fail: + msg: "external_vsphere_password is missing" + when: + - external_vsphere_password is not defined or not external_vsphere_password + +- name: External vSphere Cloud Provider | check external_vsphere_datacenter value + fail: + msg: "external_vsphere_datacenter is missing" + when: + - external_vsphere_datacenter is not defined or not external_vsphere_datacenter diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-ds.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-ds.yml.j2 new file mode 100644 index 0000000..5f1068d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-ds.yml.j2 @@ -0,0 +1,76 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: vsphere-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: vsphere-cloud-controller-manager +spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + nodeSelector: + node-role.kubernetes.io/control-plane: "" + securityContext: + runAsUser: 0 + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + serviceAccountName: cloud-controller-manager + containers: + - name: vsphere-cloud-controller-manager + 
image: {{ gcr_image_repo }}/cloud-provider-vsphere/cpi/release/manager:{{ external_vsphere_cloud_controller_image_tag }} + args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf +{% for key, value in external_vsphere_cloud_controller_extra_args.items() %} + - "{{ '--' + key + '=' + value }}" +{% endfor %} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + resources: + requests: + cpu: 200m + hostNetwork: true + volumes: + - name: vsphere-config-volume + configMap: + name: cloud-config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + component: cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system +spec: + type: NodePort + ports: + - port: 43001 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-role-bindings.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-role-bindings.yml.j2 new file mode 100644 index 0000000..9f6107d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-role-bindings.yml.j2 @@ -0,0 +1,35 @@ +apiVersion: v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - apiGroup: "" + kind: User + name: cloud-controller-manager +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager +kind: List +metadata: {} diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-roles.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-roles.yml.j2 new file mode 100644 index 0000000..2cd7ad0 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-roles.yml.j2 @@ -0,0 +1,91 @@ +apiVersion: v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - 
create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - get + - list + - watch + - create + - update +kind: List +metadata: {} diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config-secret.yml.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config-secret.yml.j2 new file mode 100644 index 0000000..5364f42 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config-secret.yml.j2 @@ -0,0 +1,11 @@ +# This YAML file contains secret objects, +# which are necessary to run external vsphere cloud controller. + +apiVersion: v1 +kind: Secret +metadata: + name: cpi-global-secret + namespace: kube-system +stringData: + {{ external_vsphere_vcenter_ip }}.username: "{{ external_vsphere_user }}" + {{ external_vsphere_vcenter_ip }}.password: "{{ external_vsphere_password }}" diff --git a/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config.j2 b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config.j2 new file mode 100644 index 0000000..a32d876 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config.j2 @@ -0,0 +1,8 @@ +[Global] +port = "{{ external_vsphere_vcenter_port }}" +insecure-flag = "{{ external_vsphere_insecure }}" +secret-name = "cpi-global-secret" +secret-namespace = "kube-system" + +[VirtualCenter "{{ external_vsphere_vcenter_ip }}"] +datacenters = "{{ external_vsphere_datacenter }}" diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml new file mode 100644 index 0000000..577fbff --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml @@ -0,0 +1,10 @@ +--- +cephfs_provisioner_namespace: "cephfs-provisioner" +cephfs_provisioner_cluster: ceph +cephfs_provisioner_monitors: ~ +cephfs_provisioner_admin_id: admin +cephfs_provisioner_secret: secret +cephfs_provisioner_storage_class: cephfs +cephfs_provisioner_reclaim_policy: Delete +cephfs_provisioner_claim_root: /volumes +cephfs_provisioner_deterministic_names: true diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml new file mode 100644 index 0000000..95a2f75 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml @@ -0,0 +1,80 @@ +--- + +- name: CephFS Provisioner | Remove legacy addon dir and manifests + file: + path: "{{ kube_config_dir }}/addons/cephfs_provisioner" + state: absent + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: CephFS Provisioner | Remove legacy namespace + command: > + {{ kubectl }} delete namespace {{ cephfs_provisioner_namespace }} + ignore_errors: true # noqa ignore-errors + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: CephFS Provisioner | Remove legacy storageclass + command: > + {{ kubectl }} delete storageclass {{ 
cephfs_provisioner_storage_class }} + ignore_errors: true # noqa ignore-errors + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: CephFS Provisioner | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/cephfs_provisioner" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: CephFS Provisioner | Templates list + set_fact: + cephfs_provisioner_templates: + - { name: 00-namespace, file: 00-namespace.yml, type: ns } + - { name: secret-cephfs-provisioner, file: secret-cephfs-provisioner.yml, type: secret } + - { name: sa-cephfs-provisioner, file: sa-cephfs-provisioner.yml, type: sa } + - { name: clusterrole-cephfs-provisioner, file: clusterrole-cephfs-provisioner.yml, type: clusterrole } + - { name: clusterrolebinding-cephfs-provisioner, file: clusterrolebinding-cephfs-provisioner.yml, type: clusterrolebinding } + - { name: role-cephfs-provisioner, file: role-cephfs-provisioner.yml, type: role } + - { name: rolebinding-cephfs-provisioner, file: rolebinding-cephfs-provisioner.yml, type: rolebinding } + - { name: deploy-cephfs-provisioner, file: deploy-cephfs-provisioner.yml, type: deploy } + - { name: sc-cephfs-provisioner, file: sc-cephfs-provisioner.yml, type: sc } + cephfs_provisioner_templates_for_psp: + - { name: psp-cephfs-provisioner, file: psp-cephfs-provisioner.yml, type: psp } + +- name: CephFS Provisioner | Append extra templates to CephFS Provisioner Templates list for PodSecurityPolicy + set_fact: + cephfs_provisioner_templates: "{{ cephfs_provisioner_templates_for_psp + cephfs_provisioner_templates }}" + when: + - podsecuritypolicy_enabled + - cephfs_provisioner_namespace != "kube-system" + +- name: CephFS Provisioner | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}" + mode: 0644 + with_items: "{{ cephfs_provisioner_templates }}" + register: cephfs_provisioner_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: CephFS Provisioner | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ cephfs_provisioner_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}" + state: "latest" + with_items: "{{ cephfs_provisioner_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2 new file mode 100644 index 0000000..2a2a67c --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ cephfs_provisioner_namespace }} + labels: + name: {{ cephfs_provisioner_namespace }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..4c92ea6 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2 @@ -0,0 +1,26 @@ +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create", "delete"] + - apiGroups: ["policy"] + resourceNames: ["cephfs-provisioner"] + resources: ["podsecuritypolicies"] + verbs: ["use"] diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..cc5d5ff --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cephfs-provisioner +subjects: + - kind: ServiceAccount + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +roleRef: + kind: ClusterRole + name: cephfs-provisioner + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..ac3bb33 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 @@ -0,0 +1,34 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} + labels: + app: cephfs-provisioner + version: {{ cephfs_provisioner_image_tag }} +spec: + replicas: 1 + selector: + matchLabels: + app: cephfs-provisioner + version: {{ cephfs_provisioner_image_tag }} + template: + metadata: + labels: + app: cephfs-provisioner + version: {{ cephfs_provisioner_image_tag }} + spec: + priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + serviceAccount: cephfs-provisioner + containers: + - name: cephfs-provisioner + image: {{ cephfs_provisioner_image_repo }}:{{ cephfs_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: PROVISIONER_NAME + value: ceph.com/cephfs + command: + - "/usr/local/bin/cephfs-provisioner" + args: + - "-id=cephfs-provisioner-1" diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/psp-cephfs-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/psp-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..76d146c --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/psp-cephfs-provisioner.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cephfs-provisioner + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + 
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..1fb80a1 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "delete"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..01ab87b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +subjects: + - kind: ServiceAccount + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cephfs-provisioner diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..31f87bd --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..dd0e37e --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ cephfs_provisioner_storage_class }} +provisioner: ceph.com/cephfs 
+reclaimPolicy: {{ cephfs_provisioner_reclaim_policy }} +parameters: + cluster: {{ cephfs_provisioner_cluster }} + monitors: {{ cephfs_provisioner_monitors }} + adminId: {{ cephfs_provisioner_admin_id }} + adminSecretName: cephfs-provisioner + adminSecretNamespace: {{ cephfs_provisioner_namespace }} + claimRoot: {{ cephfs_provisioner_claim_root }} + deterministicNames: "{{ cephfs_provisioner_deterministic_names | bool | lower }}" diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2 new file mode 100644 index 0000000..6d73c0c --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2 @@ -0,0 +1,9 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} +type: Opaque +data: + secret: {{ cephfs_provisioner_secret | b64encode }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/defaults/main.yml b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/defaults/main.yml new file mode 100644 index 0000000..278518b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/defaults/main.yml @@ -0,0 +1,9 @@ +--- +local_path_provisioner_enabled: false +local_path_provisioner_namespace: "local-path-storage" +local_path_provisioner_storage_class: "local-path" +local_path_provisioner_reclaim_policy: Delete +local_path_provisioner_claim_root: /opt/local-path-provisioner/ +local_path_provisioner_is_default_storageclass: "true" +local_path_provisioner_debug: false +local_path_provisioner_helper_image_tag: "latest" diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml new file mode 100644 index 0000000..4cf26d8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: Local Path Provisioner | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/local_path_provisioner" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Local Path Provisioner | Create claim root dir + file: + path: "{{ local_path_provisioner_claim_root }}" + state: directory + mode: 0755 + +- name: Local Path Provisioner | Render Template + set_fact: + local_path_provisioner_templates: + - { name: local-path-storage-ns, file: local-path-storage-ns.yml, type: ns } + - { name: local-path-storage-sa, file: local-path-storage-sa.yml, type: sa } + - { name: local-path-storage-cr, file: local-path-storage-cr.yml, type: cr } + - { name: local-path-storage-clusterrolebinding, file: local-path-storage-clusterrolebinding.yml, type: clusterrolebinding } + - { name: local-path-storage-cm, file: local-path-storage-cm.yml, type: cm } + - { name: local-path-storage-deployment, file: local-path-storage-deployment.yml, type: deployment } + - { name: local-path-storage-sc, file: local-path-storage-sc.yml, type: sc } + local_path_provisioner_templates_for_psp_not_system_ns: + - { name: local-path-storage-psp, file: local-path-storage-psp.yml, type: psp } + - { name: local-path-storage-psp-role, file: 
local-path-storage-psp-cr.yml, type: clusterrole } + - { name: local-path-storage-psp-rb, file: local-path-storage-psp-rb.yml, type: rolebinding } + +- name: Local Path Provisioner | Insert extra templates to Local Path Provisioner templates list for PodSecurityPolicy + set_fact: + local_path_provisioner_templates: "{{ local_path_provisioner_templates[:3] + local_path_provisioner_templates_for_psp_not_system_ns + local_path_provisioner_templates[3:] }}" + when: + - podsecuritypolicy_enabled + - local_path_provisioner_namespace != "kube-system" + +- name: Local Path Provisioner | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.file }}" + mode: 0644 + with_items: "{{ local_path_provisioner_templates }}" + register: local_path_provisioner_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Local Path Provisioner | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ local_path_provisioner_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.item.file }}" + state: "latest" + with_items: "{{ local_path_provisioner_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-clusterrolebinding.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..317a71f --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-clusterrolebinding.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-path-provisioner-bind +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: local-path-provisioner-role +subjects: + - kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: {{ local_path_provisioner_namespace }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2 new file mode 100644 index 0000000..8574312 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2 @@ -0,0 +1,59 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: local-path-config + namespace: {{ local_path_provisioner_namespace }} +data: + config.json: |- + { + "nodePathMap":[ + { + "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", + "paths":["{{ local_path_provisioner_claim_root }}"] + } + ] + } + setup: |- + #!/bin/sh + while getopts "m:s:p:" opt + do + case $opt in + p) + absolutePath=$OPTARG + ;; + s) + sizeInBytes=$OPTARG + ;; + m) + volMode=$OPTARG + ;; + esac + done + mkdir -m 0777 -p ${absolutePath} + teardown: |- + #!/bin/sh + while getopts "m:s:p:" opt + do + case $opt in + p) + absolutePath=$OPTARG + ;; + s) + sizeInBytes=$OPTARG + ;; + m) + volMode=$OPTARG + ;; + esac + done + rm -rf ${absolutePath} + helperPod.yaml: |- + apiVersion: v1 + kind: Pod + metadata: + name: helper-pod + spec: + containers: + - name: helper-pod + image: {% if local_path_provisioner_helper_image_repo is defined %}{{ 
local_path_provisioner_helper_image_repo }}:{{ local_path_provisioner_helper_image_tag }}{% else %}busybox{% endif %} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2 new file mode 100644 index 0000000..c97511a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2 @@ -0,0 +1,18 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: local-path-provisioner-role +rules: + - apiGroups: [""] + resources: ["nodes", "persistentvolumeclaims", "configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["endpoints", "persistentvolumes", "pods"] + verbs: ["*"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 new file mode 100644 index 0000000..6922691 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 @@ -0,0 +1,41 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: local-path-provisioner + namespace: {{ local_path_provisioner_namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: local-path-provisioner + template: + metadata: + labels: + app: local-path-provisioner + spec: + serviceAccountName: local-path-provisioner-service-account + containers: + - name: local-path-provisioner + image: {{ local_path_provisioner_image_repo }}:{{ local_path_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - local-path-provisioner + - start + - --config + - /etc/config/config.json +{% if local_path_provisioner_debug|default(false) %} + - --debug +{% endif %} + volumeMounts: + - name: config-volume + mountPath: /etc/config/ + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: config-volume + configMap: + name: local-path-config diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-ns.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-ns.yml.j2 new file mode 100644 index 0000000..1e8c6ce --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-ns.yml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ local_path_provisioner_namespace }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-cr.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-cr.yml.j2 new file mode 100644 index 0000000..65a71f5 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-cr.yml.j2 @@ -0,0 +1,15 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp:local-path-provisioner 
+ namespace: {{ local_path_provisioner_namespace }} +rules: + - apiGroups: + - policy + resourceNames: + - local-path-provisioner + resources: + - podsecuritypolicies + verbs: + - use diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-rb.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-rb.yml.j2 new file mode 100644 index 0000000..c7e6d21 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-rb.yml.j2 @@ -0,0 +1,14 @@ +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp:local-path-provisioner + namespace: {{ local_path_provisioner_namespace }} +subjects: + - kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: {{ local_path_provisioner_namespace }} +roleRef: + kind: ClusterRole + name: psp:local-path-provisioner + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp.yml.j2 new file mode 100644 index 0000000..55d5adb --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp.yml.j2 @@ -0,0 +1,43 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: local-path-provisioner + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + privileged: true + allowPrivilegeEscalation: true + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'secret' + - 'downwardAPI' + - 'hostPath' + allowedHostPaths: + - pathPrefix: "{{ local_path_provisioner_claim_root }}" + readOnly: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sa.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sa.yml.j2 new file mode 100644 index 0000000..128a106 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-path-provisioner-service-account + namespace: {{ local_path_provisioner_namespace }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sc.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sc.yml.j2 new file mode 100644 index 0000000..d662661 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sc.yml.j2 
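The three PSP-related objects above (the psp:local-path-provisioner ClusterRole, the RoleBinding that grants it to the service account, and the PodSecurityPolicy itself) only take effect when the PodSecurityPolicy admission plugin is enabled; together they allow the provisioner's pods to run privileged and mount hostPath volumes under local_path_provisioner_claim_root. As a rough post-deployment check (assuming the role's default namespace of local-path-storage), a query such as kubectl auth can-i use podsecuritypolicy/local-path-provisioner --as=system:serviceaccount:local-path-storage:local-path-provisioner-service-account -n local-path-storage should answer yes.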
@@ -0,0 +1,10 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ local_path_provisioner_storage_class }} + annotations: + storageclass.kubernetes.io/is-default-class: "{{ local_path_provisioner_is_default_storageclass }}" +provisioner: rancher.io/local-path +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: {{ local_path_provisioner_reclaim_policy }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml new file mode 100644 index 0000000..16ed6ff --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml @@ -0,0 +1,20 @@ +--- +local_volume_provisioner_namespace: "kube-system" +# List of node labels to be copied to the PVs created by the provisioner +local_volume_provisioner_nodelabels: [] +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +local_volume_provisioner_tolerations: [] +local_volume_provisioner_use_node_name_only: false +# Leverages Ansible's string to Python datatype casting. Otherwise the dict_key isn't substituted. +# see https://github.com/ansible/ansible/issues/17324 +local_volume_provisioner_storage_classes: | + { + "{{ local_volume_provisioner_storage_class | default('local-storage') }}": { + "host_dir": "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}", + "mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}", + "volume_mode": "Filesystem", + "fs_type": "ext4" + } + } diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/basedirs.yml b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/basedirs.yml new file mode 100644 index 0000000..7add2da --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/basedirs.yml @@ -0,0 +1,12 @@ +--- +# include to workaround mitogen issue +# https://github.com/dw/mitogen/issues/663 + +- name: "Local Volume Provisioner | Ensure base dir {{ delegate_host_base_dir.1 }} is created on {{ delegate_host_base_dir.0 }}" + file: + path: "{{ local_volume_provisioner_storage_classes[delegate_host_base_dir.1].host_dir }}" + state: directory + owner: root + group: root + mode: "{{ local_volume_provisioner_directory_mode }}" + delegate_to: "{{ delegate_host_base_dir.0 }}" diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml new file mode 100644 index 0000000..2308b5c --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml @@ -0,0 +1,48 @@ +--- + +- name: Local Volume Provisioner | Ensure base dir is created on all hosts + include_tasks: basedirs.yml + loop_control: + loop_var: delegate_host_base_dir + loop: "{{ groups['k8s_cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list }}" + +- name: Local Volume Provisioner | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/local_volume_provisioner" + state: directory + owner: root + group: root + mode: 0755 + +- name: Local Volume Provisioner | Templates list + set_fact: + local_volume_provisioner_templates: + - { name: local-volume-provisioner-ns, file: local-volume-provisioner-ns.yml, type: ns } + - { name: 
local-volume-provisioner-sa, file: local-volume-provisioner-sa.yml, type: sa } + - { name: local-volume-provisioner-clusterrole, file: local-volume-provisioner-clusterrole.yml, type: clusterrole } + - { name: local-volume-provisioner-clusterrolebinding, file: local-volume-provisioner-clusterrolebinding.yml, type: clusterrolebinding } + - { name: local-volume-provisioner-cm, file: local-volume-provisioner-cm.yml, type: cm } + - { name: local-volume-provisioner-ds, file: local-volume-provisioner-ds.yml, type: ds } + - { name: local-volume-provisioner-sc, file: local-volume-provisioner-sc.yml, type: sc } + +- name: Local Volume Provisioner | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}" + mode: 0644 + with_items: "{{ local_volume_provisioner_templates }}" + register: local_volume_provisioner_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Local Volume Provisioner | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ local_volume_provisioner_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}" + state: "latest" + with_items: "{{ local_volume_provisioner_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2 new file mode 100644 index 0000000..ada55dd --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2 @@ -0,0 +1,22 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: local-volume-provisioner-node-clusterrole + namespace: {{ local_volume_provisioner_namespace }} +rules: +- apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["watch"] +- apiGroups: ["", "events.k8s.io"] + resources: ["events"] + verbs: ["create", "update", "patch"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..bc286b2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-volume-provisioner-system-node + namespace: {{ local_volume_provisioner_namespace }} +subjects: +- kind: ServiceAccount + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} +roleRef: + kind: ClusterRole + name: local-volume-provisioner-node-clusterrole + apiGroup: rbac.authorization.k8s.io diff --git 
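The local_volume_provisioner defaults above express the storage class map as a Jinja string so that Ansible casts it back into a dict (see the linked ansible issue); in inventory vars it can equally be overridden with a plain dict. A hedged sketch with two illustrative classes (names and paths are placeholders; host_dir and mount_dir are the keys the templates rely on, reclaim_policy is optional):

local_volume_provisioner_storage_classes:
  fast-disks:
    host_dir: /mnt/fast-disks
    mount_dir: /mnt/fast-disks
    volume_mode: Filesystem
    fs_type: ext4
  archive-disks:
    host_dir: /mnt/archive-disks
    mount_dir: /mnt/archive-disks
    reclaim_policy: Retain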
a/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 new file mode 100644 index 0000000..76625b6 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 @@ -0,0 +1,33 @@ +# Macro to convert camelCase dictionary keys to snake_case keys +{% macro convert_keys(mydict) -%} + {% for key in mydict.keys()|list -%} + {% set key_split = key.split('_') -%} + {% set new_key = key_split[0] + key_split[1:]|map('capitalize')|join -%} + {% set value = mydict.pop(key) -%} + {{ mydict.__setitem__(new_key, value) -}} + {{ convert_keys(value) if value is mapping else None -}} + {% endfor -%} +{% endmacro -%} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} +data: +{% if local_volume_provisioner_nodelabels | length > 0 %} + nodeLabelsForPV: | +{% for nodelabel in local_volume_provisioner_nodelabels %} + - {{ nodelabel }} +{% endfor %} +{% endif %} +{% if local_volume_provisioner_use_node_name_only %} + useNodeNameOnly: "true" +{% endif %} + storageClassMap: | +{% for class_name, storage_class in local_volume_provisioner_storage_classes.items() %} + {{ class_name }}: + {{- convert_keys(storage_class) }} + {{ storage_class | to_nice_yaml(indent=2) | indent(6) }} +{%- endfor %} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 new file mode 100644 index 0000000..a8747a2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 @@ -0,0 +1,66 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} + labels: + k8s-app: local-volume-provisioner + version: {{ local_volume_provisioner_image_tag }} +spec: + selector: + matchLabels: + k8s-app: local-volume-provisioner + version: {{ local_volume_provisioner_image_tag }} + template: + metadata: + labels: + k8s-app: local-volume-provisioner + version: {{ local_volume_provisioner_image_tag }} + spec: + priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + serviceAccountName: local-volume-provisioner + nodeSelector: + kubernetes.io/os: linux +{% if local_volume_provisioner_tolerations %} + tolerations: + {{ local_volume_provisioner_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} + containers: + - name: provisioner + image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + securityContext: + privileged: true + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MY_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: local-volume-provisioner + mountPath: /etc/provisioner/config + readOnly: true + - mountPath: /dev + name: provisioner-dev +{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %} + - name: 
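Despite the wording of its header comment, the convert_keys macro in the ConfigMap template above rewrites the snake_case keys used on the Ansible side (host_dir, fs_type, ...) into the camelCase keys the provisioner reads (hostDir, fsType, ...). With the role defaults, the rendered storageClassMap comes out roughly as below (key order is whatever to_nice_yaml produces):

data:
  storageClassMap: |
    local-storage:
      fsType: ext4
      hostDir: /mnt/disks
      mountDir: /mnt/disks
      volumeMode: Filesystem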
local-volume-provisioner-hostpath-{{ class_name }} + mountPath: {{ class_config.mount_dir }} + mountPropagation: "HostToContainer" +{% endfor %} + volumes: + - name: local-volume-provisioner + configMap: + name: local-volume-provisioner + - name: provisioner-dev + hostPath: + path: /dev +{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %} + - name: local-volume-provisioner-hostpath-{{ class_name }} + hostPath: + path: {{ class_config.host_dir }} +{% endfor %} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 new file mode 100644 index 0000000..04a7910 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ local_volume_provisioner_namespace }} + labels: + name: {{ local_volume_provisioner_namespace }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 new file mode 100644 index 0000000..c78a16b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 new file mode 100644 index 0000000..81e0260 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 @@ -0,0 +1,12 @@ +{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %} +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ class_name }} +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer +{% if class_config.reclaim_policy is defined %} +reclaimPolicy: {{ class_config.reclaim_policy }} +{% endif %} +{% endfor %} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/meta/main.yml b/kubespray/roles/kubernetes-apps/external_provisioner/meta/main.yml new file mode 100644 index 0000000..13bc8b6 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/meta/main.yml @@ -0,0 +1,30 @@ +--- +dependencies: + - role: kubernetes-apps/external_provisioner/local_volume_provisioner + when: + - local_volume_provisioner_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - apps + - local-volume-provisioner + - external-provisioner + + - role: kubernetes-apps/external_provisioner/cephfs_provisioner + when: cephfs_provisioner_enabled + tags: + - apps + - cephfs-provisioner + - external-provisioner + + - role: kubernetes-apps/external_provisioner/rbd_provisioner + when: rbd_provisioner_enabled + tags: + - apps + - rbd-provisioner + - external-provisioner + - role: kubernetes-apps/external_provisioner/local_path_provisioner + when: 
local_path_provisioner_enabled + tags: + - apps + - local-path-provisioner + - external-provisioner diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/defaults/main.yml b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/defaults/main.yml new file mode 100644 index 0000000..f09e25a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/defaults/main.yml @@ -0,0 +1,17 @@ +--- +rbd_provisioner_namespace: "rbd-provisioner" +rbd_provisioner_replicas: 2 +rbd_provisioner_monitors: ~ +rbd_provisioner_pool: kube +rbd_provisioner_admin_id: admin +rbd_provisioner_secret_name: ceph-secret-admin +rbd_provisioner_secret: ceph-key-admin +rbd_provisioner_user_id: kube +rbd_provisioner_user_secret_name: ceph-secret-user +rbd_provisioner_user_secret: ceph-key-user +rbd_provisioner_user_secret_namespace: rbd-provisioner +rbd_provisioner_fs_type: ext4 +rbd_provisioner_image_format: "2" +rbd_provisioner_image_features: layering +rbd_provisioner_storage_class: rbd +rbd_provisioner_reclaim_policy: Delete diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml new file mode 100644 index 0000000..1d08376 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml @@ -0,0 +1,80 @@ +--- + +- name: RBD Provisioner | Remove legacy addon dir and manifests + file: + path: "{{ kube_config_dir }}/addons/rbd_provisioner" + state: absent + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: RBD Provisioner | Remove legacy namespace + command: > + {{ kubectl }} delete namespace {{ rbd_provisioner_namespace }} + ignore_errors: true # noqa ignore-errors + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: RBD Provisioner | Remove legacy storageclass + command: > + {{ kubectl }} delete storageclass {{ rbd_provisioner_storage_class }} + ignore_errors: true # noqa ignore-errors + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: RBD Provisioner | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/rbd_provisioner" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: RBD Provisioner | Templates list + set_fact: + rbd_provisioner_templates: + - { name: 00-namespace, file: 00-namespace.yml, type: ns } + - { name: secret-rbd-provisioner, file: secret-rbd-provisioner.yml, type: secret } + - { name: sa-rbd-provisioner, file: sa-rbd-provisioner.yml, type: sa } + - { name: clusterrole-rbd-provisioner, file: clusterrole-rbd-provisioner.yml, type: clusterrole } + - { name: clusterrolebinding-rbd-provisioner, file: clusterrolebinding-rbd-provisioner.yml, type: clusterrolebinding } + - { name: role-rbd-provisioner, file: role-rbd-provisioner.yml, type: role } + - { name: rolebinding-rbd-provisioner, file: rolebinding-rbd-provisioner.yml, type: rolebinding } + - { name: deploy-rbd-provisioner, file: deploy-rbd-provisioner.yml, type: deploy } + - { name: sc-rbd-provisioner, file: sc-rbd-provisioner.yml, type: sc } + rbd_provisioner_templates_for_psp: + - { name: psp-rbd-provisioner, file: psp-rbd-provisioner.yml, type: psp } + +- name: RBD Provisioner | Append extra templates to RBD Provisioner Templates list for PodSecurityPolicy + set_fact: + 
rbd_provisioner_templates: "{{ rbd_provisioner_templates_for_psp + rbd_provisioner_templates }}" + when: + - podsecuritypolicy_enabled + - rbd_provisioner_namespace != "kube-system" + +- name: RBD Provisioner | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}" + mode: 0644 + with_items: "{{ rbd_provisioner_templates }}" + register: rbd_provisioner_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: RBD Provisioner | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ rbd_provisioner_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.item.file }}" + state: "latest" + with_items: "{{ rbd_provisioner_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/00-namespace.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/00-namespace.yml.j2 new file mode 100644 index 0000000..8bec2b5 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/00-namespace.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ rbd_provisioner_namespace }} + labels: + name: {{ rbd_provisioner_namespace }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrole-rbd-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrole-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..8fc7e4b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrole-rbd-provisioner.yml.j2 @@ -0,0 +1,30 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["services"] + resourceNames: ["kube-dns","coredns"] + verbs: ["list", "get"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create", "delete"] + - apiGroups: ["policy"] + resourceNames: ["rbd-provisioner"] + resources: ["podsecuritypolicies"] + verbs: ["use"] diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrolebinding-rbd-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrolebinding-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..ae9e6c5 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrolebinding-rbd-provisioner.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rbd-provisioner +subjects: + - kind: ServiceAccount + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} +roleRef: + kind: ClusterRole + name: rbd-provisioner + apiGroup: rbac.authorization.k8s.io diff --git 
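The RBD provisioner defaults above leave rbd_provisioner_monitors unset and ship placeholder secret values, so a working deployment needs at least the following inventory overrides (addresses and keys are illustrative; the real keys come from ceph auth get-key client.admin and client.kube, and the secret template base64-encodes them):

rbd_provisioner_enabled: true
rbd_provisioner_monitors: "10.0.0.1:6789,10.0.0.2:6789,10.0.0.3:6789"
rbd_provisioner_secret: "AQD...admin-key-placeholder"
rbd_provisioner_user_secret: "AQD...user-key-placeholder"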
a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..dccc165 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 @@ -0,0 +1,40 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} + labels: + app: rbd-provisioner + version: {{ rbd_provisioner_image_tag }} +spec: + replicas: {{ rbd_provisioner_replicas }} + strategy: + type: Recreate + selector: + matchLabels: + app: rbd-provisioner + version: {{ rbd_provisioner_image_tag }} + template: + metadata: + labels: + app: rbd-provisioner + version: {{ rbd_provisioner_image_tag }} + spec: + priorityClassName: {% if rbd_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + serviceAccount: rbd-provisioner + containers: + - name: rbd-provisioner + image: {{ rbd_provisioner_image_repo }}:{{ rbd_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: PROVISIONER_NAME + value: ceph.com/rbd + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + command: + - "/usr/local/bin/rbd-provisioner" + args: + - "-id=${POD_NAME}" diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/psp-rbd-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/psp-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..c59effd --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/psp-rbd-provisioner.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: rbd-provisioner + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/role-rbd-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/role-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..d8dbbf9 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/role-rbd-provisioner.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: 
["get", "list", "watch", "create", "update", "patch"] diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/rolebinding-rbd-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/rolebinding-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..fcae1cc --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/rolebinding-rbd-provisioner.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} +subjects: + - kind: ServiceAccount + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rbd-provisioner diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sa-rbd-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sa-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..c4dce64 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sa-rbd-provisioner.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rbd-provisioner + namespace: {{ rbd_provisioner_namespace }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sc-rbd-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sc-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..9fea17a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sc-rbd-provisioner.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ rbd_provisioner_storage_class }} +provisioner: ceph.com/rbd +reclaimPolicy: {{ rbd_provisioner_reclaim_policy }} +parameters: + monitors: {{ rbd_provisioner_monitors }} + adminId: {{ rbd_provisioner_admin_id }} + adminSecretNamespace: {{ rbd_provisioner_namespace }} + adminSecretName: {{ rbd_provisioner_secret_name }} + pool: {{ rbd_provisioner_pool }} + userId: {{ rbd_provisioner_user_id }} + userSecretNamespace: {{ rbd_provisioner_user_secret_namespace }} + userSecretName: {{ rbd_provisioner_user_secret_name }} + fsType: "{{ rbd_provisioner_fs_type }}" + imageFormat: "{{ rbd_provisioner_image_format }}" + imageFeatures: {{ rbd_provisioner_image_features }} diff --git a/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/secret-rbd-provisioner.yml.j2 b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/secret-rbd-provisioner.yml.j2 new file mode 100644 index 0000000..a3b66d6 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/secret-rbd-provisioner.yml.j2 @@ -0,0 +1,18 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: {{ rbd_provisioner_secret_name }} + namespace: {{ rbd_provisioner_namespace }} +type: Opaque +data: + secret: {{ rbd_provisioner_secret | b64encode }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: {{ rbd_provisioner_user_secret_name }} + namespace: {{ rbd_provisioner_user_secret_namespace }} +type: Opaque +data: + key: {{ rbd_provisioner_user_secret | b64encode }} diff --git a/kubespray/roles/kubernetes-apps/helm/.gitkeep b/kubespray/roles/kubernetes-apps/helm/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git 
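Once the manifests above are in place, workloads consume the class through an ordinary PersistentVolumeClaim; a minimal sketch against the default class name rbd (claim name and size are arbitrary):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-test-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rbd
  resources:
    requests:
      storage: 1Gi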
a/kubespray/roles/kubernetes-apps/helm/defaults/main.yml b/kubespray/roles/kubernetes-apps/helm/defaults/main.yml new file mode 100644 index 0000000..4dc1cca --- /dev/null +++ b/kubespray/roles/kubernetes-apps/helm/defaults/main.yml @@ -0,0 +1,2 @@ +--- +helm_enabled: false diff --git a/kubespray/roles/kubernetes-apps/helm/tasks/main.yml b/kubespray/roles/kubernetes-apps/helm/tasks/main.yml new file mode 100644 index 0000000..fee247b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/helm/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Helm | Download helm + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.helm) }}" + +- name: Helm | Copy helm binary from download dir + copy: + src: "{{ local_release_dir }}/helm-{{ helm_version }}/linux-{{ image_arch }}/helm" + dest: "{{ bin_dir }}/helm" + mode: 0755 + remote_src: true + +- name: Helm | Get helm completion + command: "{{ bin_dir }}/helm completion bash" + changed_when: False + register: helm_completion + check_mode: False + +- name: Helm | Install helm completion + copy: + dest: /etc/bash_completion.d/helm.sh + content: "{{ helm_completion.stdout }}" + mode: 0755 + become: True diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/OWNERS b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/OWNERS new file mode 100644 index 0000000..e8c0fcc --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - kubespray-approvers +reviewers: + - kubespray-reviewers \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/defaults/main.yml b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/defaults/main.yml new file mode 100644 index 0000000..4c8d97e --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/defaults/main.yml @@ -0,0 +1,7 @@ +--- +alb_ingress_controller_namespace: kube-system +alb_ingress_aws_region: "us-east-1" + +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. 
+alb_ingress_aws_debug: "false" diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml new file mode 100644 index 0000000..8a188a4 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml @@ -0,0 +1,36 @@ +--- + +- name: ALB Ingress Controller | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/alb_ingress" + state: directory + owner: root + group: root + mode: 0755 + +- name: ALB Ingress Controller | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/alb_ingress/{{ item.file }}" + mode: 0644 + with_items: + - { name: alb-ingress-clusterrole, file: alb-ingress-clusterrole.yml, type: clusterrole } + - { name: alb-ingress-clusterrolebinding, file: alb-ingress-clusterrolebinding.yml, type: clusterrolebinding } + - { name: alb-ingress-ns, file: alb-ingress-ns.yml, type: ns } + - { name: alb-ingress-sa, file: alb-ingress-sa.yml, type: sa } + - { name: alb-ingress-deploy, file: alb-ingress-deploy.yml, type: deploy } + register: alb_ingress_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: ALB Ingress Controller | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ alb_ingress_controller_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/alb_ingress/{{ item.item.file }}" + state: "latest" + with_items: "{{ alb_ingress_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrole.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrole.yml.j2 new file mode 100644 index 0000000..bc03095 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrole.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alb-ingress + namespace: {{ alb_ingress_controller_namespace }} +rules: + - apiGroups: ["", "extensions"] + resources: ["configmaps", "endpoints", "nodes", "pods", "secrets", "events", "ingresses", "ingresses/status", "services"] + verbs: ["list", "create", "get", "update", "watch", "patch"] + - apiGroups: ["", "extensions"] + resources: ["nodes", "pods", "secrets", "services", "namespaces"] + verbs: ["get", "list", "watch"] diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrolebinding.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..71068f4 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrolebinding.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alb-ingress + namespace: {{ alb_ingress_controller_namespace }} +subjects: + - kind: ServiceAccount + name: alb-ingress + namespace: {{ alb_ingress_controller_namespace }} +roleRef: + kind: ClusterRole + name: alb-ingress + apiGroup: rbac.authorization.k8s.io diff --git 
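The Helm role earlier in this change is gated on helm_enabled, which defaults to false, so it only needs a one-line inventory override to activate (the group_vars path shown is a common convention, not something the role mandates):

# e.g. group_vars/k8s_cluster/addons.yml
helm_enabled: true

With that set, the role copies the downloaded helm binary into bin_dir and writes bash completion to /etc/bash_completion.d/helm.sh on the target hosts, equivalent to running helm completion bash there by hand.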
a/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-deploy.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-deploy.yml.j2 new file mode 100644 index 0000000..a3d2834 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-deploy.yml.j2 @@ -0,0 +1,74 @@ +# Application Load Balancer (ALB) Ingress Controller Deployment Manifest. +# This manifest details sensible defaults for deploying an ALB Ingress Controller. +# GitHub: https://github.com/coreos/alb-ingress-controller +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alb-ingress-controller + labels: + k8s-app: alb-ingress-controller + # Namespace the ALB Ingress Controller should run in. Does not impact which + # namespaces it's able to resolve ingress resource for. For limiting ingress + # namespace scope, see --watch-namespace. + namespace: {{ alb_ingress_controller_namespace }} +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: alb-ingress-controller + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: alb-ingress-controller + spec: + containers: + - args: + # Limit the namespace where this ALB Ingress Controller deployment will + # resolve ingress resources. If left commented, all namespaces are used. + #- --watch-namespace=your-k8s-namespace + + # Setting the ingress-class flag below will ensure that only ingress resources with the + # annotation kubernetes.io/ingress.class: "alb" are respected by the controller. You may + # choose any class you'd like for this controller to respect. + - --ingress-class=alb + # Name of your cluster. Used when naming resources created + # by the ALB Ingress Controller, providing distinction between + # clusters. + - --cluster-name={{ cluster_name }} + + # Enables logging on all outbound requests sent to the AWS API. + # If logging is desired, set to true. + # - ---aws-api-debug +{% if alb_ingress_aws_debug %} + - --aws-api-debug +{% endif %} + # Maximum number of times to retry the aws calls. + # defaults to 10. + # - --aws-max-retries=10 + + # AWS region this ingress controller will operate in. + # If unspecified, it will be discovered from ec2metadata. 
+ # List of regions: http://docs.aws.amazon.com/general/latest/gr/rande.html#vpc_region +{% if alb_ingress_aws_region is defined %} + - --aws-region={{ alb_ingress_aws_region }} +{% endif %} + + image: "{{ alb_ingress_image_repo }}:{{ alb_ingress_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + name: server + resources: {} + terminationMessagePath: /dev/termination-log + dnsPolicy: ClusterFirst + restartPolicy: Always + securityContext: {} + terminationGracePeriodSeconds: 30 +{% if rbac_enabled %} + serviceAccountName: alb-ingress +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-ns.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-ns.yml.j2 new file mode 100644 index 0000000..9f57537 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ alb_ingress_controller_namespace }} + labels: + name: {{ alb_ingress_controller_namespace }} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-sa.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-sa.yml.j2 new file mode 100644 index 0000000..692e3e3 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alb-ingress + namespace: {{ alb_ingress_controller_namespace }} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml b/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml new file mode 100644 index 0000000..b12a1a9 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml @@ -0,0 +1,10 @@ +--- +cert_manager_namespace: "cert-manager" +cert_manager_user: 1001 +cert_manager_tolerations: [] +cert_manager_affinity: {} +cert_manager_nodeselector: {} + +## Change leader election namespace when deploying on GKE Autopilot that forbid the changes on kube-system namespace. 
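As the comments in the ALB controller deployment above note, only Ingress objects carrying the alb ingress class annotation are handled by this controller. A hedged example of such an object (name, namespace, host, and backing service are placeholders, and the Ingress API version this older controller build accepts may differ from the networking.k8s.io/v1 form shown here):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: echoserver
  namespace: default
  annotations:
    kubernetes.io/ingress.class: alb
spec:
  rules:
    - host: echo.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: echoserver
                port:
                  number: 80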
+## See https://github.com/jetstack/cert-manager/issues/3717 +cert_manager_leader_election_namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml new file mode 100644 index 0000000..4af64ad --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml @@ -0,0 +1,56 @@ +--- + +- name: Cert Manager | Remove legacy addon dir and manifests + file: + path: "{{ kube_config_dir }}/addons/cert_manager" + state: absent + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: Cert Manager | Remove legacy namespace + command: > + {{ kubectl }} delete namespace {{ cert_manager_namespace }} + ignore_errors: true # noqa ignore-errors + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: Cert Manager | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/cert_manager" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Cert Manager | Templates list + set_fact: + cert_manager_templates: + - { name: cert-manager, file: cert-manager.yml, type: all } + - { name: cert-manager.crds, file: cert-manager.crds.yml, type: crd } + +- name: Cert Manager | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}" + mode: 0644 + with_items: "{{ cert_manager_templates }}" + register: cert_manager_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Cert Manager | Apply manifests + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}" + state: "latest" + with_items: "{{ cert_manager_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.crds.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.crds.yml.j2 new file mode 100644 index 0000000..854cc43 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.crds.yml.j2 @@ -0,0 +1,4414 @@ +# Copyright 2022 The cert-manager Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
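The template that follows vendors the upstream cert-manager CRDs, starting with ClusterIssuer. Once the role has applied them and cert-manager itself is running, certificate issuance is normally configured through a ClusterIssuer; a hedged sketch is below (account e-mail, secret names, and the choice of a Cloudflare DNS01 solver are placeholders, while the staging server URL is the one quoted in the CRD description):

apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    email: ops@example.com
    privateKeySecretRef:
      name: letsencrypt-staging-account-key
    solvers:
      - dns01:
          cloudflare:
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: api-token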
+ +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterissuers.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: cert-manager.io + names: + kind: ClusterIssuer + listKind: ClusterIssuerList + plural: clusterissuers + singular: clusterissuer + categories: + - cert-manager + scope: Cluster + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the ClusterIssuer resource. + type: object + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + type: object + required: + - privateKeySecretRef + - server + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. 
If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + type: object + required: + - keyID + - keySecretRef + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' + type: string + enum: + - HS256 + - HS384 + - HS512 + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + type: string + maxLength: 64 + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. 
The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + type: array + items: + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. 
If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. 
+ type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. 
For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. 
+ type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + nodeSelector: + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + type: object + required: + - secretName + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + type: array + items: + type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. 
For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + type: object + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + type: array + items: + type: string + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + type: object + required: + - auth + - path + - server + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + type: object + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + caBundle: + description: PEM-encoded CA bundle (base64-encoded) used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the cert-manager controller system root certificates are used to validate the TLS connection. + type: string + format: byte + caBundleSecretRef: + description: CABundleSecretRef is a reference to a Secret which contains the CABundle which will be used when connecting to Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundleSecretRef nor CABundle are defined, the cert-manager controller system root certificates are used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". 
+ type: string + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + status: + description: Status of the ClusterIssuer. This is set and managed automatically. + type: object + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + type: array + items: + description: IssuerCondition contains condition information for an Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`). 
+ type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: challenges.acme.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: acme.cert-manager.io + names: + kind: Challenge + listKind: ChallengeList + plural: challenges + singular: challenge + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.dnsName + name: Domain + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - authorizationURL + - dnsName + - issuerRef + - key + - solver + - token + - type + - url + properties: + authorizationURL: + description: The URL to the ACME Authorization resource that this challenge is a part of. + type: string + dnsName: + description: dnsName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. + type: string + issuerRef: + description: References a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + key: + description: 'The ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `.` text that must be set as the TXT record content.' 
+ type: string + solver: + description: Contains the domain solving configuration that should be used to solve this challenge resource. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + email: + description: Email of the account, only required when using API key based authentication. 
+ type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. 
cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. 
For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. 
+ type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + nodeSelector: + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + token: + description: The ACME challenge token for this challenge. This is the raw value returned from the ACME server. + type: string + type: + description: The type of ACME challenge this resource represents. One of "HTTP-01" or "DNS-01". + type: string + enum: + - HTTP-01 + - DNS-01 + url: + description: The URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge. + type: string + wildcard: + description: wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'. + type: boolean + status: + type: object + properties: + presented: + description: presented will be set to true if the challenge values for this challenge are currently 'presented'. This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured). + type: boolean + processing: + description: Used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. 
It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action. + type: boolean + reason: + description: Contains human readable information on why the Challenge is in the current state. + type: string + state: + description: Contains the current 'state' of the challenge. If not set, the state of the challenge is unknown. + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + served: true + storage: true + subresources: + status: {} +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: certificaterequests.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: cert-manager.io + names: + kind: CertificateRequest + listKind: CertificateRequestList + plural: certificaterequests + shortNames: + - cr + - crs + singular: certificaterequest + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .status.conditions[?(@.type=="Denied")].status + name: Denied + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + type: string + - jsonPath: .spec.username + name: Requestor + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the CertificateRequest resource. 
+ type: object + required: + - issuerRef + - request + properties: + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. + type: string + extra: + description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: object + additionalProperties: + type: array + items: + type: string + groups: + description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: array + items: + type: string + x-kubernetes-list-type: atomic + isCA: + description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + request: + description: The PEM-encoded x509 certificate signing request to be submitted to the CA for signing. + type: string + format: byte + uid: + description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. If usages are set they SHOULD be encoded inside the CSR spec Defaults to `digital signature` and `key encipherment` if not specified. + type: array + items: + description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + username: + description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. 
+ type: string + status: + description: Status of the CertificateRequest. This is set and managed automatically. + type: object + properties: + ca: + description: The PEM encoded x509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available. + type: string + format: byte + certificate: + description: The PEM encoded x509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field. + type: string + format: byte + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready` and `InvalidRequest`. + type: array + items: + description: CertificateRequestCondition contains condition information for a CertificateRequest. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failureTime: + description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. + type: string + format: date-time + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: issuers.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: cert-manager.io + names: + kind: Issuer + listKind: IssuerList + plural: issuers + singular: issuer + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace. 
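# A minimal ACME Issuer sketch against the Let's Encrypt staging directory,
# solving challenges with the HTTP01 ingress solver; the metadata name, email
# address, account key Secret name and ingress class are placeholders:
#
#   apiVersion: cert-manager.io/v1
#   kind: Issuer
#   metadata:
#     name: letsencrypt-staging
#   spec:
#     acme:
#       server: https://acme-staging-v02.api.letsencrypt.org/directory
#       email: admin@example.com                        # placeholder
#       privateKeySecretRef:
#         name: letsencrypt-staging-account-key         # placeholder
#       solvers:
#         - http01:
#             ingress:
#               class: nginx                            # placeholder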
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Issuer resource. + type: object + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + type: object + required: + - privateKeySecretRef + - server + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + type: object + required: + - keyID + - keySecretRef + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' + type: string + enum: + - HS256 + - HS384 + - HS512 + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + type: string + maxLength: 64 + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + type: array + items: + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. 
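# A DNS01 solver entry sketch for the Cloudflare provider defined above, using
# API token authentication; the Secret name and key are placeholders and the
# token is read from an existing Kubernetes Secret:
#
#   solvers:
#     - dns01:
#         cloudflare:
#           apiTokenSecretRef:
#             name: cloudflare-api-token    # placeholder
#             key: api-token                # placeholder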
+ type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. 
Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. 
For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. 
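# An HTTP01 ingress solver sketch using the podTemplate overrides described
# above; only priorityClassName, nodeSelector, affinity, serviceAccountName
# and tolerations are honoured in podTemplate.spec (their schemas follow
# below). The class, label and toleration values are placeholders:
#
#   http01:
#     ingress:
#       class: nginx                                    # placeholder
#       podTemplate:
#         metadata:
#           labels:
#             team: platform                            # placeholder
#         spec:
#           nodeSelector:
#             kubernetes.io/os: linux
#           tolerations:
#             - key: node-role.kubernetes.io/control-plane
#               operator: Exists
#               effect: NoSchedule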
+ type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
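# A sketch of the nodeAffinity stanza defined above, restricting HTTP01 solver
# pods to amd64 Linux nodes via the well-known kubernetes.io/arch and
# kubernetes.io/os node labels:
#
#   affinity:
#     nodeAffinity:
#       requiredDuringSchedulingIgnoredDuringExecution:
#         nodeSelectorTerms:
#           - matchExpressions:
#               - key: kubernetes.io/arch
#                 operator: In
#                 values:
#                   - amd64
#               - key: kubernetes.io/os
#                 operator: In
#                 values:
#                   - linux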
+ type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
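# A sketch of a required podAffinity term as defined above, co-locating solver
# pods on the same node as pods carrying a given label; the
# app.kubernetes.io/name value is a placeholder:
#
#   podAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#       - topologyKey: kubernetes.io/hostname
#         labelSelector:
#           matchLabels:
#             app.kubernetes.io/name: ingress-nginx     # placeholder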
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
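# A sketch of a preferred podAntiAffinity rule as defined above, spreading
# solver pods across nodes; the label used to identify solver pods here is an
# assumption, not taken from this manifest:
#
#   podAntiAffinity:
#     preferredDuringSchedulingIgnoredDuringExecution:
#       - weight: 100
#         podAffinityTerm:
#           topologyKey: kubernetes.io/hostname
#           labelSelector:
#             matchLabels:
#               acme.cert-manager.io/http01-solver: "true"   # assumed solver pod label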
+ type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + nodeSelector: + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. 
Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + type: object + required: + - secretName + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + type: array + items: + type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + type: object + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + type: array + items: + type: string + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + type: object + required: + - auth + - path + - server + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. 
+ type: object + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + caBundle: + description: PEM-encoded CA bundle (base64-encoded) used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the cert-manager controller system root certificates are used to validate the TLS connection. 
+ type: string + format: byte + caBundleSecretRef: + description: CABundleSecretRef is a reference to a Secret which contains the CABundle which will be used when connecting to Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundleSecretRef nor CABundle are defined, the cert-manager controller system root certificates are used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + status: + description: Status of the Issuer. This is set and managed automatically. + type: object + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + type: array + items: + description: IssuerCondition contains condition information for an Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`). 
+ type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: certificates.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: cert-manager.io + names: + kind: Certificate + listKind: CertificateList + plural: certificates + shortNames: + - cert + - certs + singular: certificate + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.secretName + name: Secret + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: "A Certificate resource should be created to ensure an up to date and signed x509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. \n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Certificate resource. + type: object + required: + - issuerRef + - secretName + properties: + additionalOutputFormats: + description: AdditionalOutputFormats defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option on both the controller and webhook components. + type: array + items: + description: CertificateAdditionalOutputFormat defines an additional output format of a Certificate resource. These contain supplementary data formats of the signed certificate chain and paired private key. + type: object + required: + - type + properties: + type: + description: Type is the name of the format type that should be written to the Certificate's target Secret. + type: string + enum: + - DER + - CombinedPEM + commonName: + description: 'CommonName is a common name to be used on the Certificate. 
The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4' + type: string + dnsNames: + description: DNSNames is a list of DNS subjectAltNames to be set on the Certificate. + type: array + items: + type: string + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + emailAddresses: + description: EmailAddresses is a list of email subjectAltNames to be set on the Certificate. + type: array + items: + type: string + encodeUsagesInRequest: + description: EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest + type: boolean + ipAddresses: + description: IPAddresses is a list of IP address subjectAltNames to be set on the Certificate. + type: array + items: + type: string + isCA: + description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + keystores: + description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. + type: object + properties: + jks: + description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. + type: object + required: + - create + - passwordSecretRef + properties: + create: + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + pkcs12: + description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. + type: object + required: + - create + - passwordSecretRef + properties: + create: + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + literalSubject: + description: LiteralSubject is an LDAP formatted string that represents the [X.509 Subject field](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6). Use this *instead* of the Subject field if you need to ensure the correct ordering of the RDN sequence, such as when issuing certs for LDAP authentication. See https://github.com/cert-manager/cert-manager/issues/3203, https://github.com/cert-manager/cert-manager/issues/4424. This field is alpha level and is only supported by cert-manager installations where LiteralCertificateSubject feature gate is enabled on both cert-manager controller and webhook. + type: string + privateKey: + description: Options to control private keys used for the Certificate. + type: object + properties: + algorithm: + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `RSA`,`Ed25519` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. key size is ignored when using the `Ed25519` key algorithm. + type: string + enum: + - RSA + - ECDSA + - Ed25519 + encoding: + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. + type: string + enum: + - PKCS1 + - PKCS8 + rotationPolicy: + description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. 
+ type: string + enum: + - Never + - Always + size: + description: Size is the key bit size of the corresponding private key for this certificate. If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If `algorithm` is set to `Ed25519`, Size is ignored. No other values are allowed. + type: integer + renewBefore: + description: How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + revisionHistoryLimit: + description: revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`. + type: integer + format: int32 + secretName: + description: SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. + type: string + secretTemplate: + description: SecretTemplate defines annotations and labels to be copied to the Certificate's Secret. Labels and annotations on the Secret will be changed as they appear on the SecretTemplate when added or removed. SecretTemplate annotations are added in conjunction with, and cannot overwrite, the base set of annotations cert-manager sets on the Certificate's Secret. + type: object + properties: + annotations: + description: Annotations is a key value map to be copied to the target Kubernetes Secret. + type: object + additionalProperties: + type: string + labels: + description: Labels is a key value map to be copied to the target Kubernetes Secret. + type: object + additionalProperties: + type: string + subject: + description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). + type: object + properties: + countries: + description: Countries to be used on the Certificate. + type: array + items: + type: string + localities: + description: Cities to be used on the Certificate. + type: array + items: + type: string + organizationalUnits: + description: Organizational Units to be used on the Certificate. + type: array + items: + type: string + organizations: + description: Organizations to be used on the Certificate. + type: array + items: + type: string + postalCodes: + description: Postal codes to be used on the Certificate. + type: array + items: + type: string + provinces: + description: State/Provinces to be used on the Certificate. + type: array + items: + type: string + serialNumber: + description: Serial number to be used on the Certificate. + type: string + streetAddresses: + description: Street addresses to be used on the Certificate. + type: array + items: + type: string + uris: + description: URIs is a list of URI subjectAltNames to be set on the Certificate. 
+ type: array + items: + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + type: array + items: + description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + status: + description: Status of the Certificate. This is set and managed automatically. + type: object + properties: + conditions: + description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`. + type: array + items: + description: CertificateCondition contains condition information for an Certificate. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`, `Issuing`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failedIssuanceAttempts: + description: The number of continuous failed issuance attempts up till now. This field gets removed (if set) on a successful issuance and gets set to 1 if unset and an issuance has failed. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). + type: integer + lastFailureTime: + description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. 
+ type: string + format: date-time + nextPrivateKeySecretName: + description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False. + type: string + notAfter: + description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`. + type: string + format: date-time + notBefore: + description: The time after which the certificate stored in the secret named by this resource in spec.secretName is valid. + type: string + format: date-time + renewalTime: + description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled. + type: string + format: date-time + revision: + description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field." + type: integer + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: orders.acme.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + group: acme.cert-manager.io + names: + kind: Order + listKind: OrderList + plural: orders + singular: order + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+              type: string
+            metadata:
+              type: object
+            spec:
+              type: object
+              required:
+                - issuerRef
+                - request
+              properties:
+                commonName:
+                  description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR.
+                  type: string
+                dnsNames:
+                  description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR.
+                  type: array
+                  items:
+                    type: string
+                duration:
+                  description: Duration is the duration for the not after date for the requested certificate. This is set on order creation as per the ACME spec.
+                  type: string
+                ipAddresses:
+                  description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR.
+                  type: array
+                  items:
+                    type: string
+                issuerRef:
+                  description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed.
+                  type: object
+                  required:
+                    - name
+                  properties:
+                    group:
+                      description: Group of the resource being referred to.
+                      type: string
+                    kind:
+                      description: Kind of the resource being referred to.
+                      type: string
+                    name:
+                      description: Name of the resource being referred to.
+                      type: string
+                request:
+                  description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order.
+                  type: string
+                  format: byte
+            status:
+              type: object
+              properties:
+                authorizations:
+                  description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order.
+                  type: array
+                  items:
+                    description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order to validate a DNS name on an ACME Order resource.
+                    type: object
+                    required:
+                      - url
+                    properties:
+                      challenges:
+                        description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process.
+                        type: array
+                        items:
+                          description: Challenge specifies a challenge offered by the ACME server for an Order. An appropriate Challenge resource can be created to perform the ACME challenge process.
+                          type: object
+                          required:
+                            - token
+                            - type
+                            - url
+                          properties:
+                            token:
+                              description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented.
+                              type: string
+                            type:
+                              description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored.
+                              type: string
+                            url:
+                              description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server.
+                              type: string
+                      identifier:
+                        description: Identifier is the DNS name to be validated as part of this authorization
+                        type: string
+                      initialState:
+                        description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created.
+                        type: string
+                        enum:
+                          - valid
+                          - ready
+                          - pending
+                          - processing
+                          - invalid
+                          - expired
+                          - errored
+                      url:
+                        description: URL is the URL of the Authorization that must be completed
+                        type: string
+                      wildcard:
+                        description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'.
+                        type: boolean
+                certificate:
+                  description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state.
+                  type: string
+                  format: byte
+                failureTime:
+                  description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off.
+                  type: string
+                  format: date-time
+                finalizeURL:
+                  description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed.
+                  type: string
+                reason:
+                  description: Reason optionally provides more information about why the order is in the current state.
+                  type: string
+                state:
+                  description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final'
+                  type: string
+                  enum:
+                    - valid
+                    - ready
+                    - pending
+                    - processing
+                    - invalid
+                    - expired
+                    - errored
+                url:
+                  description: URL of the Order. This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set.
+                  type: string
+      served: true
+      storage: true
diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.yml.j2
new file mode 100644
index 0000000..47500e5
--- /dev/null
+++ b/kubespray/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.yml.j2
@@ -0,0 +1,1178 @@
+# Copyright 2022 The cert-manager Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ cert_manager_namespace }} +--- +# Source: cert-manager/templates/cainjector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-cainjector + namespace: {{ cert_manager_namespace }} + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +--- +# Source: cert-manager/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +--- +# Source: cert-manager/templates/webhook-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +--- +# Source: cert-manager/templates/webhook-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" +data: +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Issuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "issuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] 
+--- +# Source: cert-manager/templates/rbac.yaml +# ClusterIssuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "clusterissuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Certificates controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["cert-manager.io"] + resources: ["certificates/finalizers", "certificaterequests/finalizers"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders"] + verbs: ["create", "delete", "get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Orders controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "orders/status"] + verbs: ["update", "patch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "challenges"] + verbs: ["get", "list", "watch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["create", "delete"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + 
verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Challenges controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + # Use to update challenge resource status + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "challenges/status"] + verbs: ["update", "patch"] + # Used to watch challenge resources + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["get", "list", "watch"] + # Used to watch challenges, issuer and clusterissuer resources + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + # Need to be able to retrieve ACME account private key to complete challenges + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Used to create events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + # HTTP01 rules + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [ "gateway.networking.k8s.io" ] + resources: [ "httproutes" ] + verbs: ["get", "list", "watch", "create", "delete", "update"] + # We require the ability to specify a custom hostname when we are creating + # new ingress resources. + # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148 + - apiGroups: ["route.openshift.io"] + resources: ["routes/custom-host"] + verbs: ["create"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges/finalizers"] + verbs: ["update"] + # DNS01 rules (duplicated above) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ingress-shim controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests"] + verbs: ["create", "update", "delete"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses/finalizers"] + verbs: ["update"] + - apiGroups: 
["gateway.networking.k8s.io"]
+    resources: ["gateways", "httproutes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["gateway.networking.k8s.io"]
+    resources: ["gateways/finalizers", "httproutes/finalizers"]
+    verbs: ["update"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["create", "patch"]
+---
+# Source: cert-manager/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cert-manager-view
+  labels:
+    app: cert-manager
+    app.kubernetes.io/name: cert-manager
+    app.kubernetes.io/instance: cert-manager
+    app.kubernetes.io/component: "controller"
+    app.kubernetes.io/version: "{{ cert_manager_version }}"
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+rules:
+  - apiGroups: ["cert-manager.io"]
+    resources: ["certificates", "certificaterequests", "issuers"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["acme.cert-manager.io"]
+    resources: ["challenges", "orders"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: cert-manager/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cert-manager-edit
+  labels:
+    app: cert-manager
+    app.kubernetes.io/name: cert-manager
+    app.kubernetes.io/instance: cert-manager
+    app.kubernetes.io/component: "controller"
+    app.kubernetes.io/version: "{{ cert_manager_version }}"
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+rules:
+  - apiGroups: ["cert-manager.io"]
+    resources: ["certificates", "certificaterequests", "issuers"]
+    verbs: ["create", "delete", "deletecollection", "patch", "update"]
+  - apiGroups: ["cert-manager.io"]
+    resources: ["certificates/status"]
+    verbs: ["update"]
+  - apiGroups: ["acme.cert-manager.io"]
+    resources: ["challenges", "orders"]
+    verbs: ["create", "delete", "deletecollection", "patch", "update"]
+---
+# Source: cert-manager/templates/rbac.yaml
+# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cert-manager-controller-approve:cert-manager-io
+  labels:
+    app: cert-manager
+    app.kubernetes.io/name: cert-manager
+    app.kubernetes.io/instance: cert-manager
+    app.kubernetes.io/component: "cert-manager"
+    app.kubernetes.io/version: "{{ cert_manager_version }}"
+rules:
+  - apiGroups: ["cert-manager.io"]
+    resources: ["signers"]
+    verbs: ["approve"]
+    resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"]
+---
+# Source: cert-manager/templates/rbac.yaml
+# Permission to:
+# - Update and sign CertificateSigningRequests referencing cert-manager.io Issuers and ClusterIssuers
+# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cert-manager-controller-certificatesigningrequests
+  labels:
+    app: cert-manager
+    app.kubernetes.io/name: cert-manager
+    app.kubernetes.io/instance: cert-manager
+    app.kubernetes.io/component: "cert-manager"
+    app.kubernetes.io/version: "{{ cert_manager_version }}"
+rules:
+  - apiGroups: ["certificates.k8s.io"]
+    resources: ["certificatesigningrequests"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["certificates.k8s.io"]
+    resources: ["certificatesigningrequests/status"]
+    verbs: ["update", "patch"]
+  - apiGroups: ["certificates.k8s.io"]
+    resources:
["signers"] + resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] + verbs: ["sign"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: +- apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cainjector +subjects: + - name: cert-manager-cainjector + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-issuers +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-clusterissuers +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificates +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
cert-manager-controller-orders +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-challenges +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-ingress-shim +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-approve:cert-manager-io +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificatesigningrequests +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-webhook:subjectaccessreviews +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# leader election rules +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-cainjector:leaderelection + namespace: {{ cert_manager_leader_election_namespace }} + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: 
cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + # Used for leader election by the controller + # cert-manager-cainjector-leader-election is used by the CertificateBased injector controller + # see cmd/cainjector/start.go#L113 + # cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller + # see cmd/cainjector/start.go#L137 + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager:leaderelection + namespace: {{ cert_manager_leader_election_namespace }} + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-controller"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +rules: +- apiGroups: [""] + resources: ["secrets"] + resourceNames: + - 'cert-manager-webhook-ca' + verbs: ["get", "list", "watch", "update"] +# It's not possible to grant CREATE permission on a single resourceName. 
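+# The create rule below is therefore namespace-wide; the webhook only uses it to create its dynamic serving CA secret (cert-manager-webhook-ca) on first start.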
+- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-cainjector:leaderelection + namespace: {{ cert_manager_leader_election_namespace }} + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-cainjector:leaderelection +subjects: + - kind: ServiceAccount + name: cert-manager-cainjector + namespace: {{ cert_manager_namespace }} +--- +# Source: cert-manager/templates/rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager:leaderelection + namespace: {{ cert_manager_leader_election_namespace }} + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager:leaderelection +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager + namespace: {{ cert_manager_namespace }} +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-webhook:dynamic-serving +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} +--- +# Source: cert-manager/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + type: ClusterIP + ports: + - protocol: TCP + port: 9402 + name: tcp-prometheus-servicemonitor + targetPort: 9402 + selector: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" +--- +# Source: cert-manager/templates/webhook-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + type: ClusterIP + ports: + - name: https + port: 443 + protocol: TCP + targetPort: "https" + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" +--- +# Source: cert-manager/templates/cainjector-deployment.yaml +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: cert-manager-cainjector + namespace: {{ cert_manager_namespace }} + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + template: + metadata: + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "{{ cert_manager_version }}" + spec: + serviceAccountName: cert-manager-cainjector + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-cainjector + image: "{{ cert_manager_cainjector_image_repo }}:{{ cert_manager_cainjector_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --v=2 + - --leader-election-namespace={{ cert_manager_leader_election_namespace }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault +{% if cert_manager_tolerations %} + tolerations: + {{ cert_manager_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} +{% if cert_manager_nodeselector %} + nodeSelector: + {{ cert_manager_nodeselector | to_nice_yaml | indent(width=8) }} +{% endif %} +{% if cert_manager_affinity %} + affinity: + {{ cert_manager_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} +--- +{% if cert_manager_trusted_internal_ca is defined %} +apiVersion: v1 +data: + internal-ca.pem: | + {{ cert_manager_trusted_internal_ca | indent(width=4, first=False) }} +kind: ConfigMap +metadata: + name: ca-internal-truststore + namespace: {{ cert_manager_namespace }} +--- +{% endif %} +# Source: cert-manager/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + template: + metadata: + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "{{ cert_manager_version }}" + annotations: + prometheus.io/path: "/metrics" + prometheus.io/scrape: 'true' + prometheus.io/port: '9402' + spec: + serviceAccountName: cert-manager + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-controller + image: "{{ cert_manager_controller_image_repo }}:{{ cert_manager_controller_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --v=2 + - --cluster-resource-namespace=$(POD_NAMESPACE) + - --leader-election-namespace={{ cert_manager_leader_election_namespace }} + ports: + - containerPort: 9402 + name: http-metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + 
drop: + - ALL + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{% if cert_manager_tolerations %} + tolerations: + {{ cert_manager_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} +{% if cert_manager_nodeselector %} + nodeSelector: + {{ cert_manager_nodeselector | to_nice_yaml | indent(width=8) }} +{% endif %} +{% if cert_manager_affinity %} + affinity: + {{ cert_manager_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} +{% if cert_manager_trusted_internal_ca is defined %} + volumeMounts: + - mountPath: /etc/ssl/certs/internal-ca.pem + name: ca-internal-truststore + subPath: internal-ca.pem + volumes: + - configMap: + defaultMode: 420 + name: ca-internal-truststore + name: ca-internal-truststore +{% endif %} +--- +# Source: cert-manager/templates/webhook-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + template: + metadata: + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" + spec: + serviceAccountName: cert-manager-webhook + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-webhook + image: "{{ cert_manager_webhook_image_repo }}:{{ cert_manager_webhook_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --v=2 + - --secure-port=10250 + - --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) + - --dynamic-serving-ca-secret-name=cert-manager-webhook-ca + - --dynamic-serving-dns-names=cert-manager-webhook + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE) + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE).svc + ports: + - name: https + protocol: TCP + containerPort: 10250 + - name: healthcheck + protocol: TCP + containerPort: 6080 + livenessProbe: + httpGet: + path: /livez + port: 6080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthz + port: 6080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{% if cert_manager_tolerations %} + tolerations: + {{ cert_manager_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} +{% if cert_manager_nodeselector %} + nodeSelector: + {{ cert_manager_nodeselector | to_nice_yaml | indent(width=8) }} +{% endif %} +{% if cert_manager_affinity %} + affinity: + {{ cert_manager_affinity | to_nice_yaml | indent(width=8) }} +{% endif %} +--- +# Source: cert-manager/templates/webhook-mutating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: 
MutatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" + annotations: + cert-manager.io/inject-ca-from-secret: "{{ cert_manager_namespace }}/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). + matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + # Only include 'sideEffects' field in Kubernetes 1.12+ + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + path: /mutate +--- +# Source: cert-manager/templates/webhook-validating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "{{ cert_manager_version }}" + annotations: + cert-manager.io/inject-ca-from-secret: "{{ cert_manager_namespace }}/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + namespaceSelector: + matchExpressions: + - key: "cert-manager.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - cert-manager + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). 
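+    # With failurePolicy Fail and a 10 second timeout, creates/updates of cert-manager resources are rejected if this webhook is unreachable, except in namespaces excluded by the selector above.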
+ matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: {{ cert_manager_namespace }} + path: /validate diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml new file mode 100644 index 0000000..10cf1a7 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml @@ -0,0 +1,20 @@ +--- +ingress_nginx_namespace: "ingress-nginx" +ingress_nginx_host_network: false +ingress_publish_status_address: "" +ingress_nginx_nodeselector: + kubernetes.io/os: "linux" +ingress_nginx_tolerations: [] +ingress_nginx_insecure_port: 80 +ingress_nginx_secure_port: 443 +ingress_nginx_metrics_port: 10254 +ingress_nginx_configmap: {} +ingress_nginx_configmap_tcp_services: {} +ingress_nginx_configmap_udp_services: {} +ingress_nginx_extra_args: [] +ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx +ingress_nginx_webhook_enabled: false +ingress_nginx_webhook_job_ttl: 1800 + +ingress_nginx_probe_initial_delay_seconds: 10 \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml new file mode 100644 index 0000000..cc0ed71 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml @@ -0,0 +1,60 @@ +--- + +- name: NGINX Ingress Controller | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/ingress_nginx" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: NGINX Ingress Controller | Templates list + set_fact: + ingress_nginx_templates: + - { name: 00-namespace, file: 00-namespace.yml, type: ns } + - { name: cm-ingress-nginx, file: cm-ingress-nginx.yml, type: cm } + - { name: cm-tcp-services, file: cm-tcp-services.yml, type: cm } + - { name: cm-udp-services, file: cm-udp-services.yml, type: cm } + - { name: sa-ingress-nginx, file: sa-ingress-nginx.yml, type: sa } + - { name: clusterrole-ingress-nginx, file: clusterrole-ingress-nginx.yml, type: clusterrole } + - { name: clusterrolebinding-ingress-nginx, file: clusterrolebinding-ingress-nginx.yml, type: clusterrolebinding } + - { name: role-ingress-nginx, file: role-ingress-nginx.yml, type: role } + - { name: rolebinding-ingress-nginx, file: rolebinding-ingress-nginx.yml, type: rolebinding } + - { name: ds-ingress-nginx-controller, file: ds-ingress-nginx-controller.yml, type: ds } + ingress_nginx_templates_for_webhook: + - { name: admission-webhook-configuration, file: admission-webhook-configuration.yml, type: sa } + - { name: sa-admission-webhook, file: sa-admission-webhook.yml, type: sa } + - { name: clusterrole-admission-webhook, file: clusterrole-admission-webhook.yml, type: clusterrole } + - { name: clusterrolebinding-admission-webhook, file: clusterrolebinding-admission-webhook.yml, type: clusterrolebinding } + - { name: role-admission-webhook, file: role-admission-webhook.yml, type: role } + - { name: rolebinding-admission-webhook, file: rolebinding-admission-webhook.yml, type: rolebinding } + - { name: admission-webhook-job, file: admission-webhook-job.yml, type: job } + +- name: NGINX Ingress Controller | Append extra templates to NGINX Ingress Templates list for webhook + set_fact: + 
ingress_nginx_templates: "{{ ingress_nginx_templates + ingress_nginx_templates_for_webhook }}" + when: ingress_nginx_webhook_enabled + +- name: NGINX Ingress Controller | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.file }}" + mode: 0644 + with_items: "{{ ingress_nginx_templates }}" + register: ingress_nginx_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: NGINX Ingress Controller | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ ingress_nginx_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.item.file }}" + state: "latest" + with_items: "{{ ingress_nginx_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2 new file mode 100644 index 0000000..1f12366 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ ingress_nginx_namespace }} + labels: + name: {{ ingress_nginx_namespace }} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-configuration.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-configuration.yml.j2 new file mode 100644 index 0000000..d6878a0 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-configuration.yml.j2 @@ -0,0 +1,29 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: ingress-nginx-controller-admission + namespace: {{ ingress_nginx_namespace }} + path: /networking/v1/ingresses + failurePolicy: Fail + matchPolicy: Equivalent + name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + sideEffects: None diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-job.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-job.yml.j2 new file mode 100644 index 0000000..03a8420 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/admission-webhook-job.yml.j2 @@ -0,0 +1,86 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission-create + namespace: {{ ingress_nginx_namespace }} +spec: + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission-create + spec: + containers: + - args: + - create + - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + 
fieldRef: + fieldPath: metadata.namespace + image: "{{ ingress_nginx_kube_webhook_certgen_imae_repo }}:{{ ingress_nginx_kube_webhook_certgen_imae_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + name: create + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission + ttlSecondsAfterFinished: {{ ingress_nginx_webhook_job_ttl }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission-patch + namespace: {{ ingress_nginx_namespace }} +spec: + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission-patch + spec: + containers: + - args: + - patch + - --webhook-name=ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: "{{ ingress_nginx_kube_webhook_certgen_imae_repo }}:{{ ingress_nginx_kube_webhook_certgen_imae_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + name: patch + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission + ttlSecondsAfterFinished: {{ ingress_nginx_webhook_job_ttl }} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-admission-webhook.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-admission-webhook.yml.j2 new file mode 100644 index 0000000..daa4753 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-admission-webhook.yml.j2 @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission +rules: +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2 new file mode 100644 index 0000000..767502e --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2 @@ -0,0 +1,36 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +rules: + - apiGroups: [""] + resources: ["configmaps", "endpoints", "nodes", "pods", "secrets"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + - apiGroups: ["extensions","networking.k8s.io"] + 
resources: ["ingresses/status"] + verbs: ["update"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingressclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["list", "watch"] + - apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["get", "list", "watch"] diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-admission-webhook.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-admission-webhook.yml.j2 new file mode 100644 index 0000000..8791594 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-admission-webhook.yml.j2 @@ -0,0 +1,16 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: {{ ingress_nginx_namespace }} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2 new file mode 100644 index 0000000..ad83dc2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2 @@ -0,0 +1,16 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2 new file mode 100644 index 0000000..9f1e3bb --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +{% if ingress_nginx_configmap %} +data: + {{ ingress_nginx_configmap | to_nice_yaml | indent(2) }} +{%- endif %} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2 new file mode 100644 index 0000000..9752081 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tcp-services + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +{% if ingress_nginx_configmap_tcp_services %} +data: + {{ ingress_nginx_configmap_tcp_services | to_nice_yaml | indent(2) }} +{%- endif %} diff --git 
a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2 new file mode 100644 index 0000000..a3f6613 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: udp-services + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +{% if ingress_nginx_configmap_udp_services %} +data: + {{ ingress_nginx_configmap_udp_services | to_nice_yaml | indent(2) }} +{%- endif %} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 new file mode 100644 index 0000000..6ab4249 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 @@ -0,0 +1,141 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ingress-nginx-controller + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + annotations: + prometheus.io/port: "10254" + prometheus.io/scrape: "true" + spec: + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: {{ ingress_nginx_termination_grace_period_seconds }} +{% if ingress_nginx_host_network %} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet +{% endif %} +{% if ingress_nginx_nodeselector %} + nodeSelector: + {{ ingress_nginx_nodeselector | to_nice_yaml | indent(width=8) }} +{%- endif %} +{% if ingress_nginx_tolerations %} + tolerations: + {{ ingress_nginx_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} + priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + containers: + - name: ingress-nginx-controller + image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/ingress-nginx + - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services + - --udp-services-configmap=$(POD_NAMESPACE)/udp-services + - --annotations-prefix=nginx.ingress.kubernetes.io +{% if ingress_nginx_class is defined %} + - --ingress-class={{ ingress_nginx_class }} +{% else %} + - --watch-ingress-without-class=true +{% endif %} +{% if ingress_nginx_host_network %} + - --report-node-internal-ip-address +{% endif %} +{% if ingress_publish_status_address != "" %} + - --publish-status-address={{ ingress_publish_status_address }} +{% endif %} +{% for extra_arg in ingress_nginx_extra_args %} + - {{ extra_arg }} +{% endfor %} +{% if ingress_nginx_webhook_enabled %} + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key +{% endif %} + securityContext: + capabilities: + drop: + - ALL + add: 
+ - NET_BIND_SERVICE + # www-data -> 101 + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + ports: + - name: http + containerPort: 80 + hostPort: {{ ingress_nginx_insecure_port }} + - name: https + containerPort: 443 + hostPort: {{ ingress_nginx_secure_port }} + - name: metrics + containerPort: 10254 +{% if not ingress_nginx_host_network %} + hostPort: {{ ingress_nginx_metrics_port }} +{% endif %} +{% if ingress_nginx_webhook_enabled %} + - name: webhook + containerPort: 8443 + protocol: TCP +{% endif %} + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: {{ ingress_nginx_probe_initial_delay_seconds }} + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: {{ ingress_nginx_probe_initial_delay_seconds }} + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 +{% if ingress_nginx_webhook_enabled %} + volumeMounts: + - mountPath: /usr/local/certificates/ + name: webhook-cert + readOnly: true +{% endif %} +{% if ingress_nginx_webhook_enabled %} + volumes: + - name: webhook-cert + secret: + secretName: ingress-nginx-admission +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-admission-webhook.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-admission-webhook.yml.j2 new file mode 100644 index 0000000..5d1bb01 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-admission-webhook.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission + namespace: {{ ingress_nginx_namespace }} +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2 new file mode 100644 index 0000000..58c0488 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2 @@ -0,0 +1,68 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +rules: + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get"] + - apiGroups: [""] + resources: ["configmaps", "pods", "secrets", "endpoints"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions", "networking.k8s.io"] + resources: ["ingresses", "ingressclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions", "networking.k8s.io"] + resources: ["ingresses/status"] + verbs: ["update"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingressclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["configmaps"] + # Defaults to "<election-id>-<ingress-class>" + # Here: "<ingress-controller-leader>-<nginx>" + # This has to be adapted if you 
change either parameter + # when launching the nginx-ingress-controller. + resourceNames: [{% if ingress_class is defined %}"ingress-controller-leader-{{ ingress_nginx_class | default('nginx') }}"{% else %}"ingress-controller-leader"{% endif %}] + verbs: ["get", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + # Defaults to "<election-id>-<ingress-class>" + # Here: "<ingress-controller-leader>-<nginx>" + # This has to be adapted if you change either parameter + # when launching the nginx-ingress-controller. + resourceNames: [{% if ingress_class is defined %}"ingress-controller-leader-{{ ingress_nginx_class | default('nginx') }}"{% else %}"ingress-controller-leader"{% endif %}] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + - apiGroups: ["policy"] + resourceNames: ["ingress-nginx"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + # Defaults to "<election-id>-<ingress-class>" + # Here: "<ingress-controller-leader>-<nginx>" + # This has to be adapted if you change either parameter + # when launching the nginx-ingress-controller. + resourceNames: [{% if ingress_class is defined %}"ingress-controller-leader-{{ ingress_nginx_class | default('nginx') }}"{% else %}"ingress-controller-leader"{% endif %}] + verbs: ["get", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] + - apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["get", "list", "watch"] diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-admission-webhook.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-admission-webhook.yml.j2 new file mode 100644 index 0000000..671912d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-admission-webhook.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + name: ingress-nginx-admission + namespace: {{ ingress_nginx_namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-admission +subjects: +- kind: ServiceAccount + name: ingress-nginx-admission + namespace: {{ ingress_nginx_namespace }} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2 new file mode 100644 index 0000000..142d400 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-admission-webhook.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-admission-webhook.yml.j2 new file mode 100644 index 0000000..488a045 --- /dev/null +++ 
b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-admission-webhook.yml.j2 @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ingress-nginx-admission + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2 b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2 new file mode 100644 index 0000000..305d553 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx diff --git a/kubespray/roles/kubernetes-apps/ingress_controller/meta/main.yml b/kubespray/roles/kubernetes-apps/ingress_controller/meta/main.yml new file mode 100644 index 0000000..b269607 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/ingress_controller/meta/main.yml @@ -0,0 +1,22 @@ +--- +dependencies: + - role: kubernetes-apps/ingress_controller/ingress_nginx + when: ingress_nginx_enabled + tags: + - apps + - ingress-controller + - ingress-nginx + + - role: kubernetes-apps/ingress_controller/cert_manager + when: cert_manager_enabled + tags: + - apps + - ingress-controller + - cert-manager + + - role: kubernetes-apps/ingress_controller/alb_ingress_controller + when: ingress_alb_enabled + tags: + - apps + - ingress-controller + - ingress_alb diff --git a/kubespray/roles/kubernetes-apps/krew/defaults/main.yml b/kubespray/roles/kubernetes-apps/krew/defaults/main.yml new file mode 100644 index 0000000..d0de6b1 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/krew/defaults/main.yml @@ -0,0 +1,4 @@ +--- +krew_enabled: false +krew_root_dir: "/usr/local/krew" +krew_default_index_uri: https://github.com/kubernetes-sigs/krew-index.git diff --git a/kubespray/roles/kubernetes-apps/krew/tasks/krew.yml b/kubespray/roles/kubernetes-apps/krew/tasks/krew.yml new file mode 100644 index 0000000..bbc4dba --- /dev/null +++ b/kubespray/roles/kubernetes-apps/krew/tasks/krew.yml @@ -0,0 +1,38 @@ +--- +- name: Krew | Download krew + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.krew) }}" + +- name: Krew | krew env + template: + src: krew.j2 + dest: /etc/bash_completion.d/krew + mode: 0644 + +- name: Krew | Copy krew manifest + template: + src: krew.yml.j2 + dest: "{{ local_release_dir }}/krew.yml" + mode: 0644 + +- name: Krew | Install krew # noqa 301 305 + shell: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} install --archive={{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz --manifest={{ local_release_dir }}/krew.yml" + environment: + KREW_ROOT: "{{ krew_root_dir }}" + KREW_DEFAULT_INDEX_URI: "{{ krew_default_index_uri | default('') }}" + +- name: Krew | Get krew completion + command: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} completion bash" + changed_when: False + register: krew_completion + check_mode: False + ignore_errors: yes # noqa ignore-errors + +- name: Krew | Install krew completion + copy: + dest: /etc/bash_completion.d/krew.sh + content: "{{ krew_completion.stdout }}" + mode: 0755 + become: True + when: krew_completion.rc == 0 diff --git 
a/kubespray/roles/kubernetes-apps/krew/tasks/main.yml b/kubespray/roles/kubernetes-apps/krew/tasks/main.yml new file mode 100644 index 0000000..40729e8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/krew/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Krew | install krew on kube_control_plane + import_tasks: krew.yml + +- name: Krew | install krew on localhost + import_tasks: krew.yml + delegate_to: localhost + connection: local + run_once: true + when: kubectl_localhost diff --git a/kubespray/roles/kubernetes-apps/krew/templates/krew.j2 b/kubespray/roles/kubernetes-apps/krew/templates/krew.j2 new file mode 100644 index 0000000..a666f6e --- /dev/null +++ b/kubespray/roles/kubernetes-apps/krew/templates/krew.j2 @@ -0,0 +1,6 @@ +# krew bash env(kubespray) +export KREW_ROOT="{{ krew_root_dir }}" +{% if krew_default_index_uri is defined %} +export KREW_DEFAULT_INDEX_URI='{{ krew_default_index_uri }}' +{% endif %} +export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH" diff --git a/kubespray/roles/kubernetes-apps/krew/templates/krew.yml.j2 b/kubespray/roles/kubernetes-apps/krew/templates/krew.yml.j2 new file mode 100644 index 0000000..b0c6152 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/krew/templates/krew.yml.j2 @@ -0,0 +1,100 @@ +apiVersion: krew.googlecontainertools.github.com/v1alpha2 +kind: Plugin +metadata: + name: krew +spec: + version: "{{ krew_version }}" + homepage: https://krew.sigs.k8s.io/ + shortDescription: Package manager for kubectl plugins. + caveats: | + krew is now installed! To start using kubectl plugins, you need to add + krew's installation directory to your PATH: + + * macOS/Linux: + - Add the following to your ~/.bashrc or ~/.zshrc: + export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH" + - Restart your shell. + + * Windows: Add %USERPROFILE%\.krew\bin to your PATH environment variable + + To list krew commands and to get help, run: + $ kubectl krew + For a full list of available plugins, run: + $ kubectl krew search + + You can find documentation at + https://krew.sigs.k8s.io/docs/user-guide/quickstart/. + + platforms: + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew + files: + - from: ./krew-darwin_amd64 + to: krew + - from: ./LICENSE + to: . + selector: + matchLabels: + os: darwin + arch: amd64 + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew + files: + - from: ./krew-darwin_arm64 + to: krew + - from: ./LICENSE + to: . + selector: + matchLabels: + os: darwin + arch: arm64 + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew + files: + - from: ./krew-linux_amd64 + to: krew + - from: ./LICENSE + to: . + selector: + matchLabels: + os: linux + arch: amd64 + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew + files: + - from: ./krew-linux_arm + to: krew + - from: ./LICENSE + to: . + selector: + matchLabels: + os: linux + arch: arm + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew + files: + - from: ./krew-linux_arm64 + to: krew + - from: ./LICENSE + to: . + selector: + matchLabels: + os: linux + arch: arm64 + - uri: {{ krew_download_url }} + sha256: {{ krew_archive_checksum }} + bin: krew.exe + files: + - from: ./krew-windows_amd64.exe + to: krew.exe + - from: ./LICENSE + to: . 
+ selector: + matchLabels: + os: windows + arch: amd64 diff --git a/kubespray/roles/kubernetes-apps/meta/main.yml b/kubespray/roles/kubernetes-apps/meta/main.yml new file mode 100644 index 0000000..9c19fde --- /dev/null +++ b/kubespray/roles/kubernetes-apps/meta/main.yml @@ -0,0 +1,126 @@ +--- +dependencies: + - role: kubernetes-apps/ansible + when: + - inventory_hostname == groups['kube_control_plane'][0] + + - role: kubernetes-apps/helm + when: + - helm_enabled + tags: + - helm + + - role: kubernetes-apps/krew + when: + - krew_enabled + tags: + - krew + + - role: kubernetes-apps/registry + when: + - registry_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - registry + + - role: kubernetes-apps/metrics_server + when: + - metrics_server_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - metrics_server + + - role: kubernetes-apps/csi_driver/csi_crd + when: + - cinder_csi_enabled or csi_snapshot_controller_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - csi-driver + + - role: kubernetes-apps/csi_driver/cinder + when: + - cinder_csi_enabled + tags: + - cinder-csi-driver + - csi-driver + + - role: kubernetes-apps/csi_driver/aws_ebs + when: + - aws_ebs_csi_enabled + tags: + - aws-ebs-csi-driver + - csi-driver + + - role: kubernetes-apps/csi_driver/azuredisk + when: + - azure_csi_enabled + tags: + - azure-csi-driver + - csi-driver + + - role: kubernetes-apps/csi_driver/gcp_pd + when: + - gcp_pd_csi_enabled + tags: + - gcp-pd-csi-driver + - csi-driver + + - role: kubernetes-apps/csi_driver/upcloud + when: + - upcloud_csi_enabled + tags: + - upcloud-csi-driver + - csi-driver + + - role: kubernetes-apps/csi_driver/vsphere + when: + - vsphere_csi_enabled + tags: + - vsphere-csi-driver + - csi-driver + + - role: kubernetes-apps/persistent_volumes + when: + - persistent_volumes_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - persistent_volumes + + - role: kubernetes-apps/snapshots + when: inventory_hostname == groups['kube_control_plane'][0] + tags: + - snapshots + - csi-driver + + - role: kubernetes-apps/container_runtimes + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - container-runtimes + + - role: kubernetes-apps/container_engine_accelerator + when: nvidia_accelerator_enabled + tags: + - container_engine_accelerator + + - role: kubernetes-apps/cloud_controller/oci + when: + - cloud_provider is defined + - cloud_provider == "oci" + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - oci + + - role: kubernetes-apps/metallb + when: + - metallb_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - metallb + + - role: kubernetes-apps/argocd + when: + - argocd_enabled + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - argocd diff --git a/kubespray/roles/kubernetes-apps/metallb/OWNERS b/kubespray/roles/kubernetes-apps/metallb/OWNERS new file mode 100644 index 0000000..b64c7bc --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metallb/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +reviewers: + - oomichi diff --git a/kubespray/roles/kubernetes-apps/metallb/defaults/main.yml b/kubespray/roles/kubernetes-apps/metallb/defaults/main.yml new file mode 100644 index 0000000..dc96fdc --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metallb/defaults/main.yml @@ -0,0 +1,23 @@ +--- +metallb_enabled: false +metallb_log_level: info +metallb_protocol: "layer2" +metallb_port: 
"7472" +metallb_memberlist_port: "7946" +metallb_peers: [] +metallb_speaker_enabled: "{{ metallb_enabled }}" +metallb_speaker_nodeselector: + kubernetes.io/os: "linux" +metallb_controller_nodeselector: + kubernetes.io/os: "linux" +metallb_speaker_tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists +metallb_controller_tolerations: [] +metallb_pool_name: "loadbalanced" +metallb_auto_assign: true +metallb_avoid_buggy_ips: false diff --git a/kubespray/roles/kubernetes-apps/metallb/tasks/main.yml b/kubespray/roles/kubernetes-apps/metallb/tasks/main.yml new file mode 100644 index 0000000..e5920fc --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metallb/tasks/main.yml @@ -0,0 +1,62 @@ +--- +- name: Kubernetes Apps | Check cluster settings for MetalLB + fail: + msg: "MetalLB require kube_proxy_strict_arp = true, see https://github.com/danderson/metallb/issues/153#issuecomment-518651132" + when: + - "kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp" + +- name: Kubernetes Apps | Check cluster settings for MetalLB + fail: + msg: "metallb_ip_range is mandatory to be specified for MetalLB" + when: + - metallb_ip_range is not defined or not metallb_ip_range + +- name: Kubernetes Apps | Check BGP peers for MetalLB + fail: + msg: "metallb_peers is mandatory when metallb_protocol is bgp and metallb_speaker_enabled" + when: + - metallb_protocol == 'bgp' and metallb_speaker_enabled + - metallb_peers is not defined or not metallb_peers + +- name: Kubernetes Apps | Check that the deprecated 'matallb_auto_assign' variable is not used anymore + fail: + msg: "'matallb_auto_assign' configuration variable is deprecated, please use 'metallb_auto_assign' instead" + when: + - matallb_auto_assign is defined + +- name: Kubernetes Apps | Check AppArmor status + command: which apparmor_parser + register: apparmor_status + when: + - podsecuritypolicy_enabled + - inventory_hostname == groups['kube_control_plane'][0] + failed_when: false + +- name: Kubernetes Apps | Set apparmor_enabled + set_fact: + apparmor_enabled: "{{ apparmor_status.rc == 0 }}" + when: + - podsecuritypolicy_enabled + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Apps | Lay Down MetalLB + become: true + template: + src: "{{ item }}.j2" + dest: "{{ kube_config_dir }}/{{ item }}" + mode: 0644 + with_items: ["metallb.yml", "metallb-config.yml"] + register: "rendering" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" + +- name: Kubernetes Apps | Install and configure MetalLB + kube: + name: "MetalLB" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item }}" + state: "{{ item.changed | ternary('latest','present') }}" + become: true + with_items: "{{ rendering.results }}" + when: + - "inventory_hostname == groups['kube_control_plane'][0]" diff --git a/kubespray/roles/kubernetes-apps/metallb/templates/metallb-config.yml.j2 b/kubespray/roles/kubernetes-apps/metallb/templates/metallb-config.yml.j2 new file mode 100644 index 0000000..8fda506 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metallb/templates/metallb-config.yml.j2 @@ -0,0 +1,54 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | +{% if metallb_peers | length > 0 %} + peers: +{% for peer in metallb_peers %} + - peer-address: {{ peer.peer_address }} + peer-asn: {{ peer.peer_asn }} + my-asn: {{ peer.my_asn }} +{% if 
peer.password is defined %} + password: "{{ peer.password }}" +{% endif %} +{% if peer.source_address is defined %} + source-address: {{ peer.source_address }} +{% endif %} +{% if peer.node_selectors is defined %} + node-selectors: + {{ peer.node_selectors | to_yaml(indent=2, width=1337) | indent(8) }} +{% endif %} +{% endfor %} +{% endif %} + address-pools: + - name: {{ metallb_pool_name }} + protocol: {{ metallb_protocol }} + addresses: +{% for ip_range in metallb_ip_range %} + - {{ ip_range }} +{% endfor %} +{% if metallb_auto_assign == false %} + auto-assign: false +{% endif %} +{% if metallb_avoid_buggy_ips == true %} + avoid-buggy-ips: true +{% endif %} +{% if metallb_additional_address_pools is defined %}{% for pool in metallb_additional_address_pools %} + - name: {{ pool }} + protocol: {{ metallb_additional_address_pools[pool].protocol }} + addresses: +{% for ip_range in metallb_additional_address_pools[pool].ip_range %} + - {{ ip_range }} +{% endfor %} +{% if metallb_additional_address_pools[pool].auto_assign is defined %} + auto-assign: {{ metallb_additional_address_pools[pool].auto_assign }} +{% endif %} +{% if metallb_additional_address_pools[pool].avoid_buggy_ips is defined %} + avoid-buggy-ips: {{ metallb_additional_address_pools[pool].avoid_buggy_ips }} +{% endif %} +{% endfor %} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/metallb/templates/metallb.yml.j2 b/kubespray/roles/kubernetes-apps/metallb/templates/metallb.yml.j2 new file mode 100644 index 0000000..fc03cd2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metallb/templates/metallb.yml.j2 @@ -0,0 +1,425 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: metallb-system + labels: + app: metallb +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +--- +{% if metallb_speaker_enabled %} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: speaker + namespace: metallb-system +{% endif %} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:controller +rules: +- apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - services/status + verbs: + - update +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - controller + resources: + - podsecuritypolicies + verbs: + - use +--- +{% if metallb_speaker_enabled %} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:speaker +rules: +- apiGroups: + - '' + resources: + - services + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - speaker + resources: + - podsecuritypolicies + verbs: + - use +{% endif %} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - pods + verbs: + - list +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - secrets + verbs: + - create +- apiGroups: + - '' + resources: + - secrets + resourceNames: + - memberlist + verbs: + - list +- apiGroups: + - apps + resources: + - deployments + resourceNames: + - controller + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:controller +subjects: +- kind: ServiceAccount + name: controller + namespace: metallb-system +--- +{% if metallb_speaker_enabled %} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:speaker +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:speaker +subjects: +- kind: ServiceAccount + name: speaker + namespace: metallb-system +{% endif %} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: config-watcher +subjects: +- kind: ServiceAccount + name: controller +- kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-lister +subjects: +- kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: controller +subjects: +- kind: ServiceAccount + name: controller +--- +{% if metallb_speaker_enabled %} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: metallb + component: speaker + name: speaker + namespace: metallb-system +spec: + selector: + matchLabels: + app: metallb + component: speaker + template: + metadata: + annotations: + prometheus.io/port: '{{ metallb_port }}' + prometheus.io/scrape: 'true' + labels: + app: metallb + component: speaker + spec: + containers: + - args: + - --port={{ metallb_port }} + - --config=config + - --log-level={{ metallb_log_level }} + env: + - name: METALLB_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: METALLB_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: METALLB_ML_BIND_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + # needed when another software is also using memberlist / port 7946 + # when changing this default you also need to update the container ports definition + # and the PodSecurityPolicy hostPorts definition + #- name: METALLB_ML_BIND_PORT + # value: "{{ metallb_memberlist_port }}" + - name: METALLB_ML_LABELS + value: "app=metallb,component=speaker" + - name: METALLB_ML_SECRET_KEY + valueFrom: + secretKeyRef: + name: memberlist + key: secretkey + image: {{ metallb_speaker_image_repo }}:{{ metallb_version }} + name: speaker + ports: + - containerPort: {{ metallb_port }} + name: monitoring + - containerPort: {{ metallb_memberlist_port }} + name: memberlist-tcp + - containerPort: {{ metallb_memberlist_port }} + name: memberlist-udp + protocol: UDP + livenessProbe: + httpGet: + path: /metrics + 
port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_RAW + drop: + - ALL + readOnlyRootFilesystem: true + hostNetwork: true +{% if metallb_speaker_nodeselector %} + nodeSelector: + {{ metallb_speaker_nodeselector | to_nice_yaml | indent(width=8) }} +{%- endif %} + serviceAccountName: speaker + terminationGracePeriodSeconds: 2 +{% if metallb_speaker_tolerations %} + tolerations: + {{ metallb_speaker_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} +{% endif %} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metallb + component: controller + name: controller + namespace: metallb-system +spec: + revisionHistoryLimit: 3 + selector: + matchLabels: + app: metallb + component: controller + template: + metadata: + annotations: + prometheus.io/port: '{{ metallb_port }}' + prometheus.io/scrape: 'true' + labels: + app: metallb + component: controller + spec: + priorityClassName: system-cluster-critical +{% if metallb_controller_tolerations %} + tolerations: + {{ metallb_controller_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} +{% endif %} + containers: + - args: + - --port={{ metallb_port }} + - --config=config + - --log-level={{ metallb_log_level }} + env: + - name: METALLB_ML_SECRET_NAME + value: memberlist + - name: METALLB_DEPLOYMENT + value: controller + image: {{ metallb_controller_image_repo }}:{{ metallb_version }} + name: controller + ports: + - containerPort: {{ metallb_port }} + name: monitoring + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + readOnlyRootFilesystem: true + livenessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 +{% if metallb_controller_nodeselector %} + nodeSelector: + {{ metallb_controller_nodeselector | to_nice_yaml | indent(width=8) }} +{%- endif %} + securityContext: + runAsNonRoot: true + runAsUser: 65534 + fsGroup: 65534 + serviceAccountName: controller + terminationGracePeriodSeconds: 0 diff --git a/kubespray/roles/kubernetes-apps/metrics_server/defaults/main.yml b/kubespray/roles/kubernetes-apps/metrics_server/defaults/main.yml new file mode 100644 index 0000000..4e247a1 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metrics_server/defaults/main.yml @@ -0,0 +1,11 @@ +--- +metrics_server_container_port: 4443 +metrics_server_kubelet_insecure_tls: true +metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +metrics_server_metric_resolution: 15s +metrics_server_limits_cpu: 100m +metrics_server_limits_memory: 200Mi +metrics_server_requests_cpu: 100m +metrics_server_requests_memory: 200Mi +metrics_server_host_network: false +metrics_server_replicas: 1 \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/metrics_server/tasks/main.yml b/kubespray/roles/kubernetes-apps/metrics_server/tasks/main.yml new file mode 100644 index 0000000..1fe617d --- /dev/null +++ 
b/kubespray/roles/kubernetes-apps/metrics_server/tasks/main.yml @@ -0,0 +1,57 @@ +--- +# If all control plane nodes also hold the node role, none of them are tainted and the toleration should not be specified. +- name: Check whether all masters are also nodes + set_fact: + masters_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}" + +- name: Metrics Server | Delete addon dir + file: + path: "{{ kube_config_dir }}/addons/metrics_server" + state: absent + when: + - inventory_hostname == groups['kube_control_plane'][0] + tags: + - upgrade + +- name: Metrics Server | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/metrics_server" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Metrics Server | Templates list + set_fact: + metrics_server_templates: + - { name: auth-delegator, file: auth-delegator.yaml, type: clusterrolebinding } + - { name: auth-reader, file: auth-reader.yaml, type: rolebinding } + - { name: metrics-server-sa, file: metrics-server-sa.yaml, type: sa } + - { name: metrics-server-deployment, file: metrics-server-deployment.yaml, type: deploy } + - { name: metrics-server-service, file: metrics-server-service.yaml, type: service } + - { name: metrics-apiservice, file: metrics-apiservice.yaml, type: service } + - { name: resource-reader-clusterrolebinding, file: resource-reader-clusterrolebinding.yaml, type: clusterrolebinding } + - { name: resource-reader, file: resource-reader.yaml, type: clusterrole } + +- name: Metrics Server | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/metrics_server/{{ item.file }}" + mode: 0644 + with_items: "{{ metrics_server_templates }}" + register: metrics_server_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Metrics Server | Apply manifests + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/metrics_server/{{ item.item.file }}" + state: "latest" + with_items: "{{ metrics_server_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/metrics_server/templates/auth-delegator.yaml.j2 b/kubespray/roles/kubernetes-apps/metrics_server/templates/auth-delegator.yaml.j2 new file mode 100644 index 0000000..92f8204 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metrics_server/templates/auth-delegator.yaml.j2 @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-server:system:auth-delegator + labels: + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/metrics_server/templates/auth-reader.yaml.j2 b/kubespray/roles/kubernetes-apps/metrics_server/templates/auth-reader.yaml.j2 new file mode 100644 index 0000000..e02b8ea --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metrics_server/templates/auth-reader.yaml.j2 @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: metrics-server-auth-reader + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: 
extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-apiservice.yaml.j2 b/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-apiservice.yaml.j2 new file mode 100644 index 0000000..9341687 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-apiservice.yaml.j2 @@ -0,0 +1,15 @@ +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1beta1.metrics.k8s.io + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + service: + name: metrics-server + namespace: kube-system + group: metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: {{ metrics_server_kubelet_insecure_tls }} + groupPriorityMinimum: 100 + versionPriority: 100 diff --git a/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 b/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 new file mode 100644 index 0000000..86247b9 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 @@ -0,0 +1,107 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metrics-server + namespace: kube-system + labels: + app.kubernetes.io/name: metrics-server + addonmanager.kubernetes.io/mode: Reconcile + version: {{ metrics_server_version }} +spec: + replicas: {{ metrics_server_replicas }} + selector: + matchLabels: + app.kubernetes.io/name: metrics-server + version: {{ metrics_server_version }} + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + name: metrics-server + labels: + app.kubernetes.io/name: metrics-server + version: {{ metrics_server_version }} + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + hostNetwork: {{ metrics_server_host_network | default(false) }} + containers: + - name: metrics-server + image: {{ metrics_server_image_repo }}:{{ metrics_server_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --logtostderr + - --cert-dir=/tmp + - --secure-port={{ metrics_server_container_port }} +{% if metrics_server_kubelet_preferred_address_types %} + - --kubelet-preferred-address-types={{ metrics_server_kubelet_preferred_address_types }} +{% endif %} + - --kubelet-use-node-status-port +{% if metrics_server_kubelet_insecure_tls %} + - --kubelet-insecure-tls +{% endif %} + - --metric-resolution={{ metrics_server_metric_resolution }} + ports: + - containerPort: {{ metrics_server_container_port }} + name: https + protocol: TCP + volumeMounts: + - name: tmp + mountPath: /tmp + livenessProbe: + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + failureThreshold: 3 + initialDelaySeconds: 40 + readinessProbe: + httpGet: + path: /readyz + port: https + scheme: HTTPS + periodSeconds: 10 + failureThreshold: 3 + initialDelaySeconds: 40 + securityContext: + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + allowPrivilegeEscalation: false + resources: + limits: + cpu: {{ metrics_server_limits_cpu }} + memory: {{ metrics_server_limits_memory }} + requests: + cpu: {{ metrics_server_requests_cpu }} + memory: {{ metrics_server_requests_memory }} + volumes: + - name: tmp + emptyDir: {} +{% if not masters_are_not_tainted %} + tolerations: + - key: node-role.kubernetes.io/master + 
effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% endif %} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - metrics-server + topologyKey: kubernetes.io/hostname + namespaces: + - kube-system \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-server-sa.yaml.j2 b/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-server-sa.yaml.j2 new file mode 100644 index 0000000..94444ca --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-server-sa.yaml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metrics-server + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile diff --git a/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-server-service.yaml.j2 b/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-server-service.yaml.j2 new file mode 100644 index 0000000..f1c3691 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metrics_server/templates/metrics-server-service.yaml.j2 @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: metrics-server + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: "metrics-server" +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: metrics-server + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https diff --git a/kubespray/roles/kubernetes-apps/metrics_server/templates/resource-reader-clusterrolebinding.yaml.j2 b/kubespray/roles/kubernetes-apps/metrics_server/templates/resource-reader-clusterrolebinding.yaml.j2 new file mode 100644 index 0000000..038cfd8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metrics_server/templates/resource-reader-clusterrolebinding.yaml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:metrics-server + labels: + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/metrics_server/templates/resource-reader.yaml.j2 b/kubespray/roles/kubernetes-apps/metrics_server/templates/resource-reader.yaml.j2 new file mode 100644 index 0000000..3d9ea81 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/metrics_server/templates/resource-reader.yaml.j2 @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:metrics-server + labels: + addonmanager.kubernetes.io/mode: Reconcile +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/metrics + verbs: + - get + - list + - watch diff --git a/kubespray/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/kubespray/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml new file mode 100644 index 0000000..b8b4338 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml @@ -0,0 +1,2 @@ +--- +# TODO: Handle Calico etcd -> kdd migration diff --git a/kubespray/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/kubespray/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml new file mode 100644 index 0000000..db7e3f2 --- /dev/null +++ 
b/kubespray/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Canal | Start Resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ canal_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/kubespray/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/kubespray/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml new file mode 100644 index 0000000..ff56d24 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Flannel | Start Resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ flannel_node_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped + +- name: Flannel | Wait for flannel subnet.env file presence + wait_for: + path: /run/flannel/subnet.env + delay: 5 + timeout: 600 diff --git a/kubespray/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml b/kubespray/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml new file mode 100644 index 0000000..9f42501 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: Kube-OVN | Start Resources + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ kube_ovn_node_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/kubespray/roles/kubernetes-apps/network_plugin/kube-router/OWNERS b/kubespray/roles/kubernetes-apps/network_plugin/kube-router/OWNERS new file mode 100644 index 0000000..c95aad2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/network_plugin/kube-router/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - bozzo +reviewers: + - bozzo \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/kubespray/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml new file mode 100644 index 0000000..25f9a71 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml @@ -0,0 +1,23 @@ +--- + +- name: kube-router | Start Resources + kube: + name: "kube-router" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/kube-router.yml" + resource: "ds" + namespace: "kube-system" + state: "latest" + delegate_to: "{{ groups['kube_control_plane'] | first }}" + run_once: true + +- name: kube-router | Wait for kube-router pods to be ready + command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors + register: pods_not_ready + until: pods_not_ready.stdout.find("kube-router")==-1 + retries: 30 + delay: 10 + ignore_errors: true + delegate_to: "{{ groups['kube_control_plane'] | first }}" + run_once: true + changed_when: false diff --git a/kubespray/roles/kubernetes-apps/network_plugin/meta/main.yml 
b/kubespray/roles/kubernetes-apps/network_plugin/meta/main.yml new file mode 100644 index 0000000..976e6ec --- /dev/null +++ b/kubespray/roles/kubernetes-apps/network_plugin/meta/main.yml @@ -0,0 +1,36 @@ +--- +dependencies: + - role: kubernetes-apps/network_plugin/calico + when: kube_network_plugin == 'calico' + tags: + - calico + + - role: kubernetes-apps/network_plugin/canal + when: kube_network_plugin == 'canal' + tags: + - canal + + - role: kubernetes-apps/network_plugin/flannel + when: kube_network_plugin == 'flannel' + tags: + - flannel + + - role: kubernetes-apps/network_plugin/kube-ovn + when: kube_network_plugin == 'kube-ovn' + tags: + - kube-ovn + + - role: kubernetes-apps/network_plugin/weave + when: kube_network_plugin == 'weave' + tags: + - weave + + - role: kubernetes-apps/network_plugin/kube-router + when: kube_network_plugin == 'kube-router' + tags: + - kube-router + + - role: kubernetes-apps/network_plugin/multus + when: kube_network_plugin_multus + tags: + - multus diff --git a/kubespray/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml b/kubespray/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml new file mode 100644 index 0000000..232d3e4 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Multus | Start resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/kubespray/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/kubespray/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml new file mode 100644 index 0000000..bc0f932 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml @@ -0,0 +1,21 @@ +--- + +- name: Weave | Start Resources + kube: + name: "weave-net" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/weave-net.yml" + resource: "ds" + namespace: "kube-system" + state: "latest" + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Weave | Wait for Weave to become available + uri: + url: http://127.0.0.1:6784/status + return_content: yes + register: weave_status + retries: 180 + delay: 5 + until: "weave_status.status == 200 and 'Status: ready' in weave_status.content" + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/OWNERS b/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/OWNERS new file mode 100644 index 0000000..6e44ceb --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - alijahnas +reviewers: diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/defaults/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/defaults/main.yml new file mode 100644 index 0000000..896d2d3 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# To restrict which AZ the volume should be provisioned in +# set this value to true and set the list of relevant AZs +# For it to work, the flag aws_ebs_csi_enable_volume_scheduling +# in AWS EBS Driver 
must be true +restrict_az_provisioning: false +aws_ebs_availability_zones: + - eu-west-3c diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml new file mode 100644 index 0000000..b49acdf --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Copy AWS EBS CSI Storage Class template + template: + src: "aws-ebs-csi-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/aws-ebs-csi-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add AWS EBS CSI Storage Class + kube: + name: aws-ebs-csi + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/aws-ebs-csi-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/templates/aws-ebs-csi-storage-class.yml.j2 b/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/templates/aws-ebs-csi-storage-class.yml.j2 new file mode 100644 index 0000000..1632646 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/templates/aws-ebs-csi-storage-class.yml.j2 @@ -0,0 +1,18 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: ebs-sc +provisioner: ebs.csi.aws.com +volumeBindingMode: WaitForFirstConsumer +parameters: + csi.storage.k8s.io/fstype: xfs + type: gp2 +{% if restrict_az_provisioning %} +allowedTopologies: +- matchLabelExpressions: + - key: topology.ebs.csi.aws.com/zone + values: +{% for value in aws_ebs_availability_zones %} + - {{ value }} +{% endfor %} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/defaults/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/defaults/main.yml new file mode 100644 index 0000000..fc92e17 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/defaults/main.yml @@ -0,0 +1,3 @@ +--- +## Available values: Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS +storage_account_type: StandardSSD_LRS diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml new file mode 100644 index 0000000..9abffbe --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Copy Azure CSI Storage Class template + template: + src: "azure-csi-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/azure-csi-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add Azure CSI Storage Class + kube: + name: azure-csi + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/azure-csi-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/templates/azure-csi-storage-class.yml.j2 b/kubespray/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/templates/azure-csi-storage-class.yml.j2 new 
file mode 100644 index 0000000..be5cb38 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/templates/azure-csi-storage-class.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: disk.csi.azure.com +provisioner: disk.csi.azure.com +parameters: + skuname: {{ storage_account_type }} +{% if azure_csi_tags is defined %} + tags: {{ azure_csi_tags }} +{% endif %} +reclaimPolicy: Delete +volumeBindingMode: Immediate +allowVolumeExpansion: true diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/cinder-csi/defaults/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/cinder-csi/defaults/main.yml new file mode 100644 index 0000000..5e35dd5 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/cinder-csi/defaults/main.yml @@ -0,0 +1,7 @@ +--- +storage_classes: + - name: cinder-csi + is_default: false + parameters: + availability: nova + allowVolumeExpansion: false diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml new file mode 100644 index 0000000..52de1c5 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Copy Cinder CSI Storage Class template + template: + src: "cinder-csi-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/cinder-csi-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add Cinder CSI Storage Class + kube: + name: cinder-csi + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/cinder-csi-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/cinder-csi/templates/cinder-csi-storage-class.yml.j2 b/kubespray/roles/kubernetes-apps/persistent_volumes/cinder-csi/templates/cinder-csi-storage-class.yml.j2 new file mode 100644 index 0000000..be8ba13 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/cinder-csi/templates/cinder-csi-storage-class.yml.j2 @@ -0,0 +1,25 @@ +{% for class in storage_classes %} +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: "{{ class.name }}" + annotations: + storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}" +provisioner: cinder.csi.openstack.org +volumeBindingMode: WaitForFirstConsumer +parameters: +{% for key, value in (class.parameters | default({})).items() %} + "{{ key }}": "{{ value }}" +{% endfor %} +{% if cinder_topology is defined and cinder_topology is sameas true %} +allowedTopologies: +- matchLabelExpressions: + - key: topology.cinder.csi.openstack.org/zone + values: +{% for zone in cinder_topology_zones %} + - "{{ zone }}" +{% endfor %} +{% endif %} +allowVolumeExpansion: {{ expand_persistent_volumes }} +{% endfor %} diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/defaults/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/defaults/main.yml new file mode 100644 index 0000000..d58706f --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# Choose between pd-standard and pd-ssd +gcp_pd_csi_volume_type: 
pd-standard +gcp_pd_regional_replication_enabled: false +gcp_pd_restrict_zone_replication: false +gcp_pd_restricted_zones: + - europe-west1-b + - europe-west1-c diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml new file mode 100644 index 0000000..29997e7 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Copy GCP PD CSI Storage Class template + template: + src: "gcp-pd-csi-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/gcp-pd-csi-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add GCP PD CSI Storage Class + kube: + name: gcp-pd-csi + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/gcp-pd-csi-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/templates/gcp-pd-csi-storage-class.yml.j2 b/kubespray/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/templates/gcp-pd-csi-storage-class.yml.j2 new file mode 100644 index 0000000..475eb4f --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/templates/gcp-pd-csi-storage-class.yml.j2 @@ -0,0 +1,20 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-gce-pd +provisioner: pd.csi.storage.gke.io +parameters: + type: {{ gcp_pd_csi_volume_type }} +{% if gcp_pd_regional_replication_enabled %} + replication-type: regional-pd +{% endif %} +volumeBindingMode: WaitForFirstConsumer +{% if gcp_pd_restrict_zone_replication %} +allowedTopologies: +- matchLabelExpressions: + - key: topology.gke.io/zone + values: +{% for value in gcp_pd_restricted_zones %} + - {{ value }} +{% endfor %} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/meta/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/meta/main.yml new file mode 100644 index 0000000..fdfd807 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/meta/main.yml @@ -0,0 +1,43 @@ +--- +dependencies: + - role: kubernetes-apps/persistent_volumes/openstack + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack' ] + tags: + - persistent_volumes_openstack + + - role: kubernetes-apps/persistent_volumes/cinder-csi + when: + - cinder_csi_enabled + tags: + - persistent_volumes_cinder_csi + - cinder-csi-driver + + - role: kubernetes-apps/persistent_volumes/aws-ebs-csi + when: + - aws_ebs_csi_enabled + tags: + - persistent_volumes_aws_ebs_csi + - aws-ebs-csi-driver + + - role: kubernetes-apps/persistent_volumes/azuredisk-csi + when: + - azure_csi_enabled + tags: + - persistent_volumes_azure_csi + - azure-csi-driver + + - role: kubernetes-apps/persistent_volumes/gcp-pd-csi + when: + - gcp_pd_csi_enabled + tags: + - persistent_volumes_gcp_pd_csi + - gcp-pd-csi-driver + + - role: kubernetes-apps/persistent_volumes/upcloud-csi + when: + - upcloud_csi_enabled + tags: + - persistent_volumes_upcloud_csi + - upcloud-csi-driver \ No newline at end of file diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml new file mode 100644 index 0000000..05a3d94 --- 
/dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml @@ -0,0 +1,7 @@ +--- +persistent_volumes_enabled: false +storage_classes: + - name: standard + is_default: true + parameters: + availability: nova diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml new file mode 100644 index 0000000..3387e7f --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template + template: + src: "openstack-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/openstack-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class + kube: + name: storage-class + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/openstack-storage-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2 b/kubespray/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2 new file mode 100644 index 0000000..0551e15 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2 @@ -0,0 +1,15 @@ +{% for class in storage_classes %} +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: "{{ class.name }}" + annotations: + storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}" +provisioner: kubernetes.io/cinder +parameters: +{% for key, value in (class.parameters | default({})).items() %} + "{{ key }}": "{{ value }}" +{% endfor %} +allowVolumeExpansion: {{ expand_persistent_volumes }} +{% endfor %} diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml new file mode 100644 index 0000000..5986e8c --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml @@ -0,0 +1,12 @@ +--- +storage_classes: + - name: standard + is_default: true + expand_persistent_volumes: true + parameters: + tier: maxiops + - name: hdd + is_default: false + expand_persistent_volumes: true + parameters: + tier: hdd diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/upcloud-csi/tasks/main.yml b/kubespray/roles/kubernetes-apps/persistent_volumes/upcloud-csi/tasks/main.yml new file mode 100644 index 0000000..26104a0 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/upcloud-csi/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Persistent Volumes | Copy UpCloud CSI Storage Class template + template: + src: "upcloud-csi-storage-class.yml.j2" + dest: "{{ kube_config_dir }}/upcloud-csi-storage-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Persistent Volumes | Add UpCloud CSI Storage Class + kube: + name: upcloud-csi + kubectl: "{{ bin_dir }}/kubectl" + resource: StorageClass + filename: "{{ kube_config_dir }}/upcloud-csi-storage-class.yml" + state: "latest" + when: + - 
inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/roles/kubernetes-apps/persistent_volumes/upcloud-csi/templates/upcloud-csi-storage-class.yml.j2 b/kubespray/roles/kubernetes-apps/persistent_volumes/upcloud-csi/templates/upcloud-csi-storage-class.yml.j2 new file mode 100644 index 0000000..a40df9b --- /dev/null +++ b/kubespray/roles/kubernetes-apps/persistent_volumes/upcloud-csi/templates/upcloud-csi-storage-class.yml.j2 @@ -0,0 +1,16 @@ +{% for class in storage_classes %} +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: "{{ class.name }}" + annotations: + storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}" +provisioner: storage.csi.upcloud.com +reclaimPolicy: Delete +parameters: +{% for key, value in (class.parameters | default({})).items() %} + "{{ key }}": "{{ value }}" +{% endfor %} +allowVolumeExpansion: {{ class.expand_persistent_volumes | default(true) | ternary("true","false") }} +{% endfor %} diff --git a/kubespray/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml b/kubespray/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml new file mode 100644 index 0000000..33f5269 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml @@ -0,0 +1,11 @@ +--- +# Limits for calico apps +calico_policy_controller_cpu_limit: 1000m +calico_policy_controller_memory_limit: 256M +calico_policy_controller_cpu_requests: 30m +calico_policy_controller_memory_requests: 64M +calico_policy_controller_deployment_nodeselector: "kubernetes.io/os: linux" + +# SSL +calico_cert_dir: "/etc/calico/certs" +canal_cert_dir: "/etc/canal/certs" diff --git a/kubespray/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/kubespray/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml new file mode 100644 index 0000000..e4169b2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: Set cert dir + set_fact: + calico_cert_dir: "{{ canal_cert_dir }}" + when: + - kube_network_plugin == 'canal' + tags: + - facts + - canal + +- name: Create calico-kube-controllers manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: calico-kube-controllers, file: calico-kube-controllers.yml, type: deployment} + - {name: calico-kube-controllers, file: calico-kube-sa.yml, type: sa} + - {name: calico-kube-controllers, file: calico-kube-cr.yml, type: clusterrole} + - {name: calico-kube-controllers, file: calico-kube-crb.yml, type: clusterrolebinding} + register: calico_kube_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + - rbac_enabled or item.type not in rbac_resources + +- name: Start of Calico kube controllers + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ calico_kube_manifests.results }}" + register: calico_kube_controller_start + until: calico_kube_controller_start is succeeded + retries: 4 + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" diff --git a/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 
b/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 new file mode 100644 index 0000000..f89e4d6 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + k8s-app: calico-kube-controllers + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + nodeSelector: + {{ calico_policy_controller_deployment_nodeselector }} +{% if calico_datastore == "etcd" %} + hostNetwork: true +{% endif %} + serviceAccountName: calico-kube-controllers + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule +{% if policy_controller_extra_tolerations is defined %} + {{ policy_controller_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }} +{% endif %} + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ calico_policy_controller_cpu_limit }} + memory: {{ calico_policy_controller_memory_limit }} + requests: + cpu: {{ calico_policy_controller_cpu_requests }} + memory: {{ calico_policy_controller_memory_requests }} + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 + env: +{% if calico_datastore == "kdd" %} + - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes +{% else %} + - name: ETCD_ENDPOINTS + value: "{{ etcd_access_addresses }}" + - name: ETCD_CA_CERT_FILE + value: "{{ calico_cert_dir }}/ca_cert.crt" + - name: ETCD_CERT_FILE + value: "{{ calico_cert_dir }}/cert.crt" + - name: ETCD_KEY_FILE + value: "{{ calico_cert_dir }}/key.pem" + volumeMounts: + - mountPath: {{ calico_cert_dir }} + name: etcd-certs + readOnly: true + volumes: + - hostPath: + path: {{ calico_cert_dir }} + name: etcd-certs +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 b/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 new file mode 100644 index 0000000..f74b291 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 @@ -0,0 +1,110 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers + namespace: kube-system +rules: +{% if calico_datastore == "etcd" %} + - apiGroups: + - "" + - extensions + resources: + - pods + - namespaces + - networkpolicies + - nodes + - serviceaccounts + verbs: + - watch + - list + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - watch + - list +{% elif calico_datastore == "kdd" %} + # Nodes are watched to monitor for deletions. 
+ - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are queried to check for existence. + - apiGroups: [""] + resources: + - pods + verbs: + - watch + - list + - get + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipreservations + verbs: + - list + # Pools are watched to maintain a mapping of blocks to IP pools. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - watch + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 b/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 new file mode 100644 index 0000000..8168056 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 b/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 new file mode 100644 index 0000000..269d0a1 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/policy_controller/meta/main.yml b/kubespray/roles/kubernetes-apps/policy_controller/meta/main.yml new file mode 100644 index 0000000..3f46b8d --- /dev/null +++ b/kubespray/roles/kubernetes-apps/policy_controller/meta/main.yml @@ -0,0 +1,8 @@ +--- +dependencies: + - role: policy_controller/calico + when: + - kube_network_plugin in ['calico', 'canal'] + - enable_network_policy + tags: + - policy-controller diff --git a/kubespray/roles/kubernetes-apps/registry/defaults/main.yml b/kubespray/roles/kubernetes-apps/registry/defaults/main.yml new file mode 100644 index 0000000..6353b7c --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/defaults/main.yml @@ -0,0 +1,48 @@ +--- +registry_namespace: "kube-system" +registry_storage_class: "" +registry_storage_access_mode: "ReadWriteOnce" +registry_disk_size: "10Gi" +registry_port: 5000 +registry_replica_count: 1 + +# type of service: ClusterIP, LoadBalancer or NodePort +registry_service_type: "ClusterIP" +# you can specify your cluster IP 
address when registry_service_type is ClusterIP +registry_service_cluster_ip: "" +# you can specify your cloud provider assigned loadBalancerIP when registry_service_type is LoadBalancer +registry_service_loadbalancer_ip: "" +# annotations for managing Cloud Load Balancers +registry_service_annotations: {} +# you can specify the node port when registry_service_type is NodePort +registry_service_nodeport: "" + +# name of kubernetes secret for registry TLS certs +registry_tls_secret: "" + +registry_htpasswd: "" + +# registry configuration +# see: https://docs.docker.com/registry/configuration/#list-of-configuration-options +registry_config: + version: 0.1 + log: + fields: + service: registry + storage: + cache: + blobdescriptor: inmemory + http: + addr: :{{ registry_port }} + headers: + X-Content-Type-Options: [nosniff] + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + +registry_ingress_annotations: {} +registry_ingress_host: "" +# name of kubernetes secret for registry ingress TLS certs +registry_ingress_tls_secret: "" diff --git a/kubespray/roles/kubernetes-apps/registry/tasks/main.yml b/kubespray/roles/kubernetes-apps/registry/tasks/main.yml new file mode 100644 index 0000000..5090212 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/tasks/main.yml @@ -0,0 +1,109 @@ +--- +- name: Registry | Check registry_service_type value + fail: + msg: "registry_service_type can only be 'ClusterIP', 'LoadBalancer' or 'NodePort'" + when: registry_service_type not in ['ClusterIP', 'LoadBalancer', 'NodePort'] + +- name: Registry | Stop if registry_service_cluster_ip is defined when registry_service_type is not 'ClusterIP' + fail: + msg: "registry_service_cluster_ip is only supported when registry_service_type is 'ClusterIP'." + when: + - registry_service_cluster_ip is defined and registry_service_cluster_ip|length > 0 + - registry_service_type != "ClusterIP" + +- name: Registry | Stop if registry_service_loadbalancer_ip is defined when registry_service_type is not 'LoadBalancer' + fail: + msg: "registry_service_loadbalancer_ip is only supported when registry_service_type is 'LoadBalancer'." + when: + - registry_service_loadbalancer_ip is defined and registry_service_loadbalancer_ip|length > 0 + - registry_service_type != "LoadBalancer" + +- name: Registry | Stop if registry_service_nodeport is defined when registry_service_type is not 'NodePort' + fail: + msg: "registry_service_nodeport is only supported when registry_service_type is 'NodePort'." 
+ when: + - registry_service_nodeport is defined and registry_service_nodeport|length > 0 + - registry_service_type != "NodePort" + +- name: Registry | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/registry" + state: directory + owner: root + group: root + mode: 0755 + +- name: Registry | Templates list + set_fact: + registry_templates: + - { name: registry-ns, file: registry-ns.yml, type: ns } + - { name: registry-sa, file: registry-sa.yml, type: sa } + - { name: registry-svc, file: registry-svc.yml, type: svc } + - { name: registry-secrets, file: registry-secrets.yml, type: secrets } + - { name: registry-cm, file: registry-cm.yml, type: cm } + - { name: registry-rs, file: registry-rs.yml, type: rs } + registry_templates_for_psp: + - { name: registry-psp, file: registry-psp.yml, type: psp } + - { name: registry-cr, file: registry-cr.yml, type: clusterrole } + - { name: registry-crb, file: registry-crb.yml, type: rolebinding } + +- name: Registry | Append extra templates to Registry Templates list for PodSecurityPolicy + set_fact: + registry_templates: "{{ registry_templates[:2] + registry_templates_for_psp + registry_templates[2:] }}" + when: + - podsecuritypolicy_enabled + - registry_namespace != "kube-system" + +- name: Registry | Append nginx ingress templates to Registry Templates list when ingress enabled + set_fact: + registry_templates: "{{ registry_templates + [item] }}" + with_items: + - [{ name: registry-ing, file: registry-ing.yml, type: ing }] + when: ingress_nginx_enabled or ingress_alb_enabled + +- name: Registry | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}" + mode: 0644 + with_items: "{{ registry_templates }}" + register: registry_manifests + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Registry | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ registry_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}" + state: "latest" + with_items: "{{ registry_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Registry | Create PVC manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}" + mode: 0644 + with_items: + - { name: registry-pvc, file: registry-pvc.yml, type: pvc } + register: registry_manifests + when: + - registry_storage_class != none and registry_storage_class + - registry_disk_size != none and registry_disk_size + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Registry | Apply PVC manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ registry_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}" + state: "latest" + with_items: "{{ registry_manifests.results }}" + when: + - registry_storage_class != none and registry_storage_class + - registry_disk_size != none and registry_disk_size + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-cm.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-cm.yml.j2 new file mode 100644 index 0000000..b633dfd --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-cm.yml.j2 @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: 
registry-config + namespace: {{ registry_namespace }} +{% if registry_config %} +data: + config.yml: |- + {{ registry_config | to_yaml(indent=2, width=1337) | indent(width=4) }} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-cr.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-cr.yml.j2 new file mode 100644 index 0000000..45f3fc4 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-cr.yml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: psp:registry + namespace: {{ registry_namespace }} +rules: + - apiGroups: + - policy + resourceNames: + - registry + resources: + - podsecuritypolicies + verbs: + - use diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-crb.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-crb.yml.j2 new file mode 100644 index 0000000..8589420 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-crb.yml.j2 @@ -0,0 +1,13 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp:registry + namespace: {{ registry_namespace }} +subjects: + - kind: ServiceAccount + name: registry + namespace: {{ registry_namespace }} +roleRef: + kind: ClusterRole + name: psp:registry + apiGroup: rbac.authorization.k8s.io diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-ing.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-ing.yml.j2 new file mode 100644 index 0000000..29dfbba --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-ing.yml.j2 @@ -0,0 +1,27 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: registry + namespace: {{ registry_namespace }} +{% if registry_ingress_annotations %} + annotations: + {{ registry_ingress_annotations | to_nice_yaml(indent=2, width=1337) | indent(width=4) }} +{% endif %} +spec: +{% if registry_ingress_tls_secret %} + tls: + - hosts: + - {{ registry_ingress_host }} + secretName: {{ registry_ingress_tls_secret }} +{% endif %} + rules: + - host: {{ registry_ingress_host }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: registry + port: + number: {{ registry_port }} diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 new file mode 100644 index 0000000..c224337 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ registry_namespace }} + labels: + name: {{ registry_namespace }} diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-psp.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-psp.yml.j2 new file mode 100644 index 0000000..b04d8c2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-psp.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: registry + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + privileged: false 
+ allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 new file mode 100644 index 0000000..dc3fa5a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: registry-pvc + namespace: {{ registry_namespace }} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + accessModes: + - {{ registry_storage_access_mode }} + storageClassName: {{ registry_storage_class }} + resources: + requests: + storage: {{ registry_disk_size }} diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 new file mode 100644 index 0000000..47519f9 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 @@ -0,0 +1,115 @@ +--- +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: registry + namespace: {{ registry_namespace }} + labels: + k8s-app: registry + version: v{{ registry_image_tag }} + addonmanager.kubernetes.io/mode: Reconcile +spec: +{% if registry_storage_class != "" and registry_storage_access_mode == "ReadWriteMany" %} + replicas: {{ registry_replica_count }} +{% else %} + replicas: 1 +{% endif %} + selector: + matchLabels: + k8s-app: registry + version: v{{ registry_image_tag }} + template: + metadata: + labels: + k8s-app: registry + version: v{{ registry_image_tag }} + spec: + priorityClassName: {% if registry_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} + serviceAccountName: registry + securityContext: + fsGroup: 1000 + runAsUser: 1000 + containers: + - name: registry + image: {{ registry_image_repo }}:{{ registry_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /bin/registry + - serve + - /etc/docker/registry/config.yml + env: + - name: REGISTRY_HTTP_ADDR + value: :{{ registry_port }} + - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY + value: /var/lib/registry +{% if registry_htpasswd != "" %} + - name: REGISTRY_AUTH + value: "htpasswd" + - name: REGISTRY_AUTH_HTPASSWD_REALM + value: "Registry Realm" + - name: REGISTRY_AUTH_HTPASSWD_PATH + value: "/auth/htpasswd" +{% endif %} +{% if registry_tls_secret != "" %} + - name: REGISTRY_HTTP_TLS_CERTIFICATE + value: /etc/ssl/docker/tls.crt + - name: REGISTRY_HTTP_TLS_KEY + value: /etc/ssl/docker/tls.key +{% endif %} + volumeMounts: + - name: registry-pvc + mountPath: /var/lib/registry + - name: registry-config + mountPath: /etc/docker/registry +{% if registry_htpasswd != "" %} + - name: auth + mountPath: /auth + readOnly: true +{% endif %} +{% if registry_tls_secret != "" %} + - name: tls-cert + mountPath: /etc/ssl/docker + readOnly: true +{% endif %} + ports: + - containerPort: {{ registry_port }} + name: registry + protocol: TCP + livenessProbe: + httpGet: +{% if registry_tls_secret != "" %} + scheme: HTTPS +{% endif %} + path: / + port: {{ 
registry_port }} + readinessProbe: + httpGet: +{% if registry_tls_secret != "" %} + scheme: HTTPS +{% endif %} + path: / + port: {{ registry_port }} + volumes: + - name: registry-pvc +{% if registry_storage_class != "" %} + persistentVolumeClaim: + claimName: registry-pvc +{% else %} + emptyDir: {} +{% endif %} + - name: registry-config + configMap: + name: registry-config +{% if registry_htpasswd != "" %} + - name: auth + secret: + secretName: registry-secret + items: + - key: htpasswd + path: htpasswd +{% endif %} +{% if registry_tls_secret != "" %} + - name: tls-cert + secret: + secretName: {{ registry_tls_secret }} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-sa.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-sa.yml.j2 new file mode 100644 index 0000000..20f9515 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-sa.yml.j2 @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: registry + namespace: {{ registry_namespace }} diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-secrets.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-secrets.yml.j2 new file mode 100644 index 0000000..80727d2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-secrets.yml.j2 @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: registry-secret + namespace: {{ registry_namespace }} +type: Opaque +data: +{% if registry_htpasswd != "" %} + htpasswd: {{ registry_htpasswd | b64encode }} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 b/kubespray/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 new file mode 100644 index 0000000..5485aa8 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 @@ -0,0 +1,32 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: registry + namespace: {{ registry_namespace }} + labels: + k8s-app: registry + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeRegistry" +{% if registry_service_annotations %} + annotations: + {{ registry_service_annotations | to_nice_yaml(indent=2, width=1337) | indent(width=4) }} +{% endif %} +spec: + selector: + k8s-app: registry + type: {{ registry_service_type }} +{% if registry_service_type == "ClusterIP" and registry_service_cluster_ip != "" %} + clusterIP: {{ registry_service_cluster_ip }} +{% endif %} +{% if registry_service_type == "LoadBalancer" and registry_service_loadbalancer_ip != "" %} + loadBalancerIP: {{ registry_service_loadbalancer_ip }} +{% endif %} + ports: + - name: registry + port: {{ registry_port }} + protocol: TCP + targetPort: {{ registry_port }} +{% if registry_service_type == "NodePort" and registry_service_nodeport != "" %} + nodePort: {{ registry_service_nodeport }} +{% endif %} diff --git a/kubespray/roles/kubernetes-apps/snapshots/cinder-csi/defaults/main.yml b/kubespray/roles/kubernetes-apps/snapshots/cinder-csi/defaults/main.yml new file mode 100644 index 0000000..7b5dd73 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/snapshots/cinder-csi/defaults/main.yml @@ -0,0 +1,5 @@ +--- +snapshot_classes: + - name: cinder-csi-snapshot + is_default: false + force_create: true diff --git a/kubespray/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml b/kubespray/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml new file mode 100644 index 0000000..7e9116f --- /dev/null +++ 
b/kubespray/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: Kubernetes Snapshots | Copy Cinder CSI Snapshot Class template + template: + src: "cinder-csi-snapshot-class.yml.j2" + dest: "{{ kube_config_dir }}/cinder-csi-snapshot-class.yml" + mode: 0644 + register: manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kubernetes Snapshots | Add Cinder CSI Snapshot Class + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/cinder-csi-snapshot-class.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - manifests.changed diff --git a/kubespray/roles/kubernetes-apps/snapshots/cinder-csi/templates/cinder-csi-snapshot-class.yml.j2 b/kubespray/roles/kubernetes-apps/snapshots/cinder-csi/templates/cinder-csi-snapshot-class.yml.j2 new file mode 100644 index 0000000..b7e649f --- /dev/null +++ b/kubespray/roles/kubernetes-apps/snapshots/cinder-csi/templates/cinder-csi-snapshot-class.yml.j2 @@ -0,0 +1,13 @@ +{% for class in snapshot_classes %} +--- +kind: VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1beta1 +metadata: + name: "{{ class.name }}" + annotations: + storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}" +driver: cinder.csi.openstack.org +deletionPolicy: Delete +parameters: + force-create: "{{ class.force_create }}" +{% endfor %} diff --git a/kubespray/roles/kubernetes-apps/snapshots/meta/main.yml b/kubespray/roles/kubernetes-apps/snapshots/meta/main.yml new file mode 100644 index 0000000..0eed56c --- /dev/null +++ b/kubespray/roles/kubernetes-apps/snapshots/meta/main.yml @@ -0,0 +1,14 @@ +--- +dependencies: + - role: kubernetes-apps/snapshots/snapshot-controller + when: + - cinder_csi_enabled or csi_snapshot_controller_enabled + tags: + - snapshot-controller + + - role: kubernetes-apps/snapshots/cinder-csi + when: + - cinder_csi_enabled + tags: + - snapshot + - cinder-csi-driver diff --git a/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/defaults/main.yml b/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/defaults/main.yml new file mode 100644 index 0000000..c72dfb2 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/defaults/main.yml @@ -0,0 +1,3 @@ +--- +snapshot_controller_replicas: 1 +snapshot_controller_namespace: kube-system diff --git a/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml b/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml new file mode 100644 index 0000000..8663e8a --- /dev/null +++ b/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: check if snapshot namespace exists + register: snapshot_namespace_exists + kube: + kubectl: "{{ bin_dir }}/kubectl" + name: "{{ snapshot_controller_namespace }}" + resource: "namespace" + state: "exists" + when: inventory_hostname == groups['kube_control_plane'][0] + tags: snapshot-controller + +- name: Snapshot Controller | Generate Manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: snapshot-ns, file: snapshot-ns.yml, apply: not snapshot_namespace_exists} + - {name: rbac-snapshot-controller, file: rbac-snapshot-controller.yml} + - {name: snapshot-controller, file: snapshot-controller.yml} + register: snapshot_controller_manifests + when: + - inventory_hostname == 
groups['kube_control_plane'][0] + - item.apply | default(True) | bool + tags: snapshot-controller + +- name: Snapshot Controller | Apply Manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ snapshot_controller_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + tags: snapshot-controller diff --git a/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/templates/rbac-snapshot-controller.yml.j2 b/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/templates/rbac-snapshot-controller.yml.j2 new file mode 100644 index 0000000..9413376 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/templates/rbac-snapshot-controller.yml.j2 @@ -0,0 +1,85 @@ +# RBAC file for the snapshot controller. +# +# The snapshot controller implements the control loop for CSI snapshot functionality. +# It should be installed as part of the base Kubernetes distribution in an appropriate +# namespace for components implementing base system functionality. For installing with +# Vanilla Kubernetes, kube-system makes sense for the namespace. + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: snapshot-controller + namespace: {{ snapshot_controller_namespace }} + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # rename if there are conflicts + name: snapshot-controller-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-role +subjects: + - kind: ServiceAccount + name: snapshot-controller + namespace: {{ snapshot_controller_namespace }} +roleRef: + kind: ClusterRole + # change the name also here if the ClusterRole gets renamed + name: snapshot-controller-runner + apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: {{ snapshot_controller_namespace }} + name: snapshot-controller-leaderelection +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-leaderelection + namespace: {{ snapshot_controller_namespace }} +subjects: + - kind: ServiceAccount + name: snapshot-controller + namespace: {{ snapshot_controller_namespace }} +roleRef: + kind: Role + name: snapshot-controller-leaderelection + apiGroup: rbac.authorization.k8s.io diff --git 
a/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-controller.yml.j2 b/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-controller.yml.j2 new file mode 100644 index 0000000..d17ffb3 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-controller.yml.j2 @@ -0,0 +1,40 @@ +# This YAML file shows how to deploy the snapshot controller + +# The snapshot controller implements the control loop for CSI snapshot functionality. +# It should be installed as part of the base Kubernetes distribution in an appropriate +# namespace for components implementing base system functionality. For installing with +# Vanilla Kubernetes, kube-system makes sense for the namespace. + +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: snapshot-controller + namespace: {{ snapshot_controller_namespace }} +spec: + replicas: {{ snapshot_controller_replicas }} + selector: + matchLabels: + app: snapshot-controller + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: snapshot-controller + spec: + serviceAccount: snapshot-controller + containers: + - name: snapshot-controller + image: {{ snapshot_controller_image_repo }}:{{ snapshot_controller_image_tag }} + args: + - "--v=5" + - "--leader-election=false" + imagePullPolicy: {{ k8s_image_pull_policy }} diff --git a/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-ns.yml.j2 b/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-ns.yml.j2 new file mode 100644 index 0000000..bb30d60 --- /dev/null +++ b/kubespray/roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ snapshot_controller_namespace }} + labels: + name: {{ snapshot_controller_namespace }} diff --git a/kubespray/roles/kubernetes/client/defaults/main.yml b/kubespray/roles/kubernetes/client/defaults/main.yml new file mode 100644 index 0000000..83506a4 --- /dev/null +++ b/kubespray/roles/kubernetes/client/defaults/main.yml @@ -0,0 +1,8 @@ +--- +kubeconfig_localhost: false +kubeconfig_localhost_ansible_host: false +kubectl_localhost: false +artifacts_dir: "{{ inventory_dir }}/artifacts" + +kube_config_dir: "/etc/kubernetes" +kube_apiserver_port: "6443" diff --git a/kubespray/roles/kubernetes/client/tasks/main.yml b/kubespray/roles/kubernetes/client/tasks/main.yml new file mode 100644 index 0000000..cb9e81e --- /dev/null +++ b/kubespray/roles/kubernetes/client/tasks/main.yml @@ -0,0 +1,112 @@ +--- +- name: Set external kube-apiserver endpoint + set_fact: + external_apiserver_address: >- + {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined -%} + {{ loadbalancer_apiserver.address }} + {%- elif kubeconfig_localhost_ansible_host is defined and kubeconfig_localhost_ansible_host -%} + {{ hostvars[groups['kube_control_plane'][0]].ansible_host }} + {%- else -%} + {{ kube_apiserver_access_address }} + {%- endif -%} + external_apiserver_port: >- + {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and loadbalancer_apiserver.port is defined -%} + {{ 
loadbalancer_apiserver.port|default(kube_apiserver_port) }} + {%- else -%} + {{ kube_apiserver_port }} + {%- endif -%} + tags: + - facts + +- name: Create kube config dir for current/ansible become user + file: + path: "{{ ansible_env.HOME | default('/root') }}/.kube" + mode: "0700" + state: directory + +- name: Copy admin kubeconfig to current/ansible become user home + copy: + src: "{{ kube_config_dir }}/admin.conf" + dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config" + remote_src: yes + mode: "0600" + backup: yes + +- name: Create kube artifacts dir + file: + path: "{{ artifacts_dir }}" + mode: "0750" + state: directory + delegate_to: localhost + connection: local + become: no + run_once: yes + when: kubeconfig_localhost + +- name: Wait for k8s apiserver + wait_for: + host: "{{ kube_apiserver_access_address }}" + port: "{{ kube_apiserver_port }}" + timeout: 180 + +- name: Get admin kubeconfig from remote host + slurp: + src: "{{ kube_config_dir }}/admin.conf" + run_once: yes + register: raw_admin_kubeconfig + when: kubeconfig_localhost + +- name: Convert kubeconfig to YAML + set_fact: + admin_kubeconfig: "{{ raw_admin_kubeconfig.content | b64decode | from_yaml }}" + when: kubeconfig_localhost + +- name: Override username in kubeconfig + set_fact: + final_admin_kubeconfig: "{{ admin_kubeconfig | combine(override_cluster_name, recursive=true) | combine(override_context, recursive=true) | combine(override_user, recursive=true) }}" + vars: + cluster_infos: "{{ admin_kubeconfig['clusters'][0]['cluster'] }}" + user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}" + username: "kubernetes-admin-{{ cluster_name }}" + context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}" + override_cluster_name: "{{ { 'clusters': [ { 'cluster': (cluster_infos|combine({'server': 'https://'+external_apiserver_address+':'+(external_apiserver_port|string)})), 'name': cluster_name } ] } }}" + override_context: "{{ { 'contexts': [ { 'context': { 'user': username, 'cluster': cluster_name }, 'name': context } ], 'current-context': context } }}" + override_user: "{{ { 'users': [ { 'name': username, 'user': user_certs } ] } }}" + when: kubeconfig_localhost + +- name: Write admin kubeconfig on ansible host + copy: + content: "{{ final_admin_kubeconfig | to_nice_yaml(indent=2) }}" + dest: "{{ artifacts_dir }}/admin.conf" + mode: 0600 + delegate_to: localhost + connection: local + become: no + run_once: yes + when: kubeconfig_localhost + +- name: Copy kubectl binary to ansible host + fetch: + src: "{{ bin_dir }}/kubectl" + dest: "{{ artifacts_dir }}/kubectl" + flat: yes + validate_checksum: no + register: copy_binary_result + until: copy_binary_result is not failed + retries: 20 + become: no + run_once: yes + when: kubectl_localhost + +- name: create helper script kubectl.sh on ansible host + copy: + content: | + #!/bin/bash + ${BASH_SOURCE%/*}/kubectl --kubeconfig=${BASH_SOURCE%/*}/admin.conf "$@" + dest: "{{ artifacts_dir }}/kubectl.sh" + mode: 0755 + become: no + run_once: yes + delegate_to: localhost + connection: local + when: kubectl_localhost and kubeconfig_localhost diff --git a/kubespray/roles/kubernetes/control-plane/defaults/main/etcd.yml b/kubespray/roles/kubernetes/control-plane/defaults/main/etcd.yml new file mode 100644 index 0000000..344ce9b --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/defaults/main/etcd.yml @@ -0,0 +1,31 @@ +--- +# Set etcd user/group +etcd_owner: etcd + +# Note: This does not set up DNS entries. 
It simply adds the following DNS +# entries to the certificate +etcd_cert_alt_names: + - "etcd.kube-system.svc.{{ dns_domain }}" + - "etcd.kube-system.svc" + - "etcd.kube-system" + - "etcd" +etcd_cert_alt_ips: [] + +etcd_heartbeat_interval: "250" +etcd_election_timeout: "5000" + +# etcd_snapshot_count: "10000" + +etcd_metrics: "basic" + +## A dictionary of extra environment variables to add to etcd.env, formatted like: +## etcd_extra_vars: +## var1: "value1" +## var2: "value2" +## Note this is different from the etcd role, which uses the ETCD_ prefix, caps, and underscores +etcd_extra_vars: {} + +# etcd_quota_backend_bytes: "2147483648" +# etcd_max_request_bytes: "1572864" + +etcd_compaction_retention: "8" diff --git a/kubespray/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml b/kubespray/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml new file mode 100644 index 0000000..52346fa --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml @@ -0,0 +1,118 @@ +--- +# bind address for kube-proxy +kube_proxy_bind_address: '0.0.0.0' + +# acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the +# default value of 'application/json'. This field will control all connections to the server used by a particular +# client. +kube_proxy_client_accept_content_types: '' + +# burst allows extra queries to accumulate when a client is exceeding its rate. +kube_proxy_client_burst: 10 + +# contentType is the content type used when sending data to the server from this client. +kube_proxy_client_content_type: application/vnd.kubernetes.protobuf + +# kubeconfig is the path to a KubeConfig file. +# Leave as empty string to generate from other fields +kube_proxy_client_kubeconfig: '' + +# qps controls the number of queries per second allowed for this connection. +kube_proxy_client_qps: 5 + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +kube_proxy_config_sync_period: 15m0s + +### Conntrack +# maxPerCore is the maximum number of NAT connections to track +# per CPU core (0 to leave the limit as-is and ignore min). +kube_proxy_conntrack_max_per_core: 32768 + +# min is the minimum value of connect-tracking records to allocate, +# regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is). +kube_proxy_conntrack_min: 131072 + +# tcpCloseWaitTimeout is how long an idle conntrack entry +# in CLOSE_WAIT state will remain in the conntrack +# table. (e.g. '60s'). Must be greater than 0 to set. +kube_proxy_conntrack_tcp_close_wait_timeout: 1h0m0s + +# tcpEstablishedTimeout is how long an idle TCP connection will be kept open +# (e.g. '2s'). Must be greater than 0 to set. +kube_proxy_conntrack_tcp_established_timeout: 24h0m0s + +# Enables profiling via web interface on /debug/pprof handler. +# Profiling handlers will be handled by metrics server. +kube_proxy_enable_profiling: false + +# bind address for kube-proxy health check +kube_proxy_healthz_bind_address: 0.0.0.0:10256 + +# If using the pure iptables proxy, SNAT everything. Note that it breaks any +# policy engine. +kube_proxy_masquerade_all: false + +# If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. +# Must be within the range [0, 31]. +kube_proxy_masquerade_bit: 14 + +# The minimum interval of how often the iptables or ipvs rules can be refreshed as +# endpoints and services change (e.g. '5s', '1m', '2h22m').
+kube_proxy_min_sync_period: 0s + +# The maximum interval of how often iptables or ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). +# Must be greater than 0. +kube_proxy_sync_period: 30s + +# A comma-separated list of CIDRs which the ipvs proxier should not touch when cleaning up IPVS rules. +kube_proxy_exclude_cidrs: [] + +# The ipvs scheduler type when proxy mode is ipvs +# rr: round-robin +# lc: least connection +# dh: destination hashing +# sh: source hashing +# sed: shortest expected delay +# nq: never queue +kube_proxy_scheduler: rr + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# kube_proxy_tcp_timeout is the timeout value used for idle IPVS TCP sessions. +# The default value is 0, which preserves the current timeout value on the system. +kube_proxy_tcp_timeout: 0s + +# kube_proxy_tcp_fin_timeout is the timeout value used for IPVS TCP sessions after receiving a FIN. +# The default value is 0, which preserves the current timeout value on the system. +kube_proxy_tcp_fin_timeout: 0s + +# kube_proxy_udp_timeout is the timeout value used for IPVS UDP packets. +# The default value is 0, which preserves the current timeout value on the system. +kube_proxy_udp_timeout: 0s + +# The IP address and port for the metrics server to serve on +# (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces) +kube_proxy_metrics_bind_address: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +kube_proxy_oom_score_adj: -999 + +# portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed +# in order to proxy service traffic. If unspecified, 0, or (0-0) then ports will be randomly chosen. +kube_proxy_port_range: '' + +# udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxyMode=userspace. +kube_proxy_udp_idle_timeout: 250ms diff --git a/kubespray/roles/kubernetes/control-plane/defaults/main/kube-scheduler.yml b/kubespray/roles/kubernetes/control-plane/defaults/main/kube-scheduler.yml new file mode 100644 index 0000000..e61bcb7 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/defaults/main/kube-scheduler.yml @@ -0,0 +1,33 @@ +--- +# Extra args passed by kubeadm +kube_kubeadm_scheduler_extra_args: {} + +# Associated interface must be reachable by the rest of the cluster, and by +# CLI/web clients. +kube_scheduler_bind_address: 0.0.0.0 + +# ClientConnection options (e.g. Burst, QPS) except for kubeconfig. +kube_scheduler_client_conn_extra_opts: {} + +# Additional KubeSchedulerConfiguration settings (e.g. metricsBindAddress). +kube_scheduler_config_extra_opts: {} + +# List of scheduler extenders (dicts), each holding the values of how to +# communicate with the extender. +kube_scheduler_extenders: [] + +# Leader Election options (e.g. ResourceName, RetryPeriod) except for +# LeaseDuration and RenewDeadline, which are defined in the following vars.
+kube_scheduler_leader_elect_extra_opts: {} + +# Leader election lease duration +kube_scheduler_leader_elect_lease_duration: 15s + +# Leader election renew deadline +kube_scheduler_leader_elect_renew_deadline: 10s + +# List of scheduling profiles (dicts) supported by kube-scheduler +kube_scheduler_profiles: [] + +# Extra volume mounts +scheduler_extra_volumes: {} diff --git a/kubespray/roles/kubernetes/control-plane/defaults/main/main.yml b/kubespray/roles/kubernetes/control-plane/defaults/main/main.yml new file mode 100644 index 0000000..32cabb9 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/defaults/main/main.yml @@ -0,0 +1,230 @@ +--- +# disable upgrade cluster +upgrade_cluster_setup: false + +# By default the external API listens on all interfaces, this can be changed to +# listen on a specific address/interface. +# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost, +# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} too. +kube_apiserver_bind_address: 0.0.0.0 + +# A port range to reserve for services with NodePort visibility. +# Inclusive at both ends of the range. +kube_apiserver_node_port_range: "30000-32767" + +# ETCD backend for k8s data +kube_apiserver_storage_backend: etcd3 + +# CIS 1.2.26 +# Validate that the service account token +# in the request is actually present in etcd. +kube_apiserver_service_account_lookup: true + +kube_etcd_cacert_file: ca.pem +kube_etcd_cert_file: node-{{ inventory_hostname }}.pem +kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem + +# Associated interfaces must be reachable by the rest of the cluster, and by +# CLI/web clients. +kube_controller_manager_bind_address: 0.0.0.0 + +# Leader election lease durations and timeouts for controller-manager +kube_controller_manager_leader_elect_lease_duration: 15s +kube_controller_manager_leader_elect_renew_deadline: 10s + +# discovery_timeout modifies the discovery timeout +discovery_timeout: 5m0s + +# Instruct first master to refresh kubeadm token +kubeadm_refresh_token: true + +# Scale down coredns replicas to 0 if not using coredns dns_mode +kubeadm_scale_down_coredns_enabled: true + +# audit support +kubernetes_audit: false +# path to audit log file +audit_log_path: /var/log/audit/kube-apiserver-audit.log +# num days +audit_log_maxage: 30 +# the num of audit logs to retain +audit_log_maxbackups: 1 +# the max size in MB to retain +audit_log_maxsize: 100 +# policy file +audit_policy_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml" +# custom audit policy rules (to replace the default ones) +# audit_policy_custom_rules: | +# - level: None +# users: [] +# verbs: [] +# resources: [] + +# audit log hostpath +audit_log_name: audit-logs +audit_log_hostpath: /var/log/kubernetes/audit +audit_log_mountpath: "{{ audit_log_path | dirname }}" + +# audit policy hostpath +audit_policy_name: audit-policy +audit_policy_hostpath: "{{ audit_policy_file | dirname }}" +audit_policy_mountpath: "{{ audit_policy_hostpath }}" + +# audit webhook support +kubernetes_audit_webhook: false + +# path to audit webhook config file +audit_webhook_config_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-webhook-config.yaml" +audit_webhook_server_url: "https://audit.app" +audit_webhook_server_extra_args: {} +audit_webhook_mode: batch +audit_webhook_batch_max_size: 100 +audit_webhook_batch_max_wait: 1s + +kube_controller_node_monitor_grace_period: 40s
+kube_controller_node_monitor_period: 5s +kube_controller_terminated_pod_gc_threshold: 12500 +kube_apiserver_request_timeout: "1m0s" +kube_apiserver_pod_eviction_not_ready_timeout_seconds: "300" +kube_apiserver_pod_eviction_unreachable_timeout_seconds: "300" + +# 1.10+ admission plugins +kube_apiserver_enable_admission_plugins: [] + +# enable admission plugins configuration +kube_apiserver_admission_control_config_file: false + +# data structure to configure EventRateLimit admission plugin +# this should have the following structure: +# kube_apiserver_admission_event_rate_limits: +#   <limit_name>: +#     type: <limit_type> +#     qps: <qps> +#     burst: <burst> +#     cache_size: <cache_size> +kube_apiserver_admission_event_rate_limits: {} + +kube_pod_security_use_default: false +kube_pod_security_default_enforce: baseline +kube_pod_security_default_enforce_version: latest +kube_pod_security_default_audit: restricted +kube_pod_security_default_audit_version: latest +kube_pod_security_default_warn: restricted +kube_pod_security_default_warn_version: latest +kube_pod_security_exemptions_usernames: [] +kube_pod_security_exemptions_runtime_class_names: [] +kube_pod_security_exemptions_namespaces: + - kube-system + +# 1.10+ list of disabled admission plugins +kube_apiserver_disable_admission_plugins: [] + +# extra runtime config +kube_api_runtime_config: [] + +## Enable/Disable Kube API Server Authentication Methods +kube_token_auth: false +kube_oidc_auth: false + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## Variables for webhook token authz https://kubernetes.io/docs/reference/access-authn-authz/webhook/ +# kube_webhook_authorization_url: https://... +kube_webhook_authorization: false +kube_webhook_authorization_url_skip_tls_verify: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' +# Copy oidc CA file to the following path if needed +# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem +# Optionally include a base64-encoded oidc CA cert +# kube_oidc_ca_cert: c3RhY2thYnVzZS5jb20... + +# List of the preferred NodeAddressTypes to use for kubelet connections.
+kubelet_preferred_address_types: 'InternalDNS,InternalIP,Hostname,ExternalDNS,ExternalIP' + +## Extra args for k8s components passing by kubeadm +kube_kubeadm_apiserver_extra_args: {} +kube_kubeadm_controller_extra_args: {} + +## Extra control plane host volume mounts +## Example: +# apiserver_extra_volumes: +# - name: name +# hostPath: /host/path +# mountPath: /mount/path +# readOnly: true +apiserver_extra_volumes: {} +controller_manager_extra_volumes: {} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false +kube_encrypt_token: "{{ lookup('password', credentials_dir + '/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}" +# Must be either: aescbc, secretbox or aesgcm +kube_encryption_algorithm: "secretbox" +# Which kubernetes resources to encrypt +kube_encryption_resources: [secrets] + +# If non-empty, will use this string as identification instead of the actual hostname +kube_override_hostname: >- + {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- else -%} + {{ inventory_hostname }} + {%- endif -%} + +secrets_encryption_query: "resources[*].providers[0].{{kube_encryption_algorithm}}.keys[0].secret" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +auto_renew_certificates_systemd_calendar: "{{ 'Mon *-*-1,2,3,4,5,6,7 03:' ~ + groups['kube_control_plane'].index(inventory_hostname) ~ '0:00' }}" +# kubeadm renews all the certificates during control plane upgrade. 
+# If we have requirement like without renewing certs upgrade the cluster, +# we can opt out from the default behavior by setting kubeadm_upgrade_auto_cert_renewal to false +kubeadm_upgrade_auto_cert_renewal: true diff --git a/kubespray/roles/kubernetes/control-plane/handlers/main.yml b/kubespray/roles/kubernetes/control-plane/handlers/main.yml new file mode 100644 index 0000000..e6bc321 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/handlers/main.yml @@ -0,0 +1,123 @@ +--- +- name: Master | restart kubelet + command: /bin/true + notify: + - Master | reload systemd + - Master | reload kubelet + - Master | wait for master static pods + +- name: Master | wait for master static pods + command: /bin/true + notify: + - Master | wait for the apiserver to be running + - Master | wait for kube-scheduler + - Master | wait for kube-controller-manager + +- name: Master | Restart apiserver + command: /bin/true + notify: + - Master | Remove apiserver container docker + - Master | Remove apiserver container containerd/crio + - Master | wait for the apiserver to be running + +- name: Master | Restart kube-scheduler + command: /bin/true + notify: + - Master | Remove scheduler container docker + - Master | Remove scheduler container containerd/crio + - Master | wait for kube-scheduler + +- name: Master | Restart kube-controller-manager + command: /bin/true + notify: + - Master | Remove controller manager container docker + - Master | Remove controller manager container containerd/crio + - Master | wait for kube-controller-manager + +- name: Master | reload systemd + systemd: + daemon_reload: true + +- name: Master | reload kubelet + service: + name: kubelet + state: restarted + +- name: Master | Remove apiserver container docker + shell: docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f + register: remove_apiserver_container + retries: 10 + until: remove_apiserver_container.rc == 0 + delay: 1 + when: container_manager == "docker" + +- name: Master | Remove apiserver container containerd/crio + shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + register: remove_apiserver_container + retries: 10 + until: remove_apiserver_container.rc == 0 + delay: 1 + when: container_manager in ['containerd', 'crio'] + +- name: Master | Remove scheduler container docker + shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + register: remove_scheduler_container + retries: 10 + until: remove_scheduler_container.rc == 0 + delay: 1 + when: container_manager == "docker" + +- name: Master | Remove scheduler container containerd/crio + shell: "{{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + register: remove_scheduler_container + retries: 10 + until: remove_scheduler_container.rc == 0 + delay: 1 + when: container_manager in ['containerd', 'crio'] + +- name: Master | Remove controller manager container docker + shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + register: remove_cm_container + retries: 10 + until: remove_cm_container.rc == 0 + delay: 1 + when: container_manager == "docker" + +- name: Master | Remove controller manager container containerd/crio + shell: "{{ bin_dir }}/crictl pods --name 
kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" + register: remove_cm_container + retries: 10 + until: remove_cm_container.rc == 0 + delay: 1 + when: container_manager in ['containerd', 'crio'] + +- name: Master | wait for kube-scheduler + vars: + endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}" + uri: + url: https://{{ endpoint }}:10259/healthz + validate_certs: no + register: scheduler_result + until: scheduler_result.status == 200 + retries: 60 + delay: 1 + +- name: Master | wait for kube-controller-manager + vars: + endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}" + uri: + url: https://{{ endpoint }}:10257/healthz + validate_certs: no + register: controller_manager_result + until: controller_manager_result.status == 200 + retries: 60 + delay: 1 + +- name: Master | wait for the apiserver to be running + uri: + url: "{{ kube_apiserver_endpoint }}/healthz" + validate_certs: no + register: result + until: result.status == 200 + retries: 60 + delay: 1 diff --git a/kubespray/roles/kubernetes/control-plane/meta/main.yml b/kubespray/roles/kubernetes/control-plane/meta/main.yml new file mode 100644 index 0000000..2657006 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/meta/main.yml @@ -0,0 +1,11 @@ +--- +dependencies: + - role: kubernetes/tokens + when: kube_token_auth + tags: + - k8s-secrets + - role: adduser + user: "{{ addusers.etcd }}" + when: + - etcd_deployment_type == "kubeadm" + - not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) diff --git a/kubespray/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml b/kubespray/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml new file mode 100644 index 0000000..d01f511 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml @@ -0,0 +1,19 @@ +--- + +- name: Check which kube-control nodes are already members of the cluster + command: "{{ bin_dir }}/kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o json" + register: kube_control_planes_raw + ignore_errors: yes + changed_when: false + +- name: Set fact joined_control_panes + set_fact: + joined_control_planes: "{{ ((kube_control_planes_raw.stdout| from_json)['items'])| default([]) | map (attribute='metadata') | map (attribute='name') | list }}" + delegate_to: item + loop: "{{ groups['kube_control_plane'] }}" + when: kube_control_planes_raw is succeeded + run_once: yes + +- name: Set fact first_kube_control_plane + set_fact: + first_kube_control_plane: "{{ joined_control_planes|default([]) | first | default(groups['kube_control_plane']|first) }}" diff --git a/kubespray/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml b/kubespray/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml new file mode 100644 index 0000000..b88f57c --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml @@ -0,0 +1,42 @@ +--- +- name: Check if secret for encrypting data at rest already exist + stat: + path: "{{ kube_cert_dir }}/secrets_encryption.yaml" + get_attributes: no + get_checksum: no + get_mime: no + register: secrets_encryption_file + +- name: Slurp secrets_encryption file if it exists + slurp: + src: "{{ kube_cert_dir }}/secrets_encryption.yaml" + register: secret_file_encoded + when: 
secrets_encryption_file.stat.exists + +- name: Base 64 Decode slurped secrets_encryption.yaml file + set_fact: + secret_file_decoded: "{{ secret_file_encoded['content'] | b64decode | from_yaml }}" + when: secrets_encryption_file.stat.exists + +- name: Extract secret value from secrets_encryption.yaml + set_fact: + kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}" + when: secrets_encryption_file.stat.exists + +- name: Set kube_encrypt_token across master nodes + set_fact: + kube_encrypt_token: "{{ kube_encrypt_token_extracted }}" + delegate_to: "{{ item }}" + delegate_facts: true + with_inventory_hostnames: kube_control_plane + when: kube_encrypt_token_extracted is defined + +- name: Write secrets for encrypting secret data at rest + template: + src: secrets_encryption.yaml.j2 + dest: "{{ kube_cert_dir }}/secrets_encryption.yaml" + owner: root + group: "{{ kube_cert_group }}" + mode: 0640 + tags: + - kube-apiserver diff --git a/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml new file mode 100644 index 0000000..36bb627 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml @@ -0,0 +1,28 @@ +--- +- name: Backup old certs and keys + copy: + src: "{{ kube_cert_dir }}/{{ item }}" + dest: "{{ kube_cert_dir }}/{{ item }}.old" + mode: preserve + remote_src: yes + with_items: + - apiserver.crt + - apiserver.key + - apiserver-kubelet-client.crt + - apiserver-kubelet-client.key + - front-proxy-client.crt + - front-proxy-client.key + ignore_errors: true # noqa ignore-errors + +- name: Backup old confs + copy: + src: "{{ kube_config_dir }}/{{ item }}" + dest: "{{ kube_config_dir }}/{{ item }}.old" + mode: preserve + remote_src: yes + with_items: + - admin.conf + - controller-manager.conf + - kubelet.conf + - scheduler.conf + ignore_errors: true # noqa ignore-errors diff --git a/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-etcd.yml b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-etcd.yml new file mode 100644 index 0000000..ae47354 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-etcd.yml @@ -0,0 +1,26 @@ +--- +- name: Calculate etcd cert serial + command: "openssl x509 -in {{ kube_cert_dir }}/apiserver-etcd-client.crt -noout -serial" + register: "etcd_client_cert_serial_result" + changed_when: false + tags: + - network + +- name: Set etcd_client_cert_serial + set_fact: + etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}" + tags: + - network + +- name: Ensure etcdctl script is installed + import_role: + name: etcdctl + when: etcd_deployment_type == "kubeadm" + +- name: Set ownership for etcd data directory + file: + path: "{{ etcd_data_dir }}" + owner: "{{ etcd_owner }}" + group: "{{ etcd_owner }}" + mode: 0700 + when: etcd_deployment_type == "kubeadm" diff --git a/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml new file mode 100644 index 0000000..8f2f38e --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml @@ -0,0 +1,24 @@ +--- + +- name: Update server field in component kubeconfigs + lineinfile: + dest: "{{ kube_config_dir }}/{{ item }}" + regexp: '^ server: https' + line: ' server: {{ kube_apiserver_endpoint }}' + backup: yes + with_items: + - admin.conf + - controller-manager.conf + - kubelet.conf + - 
scheduler.conf + notify: + - "Master | Restart kube-controller-manager" + - "Master | Restart kube-scheduler" + - "Master | reload kubelet" + +- name: Update etcd-servers for apiserver + lineinfile: + dest: "{{ kube_config_dir }}/manifests/kube-apiserver.yaml" + regexp: '^ - --etcd-servers=' + line: ' - --etcd-servers={{ etcd_access_addresses }}' + when: etcd_deployment_type != "kubeadm" diff --git a/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml new file mode 100644 index 0000000..a4869fe --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml @@ -0,0 +1,79 @@ +--- +- name: Set kubeadm_discovery_address + set_fact: + kubeadm_discovery_address: >- + {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%} + {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} + {%- else -%} + {{ kube_apiserver_endpoint | regex_replace('https://', '') }} + {%- endif %} + tags: + - facts + +- name: Upload certificates so they are fresh and not expired + command: >- + {{ bin_dir }}/kubeadm init phase + --config {{ kube_config_dir }}/kubeadm-config.yaml + upload-certs + --upload-certs + register: kubeadm_upload_cert + when: + - inventory_hostname == first_kube_control_plane + - not kube_external_ca_mode + +- name: Parse certificate key if not set + set_fact: + kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}" + run_once: yes + when: + - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined + - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped + +- name: Create kubeadm ControlPlane config + template: + src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2" + dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml" + mode: 0640 + backup: yes + when: + - inventory_hostname != first_kube_control_plane + - not kubeadm_already_run.stat.exists + +- name: Wait for k8s apiserver + wait_for: + host: "{{ kubeadm_discovery_address.split(':')[0] }}" + port: "{{ kubeadm_discovery_address.split(':')[1] }}" + timeout: 180 + + +- name: check already run + debug: + msg: "{{ kubeadm_already_run.stat.exists }}" + +- name: Reset cert directory + shell: >- + if [ -f /etc/kubernetes/manifests/kube-apiserver.yaml ]; then + {{ bin_dir }}/kubeadm reset -f --cert-dir {{ kube_cert_dir }}; + fi + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" + when: + - inventory_hostname != first_kube_control_plane + - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists + - not kube_external_ca_mode + +- name: Joining control plane node to the cluster. 
+ command: >- + {{ bin_dir }}/kubeadm join + --config {{ kube_config_dir }}/kubeadm-controlplane.yaml + --ignore-preflight-errors=all + --skip-phases={{ kubeadm_join_phases_skip | join(',') }} + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" + register: kubeadm_join_control_plane + retries: 3 + throttle: 1 + until: kubeadm_join_control_plane is succeeded + when: + - inventory_hostname != first_kube_control_plane + - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists diff --git a/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml new file mode 100644 index 0000000..d9f7304 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml @@ -0,0 +1,248 @@ +--- +- name: Install OIDC certificate + copy: + content: "{{ kube_oidc_ca_cert | b64decode }}" + dest: "{{ kube_oidc_ca_file }}" + owner: root + group: root + mode: "0644" + when: + - kube_oidc_auth + - kube_oidc_ca_cert is defined + +- name: kubeadm | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + get_attributes: no + get_checksum: no + get_mime: no + register: kubeadm_already_run + +- name: kubeadm | Backup kubeadm certs / kubeconfig + import_tasks: kubeadm-backup.yml + when: + - kubeadm_already_run.stat.exists + +- name: kubeadm | aggregate all SANs + set_fact: + apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}" + vars: + sans_base: + - "kubernetes" + - "kubernetes.default" + - "kubernetes.default.svc" + - "kubernetes.default.svc.{{ dns_domain }}" + - "{{ kube_apiserver_ip }}" + - "localhost" + - "127.0.0.1" + sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}" + sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}" + sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}" + sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}" + sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}" + sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}" + sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}" + sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}" + sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}" + sans_kube_vip_address: "{{ [kube_vip_address] if kube_vip_address is defined and kube_vip_address else [] }}" + tags: facts + +- name: Create audit-policy directory + file: + path: "{{ audit_policy_file | dirname }}" + state: directory + mode: 0640 + when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false) + +- name: Write api audit policy yaml + template: + src: apiserver-audit-policy.yaml.j2 + dest: "{{ audit_policy_file }}" + mode: 0640 + when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false) + +- name: Write api audit webhook config yaml + 
template: + src: apiserver-audit-webhook-config.yaml.j2 + dest: "{{ audit_webhook_config_file }}" + mode: 0640 + when: kubernetes_audit_webhook|default(false) + +# Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint. +- name: set kubeadm_config_api_fqdn define + set_fact: + kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}" + when: loadbalancer_apiserver is defined + +- name: Set kubeadm api version to v1beta3 + set_fact: + kubeadmConfig_api_version: v1beta3 + +- name: kubeadm | Create kubeadm config + template: + src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2" + dest: "{{ kube_config_dir }}/kubeadm-config.yaml" + mode: 0640 + +- name: kubeadm | Create directory to store admission control configurations + file: + path: "{{ kube_config_dir }}/admission-controls" + state: directory + mode: 0640 + when: kube_apiserver_admission_control_config_file + +- name: kubeadm | Push admission control config file + template: + src: "admission-controls.yaml.j2" + dest: "{{ kube_config_dir }}/admission-controls/admission-controls.yaml" + mode: 0640 + when: kube_apiserver_admission_control_config_file + +- name: kubeadm | Push admission control config files + template: + src: "{{ item|lower }}.yaml.j2" + dest: "{{ kube_config_dir }}/admission-controls/{{ item|lower }}.yaml" + mode: 0640 + when: + - kube_apiserver_admission_control_config_file + - item in kube_apiserver_admission_plugins_needs_configuration + loop: "{{ kube_apiserver_enable_admission_plugins }}" + +- name: kubeadm | Check if apiserver.crt contains all needed SANs + shell: | + set -o pipefail + for IP in {{ apiserver_ips | join(' ') }}; do + openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkip $IP | grep -q 'does match certificate' || echo 'NEED-RENEW' + done + for HOST in {{ apiserver_hosts | join(' ') }}; do + openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkhost $HOST | grep -q 'does match certificate' || echo 'NEED-RENEW' + done + vars: + apiserver_ips: "{{ apiserver_sans|map('ipaddr')|reject('equalto', False)|list }}" + apiserver_hosts: "{{ apiserver_sans|difference(apiserver_ips) }}" + args: + executable: /bin/bash + register: apiserver_sans_check + changed_when: "'NEED-RENEW' in apiserver_sans_check.stdout" + when: + - kubeadm_already_run.stat.exists + - not kube_external_ca_mode + +- name: kubeadm | regenerate apiserver cert 1/2 + file: + state: absent + path: "{{ kube_cert_dir }}/{{ item }}" + with_items: + - apiserver.crt + - apiserver.key + when: + - kubeadm_already_run.stat.exists + - apiserver_sans_check.changed + - not kube_external_ca_mode + +- name: kubeadm | regenerate apiserver cert 2/2 + command: >- + {{ bin_dir }}/kubeadm + init phase certs apiserver + --config={{ kube_config_dir }}/kubeadm-config.yaml + when: + - kubeadm_already_run.stat.exists + - apiserver_sans_check.changed + - not kube_external_ca_mode + +- name: kubeadm | Create directory to store kubeadm patches + file: + path: "{{ kubeadm_patches.dest_dir }}" + state: directory + mode: 0640 + when: kubeadm_patches is defined and kubeadm_patches.enabled + +- name: kubeadm | Copy kubeadm patches from inventory files + copy: + src: "{{ kubeadm_patches.source_dir }}/" + dest: "{{ kubeadm_patches.dest_dir }}" + owner: "root" + mode: 0644 + when: kubeadm_patches is defined and kubeadm_patches.enabled + +- name: kubeadm | Initialize first master + command: >- + timeout -k 300s 300s + {{ bin_dir }}/kubeadm init + 
    --config={{ kube_config_dir }}/kubeadm-config.yaml
+    --ignore-preflight-errors=all
+    --skip-phases={{ kubeadm_init_phases_skip | join(',') }}
+    {{ kube_external_ca_mode | ternary('', '--upload-certs') }}
+  register: kubeadm_init
+  # Retry is because upload config sometimes fails
+  retries: 3
+  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
+  when: inventory_hostname == first_kube_control_plane and not kubeadm_already_run.stat.exists
+  failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
+  environment:
+    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
+  notify: Master | restart kubelet
+
+- name: set kubeadm certificate key
+  set_fact:
+    kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"
+  with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
+  when:
+    - kubeadm_certificate_key is not defined
+    - (item | trim) is match('.*--certificate-key.*')
+
+- name: Create hardcoded kubeadm token for joining nodes with 24h expiration (if defined)
+  shell: >-
+    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token delete {{ kubeadm_token }} || :;
+    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create {{ kubeadm_token }}
+  changed_when: false
+  when:
+    - inventory_hostname == first_kube_control_plane
+    - kubeadm_token is defined
+    - kubeadm_refresh_token
+  tags:
+    - kubeadm_token
+
+- name: Create kubeadm token for joining nodes with 24h expiration (default)
+  command: "{{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create"
+  changed_when: false
+  register: temp_token
+  retries: 5
+  delay: 5
+  until: temp_token is succeeded
+  delegate_to: "{{ first_kube_control_plane }}"
+  when: kubeadm_token is not defined
+  tags:
+    - kubeadm_token
+
+- name: Set kubeadm_token
+  set_fact:
+    kubeadm_token: "{{ temp_token.stdout }}"
+  when: temp_token.stdout is defined
+  tags:
+    - kubeadm_token
+
+- name: PodSecurityPolicy | install PodSecurityPolicy
+  include_tasks: psp-install.yml
+  when:
+    - podsecuritypolicy_enabled
+    - inventory_hostname == first_kube_control_plane
+
+- name: kubeadm | Join other masters
+  include_tasks: kubeadm-secondary.yml
+
+- name: kubeadm | upgrade kubernetes cluster
+  include_tasks: kubeadm-upgrade.yml
+  when:
+    - upgrade_cluster_setup
+    - kubeadm_already_run.stat.exists
+
+# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: []` in the YAML file.
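+# The trailing "-" in "<key>:<effect>-" below tells kubectl to *remove* the taint;
+# failed_when is disabled because the taint may already be absent on reruns.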
+- name: kubeadm | Remove taint for master with node role + command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}" + delegate_to: "{{ first_kube_control_plane }}" + with_items: + - "node-role.kubernetes.io/master:NoSchedule-" + - "node-role.kubernetes.io/control-plane:NoSchedule-" + when: inventory_hostname in groups['kube_node'] + failed_when: false diff --git a/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml new file mode 100644 index 0000000..711a2e0 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml @@ -0,0 +1,75 @@ +--- +- name: kubeadm | Check api is up + uri: + url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz" + validate_certs: false + when: inventory_hostname in groups['kube_control_plane'] + register: _result + retries: 60 + delay: 5 + until: _result.status == 200 + +- name: kubeadm | Upgrade first master + command: >- + timeout -k 600s 600s + {{ bin_dir }}/kubeadm + upgrade apply -y {{ kube_version }} + --certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }} + --config={{ kube_config_dir }}/kubeadm-config.yaml + --ignore-preflight-errors=all + --allow-experimental-upgrades + --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }} + --force + register: kubeadm_upgrade + # Retry is because upload config sometimes fails + retries: 3 + until: kubeadm_upgrade.rc == 0 + when: inventory_hostname == first_kube_control_plane + failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" + notify: Master | restart kubelet + +- name: kubeadm | Upgrade other masters + command: >- + timeout -k 600s 600s + {{ bin_dir }}/kubeadm + upgrade apply -y {{ kube_version }} + --certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }} + --config={{ kube_config_dir }}/kubeadm-config.yaml + --ignore-preflight-errors=all + --allow-experimental-upgrades + --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }} + --force + register: kubeadm_upgrade + when: inventory_hostname != first_kube_control_plane + failed_when: + - kubeadm_upgrade.rc != 0 + - '"field is immutable" not in kubeadm_upgrade.stderr' + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" + notify: Master | restart kubelet + +- name: kubeadm | clean kubectl cache to refresh api types + file: + path: "{{ item }}" + state: absent + with_items: + - /root/.kube/cache + - /root/.kube/http-cache + +# FIXME: https://github.com/kubernetes/kubeadm/issues/1318 +- name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode + command: >- + {{ kubectl }} + -n kube-system + scale deployment/coredns --replicas 0 + register: scale_down_coredns + retries: 6 + delay: 5 + until: scale_down_coredns is succeeded + run_once: yes + when: + - kubeadm_scale_down_coredns_enabled + - dns_mode not in ['coredns', 'coredns_dual'] + changed_when: false diff --git a/kubespray/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml b/kubespray/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml new file mode 100644 index 0000000..7d0c1a0 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml @@ -0,0 +1,18 @@ +--- +- name: Fixup kubelet client cert rotation 1/2 + lineinfile: + path: "{{ kube_config_dir }}/kubelet.conf" + regexp: '^ 
client-certificate-data: ' + line: ' client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem' + backup: yes + notify: + - "Master | reload kubelet" + +- name: Fixup kubelet client cert rotation 2/2 + lineinfile: + path: "{{ kube_config_dir }}/kubelet.conf" + regexp: '^ client-key-data: ' + line: ' client-key: /var/lib/kubelet/pki/kubelet-client-current.pem' + backup: yes + notify: + - "Master | reload kubelet" diff --git a/kubespray/roles/kubernetes/control-plane/tasks/main.yml b/kubespray/roles/kubernetes/control-plane/tasks/main.yml new file mode 100644 index 0000000..bd8029a --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/main.yml @@ -0,0 +1,104 @@ +--- +- import_tasks: pre-upgrade.yml + tags: + - k8s-pre-upgrade + +- name: Create webhook token auth config + template: + src: webhook-token-auth-config.yaml.j2 + dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml" + mode: 0640 + when: kube_webhook_token_auth|default(false) + +- name: Create webhook authorization config + template: + src: webhook-authorization-config.yaml.j2 + dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml" + mode: 0640 + when: kube_webhook_authorization|default(false) + +- name: Create kube-scheduler config + template: + src: kubescheduler-config.yaml.j2 + dest: "{{ kube_config_dir }}/kubescheduler-config.yaml" + mode: 0644 + +- import_tasks: encrypt-at-rest.yml + when: + - kube_encrypt_secret_data + +- name: Install | Copy kubectl binary from download dir + copy: + src: "{{ local_release_dir }}/kubectl-{{ kube_version }}-{{ image_arch }}" + dest: "{{ bin_dir }}/kubectl" + mode: 0755 + remote_src: true + tags: + - kubectl + - upgrade + +- name: Install kubectl bash completion + shell: "{{ bin_dir }}/kubectl completion bash >/etc/bash_completion.d/kubectl.sh" + when: ansible_os_family in ["Debian","RedHat"] + tags: + - kubectl + ignore_errors: true # noqa ignore-errors + +- name: Set kubectl bash completion file permissions + file: + path: /etc/bash_completion.d/kubectl.sh + owner: root + group: root + mode: 0755 + when: ansible_os_family in ["Debian","RedHat"] + tags: + - kubectl + - upgrade + ignore_errors: true # noqa ignore-errors + +- name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy + set_fact: + kube_apiserver_enable_admission_plugins: "{{ kube_apiserver_enable_admission_plugins | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}" + when: podsecuritypolicy_enabled + +- name: Define nodes already joined to existing cluster and first_kube_control_plane + import_tasks: define-first-kube-control.yml + +- name: Include kubeadm setup + import_tasks: kubeadm-setup.yml + +- name: Include kubeadm etcd extra tasks + include_tasks: kubeadm-etcd.yml + when: etcd_deployment_type == "kubeadm" + +- name: Include kubeadm secondary server apiserver fixes + include_tasks: kubeadm-fix-apiserver.yml + +- name: Include kubelet client cert rotation fixes + include_tasks: kubelet-fix-client-cert-rotation.yml + when: kubelet_rotate_certificates + +- name: Install script to renew K8S control plane certificates + template: + src: k8s-certs-renew.sh.j2 + dest: "{{ bin_dir }}/k8s-certs-renew.sh" + mode: 0755 + +- name: Renew K8S control plane certificates monthly 1/2 + template: + src: "{{ item }}.j2" + dest: "/etc/systemd/system/{{ item }}" + mode: 0644 + with_items: + - k8s-certs-renew.service + - k8s-certs-renew.timer + register: k8s_certs_units + when: auto_renew_certificates + +- name: Renew K8S control plane certificates 
monthly 2/2 + systemd: + name: k8s-certs-renew.timer + enabled: yes + state: started + daemon-reload: "{{ k8s_certs_units is changed }}" + when: auto_renew_certificates diff --git a/kubespray/roles/kubernetes/control-plane/tasks/pre-upgrade.yml b/kubespray/roles/kubernetes/control-plane/tasks/pre-upgrade.yml new file mode 100644 index 0000000..27c04ea --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/pre-upgrade.yml @@ -0,0 +1,21 @@ +--- +- name: "Pre-upgrade | Delete master manifests if etcd secrets changed" + file: + path: "/etc/kubernetes/manifests/{{ item }}.manifest" + state: absent + with_items: + - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] + register: kube_apiserver_manifest_replaced + when: etcd_secret_changed|default(false) + +- name: "Pre-upgrade | Delete master containers forcefully" # noqa 503 + shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f" + args: + executable: /bin/bash + with_items: + - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] + when: kube_apiserver_manifest_replaced.changed + register: remove_master_container + retries: 10 + until: remove_master_container.rc == 0 + delay: 1 diff --git a/kubespray/roles/kubernetes/control-plane/tasks/psp-install.yml b/kubespray/roles/kubernetes/control-plane/tasks/psp-install.yml new file mode 100644 index 0000000..581d128 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/tasks/psp-install.yml @@ -0,0 +1,38 @@ +--- +- name: Check AppArmor status + command: which apparmor_parser + register: apparmor_status + failed_when: false + changed_when: apparmor_status.rc != 0 + +- name: Set apparmor_enabled + set_fact: + apparmor_enabled: "{{ apparmor_status.rc == 0 }}" + +- name: Render templates for PodSecurityPolicy + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0640 + register: psp_manifests + with_items: + - {file: psp.yml, type: psp, name: psp} + - {file: psp-cr.yml, type: clusterrole, name: psp-cr} + - {file: psp-crb.yml, type: rolebinding, name: psp-crb} + +- name: Add policies, roles, bindings for PodSecurityPolicy + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + register: result + until: result is succeeded + retries: 10 + delay: 6 + with_items: "{{ psp_manifests.results }}" + environment: + KUBECONFIG: "{{ kube_config_dir }}/admin.conf" + loop_control: + label: "{{ item.item.file }}" \ No newline at end of file diff --git a/kubespray/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 new file mode 100644 index 0000000..34f5f18 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 @@ -0,0 +1,9 @@ +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +{% for plugin in kube_apiserver_enable_admission_plugins %} +{% if plugin in kube_apiserver_admission_plugins_needs_configuration %} +- name: {{ plugin }} + path: {{ kube_config_dir }}/{{ plugin|lower }}.yaml +{% endif %} +{% endfor %} diff --git a/kubespray/roles/kubernetes/control-plane/templates/apiserver-audit-policy.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/apiserver-audit-policy.yaml.j2 new file mode 100644 index 0000000..ca7bcf8 --- /dev/null +++ 
b/kubespray/roles/kubernetes/control-plane/templates/apiserver-audit-policy.yaml.j2 @@ -0,0 +1,129 @@ +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: +{% if audit_policy_custom_rules is defined and audit_policy_custom_rules != "" %} +{{ audit_policy_custom_rules | indent(2, true) }} +{% else %} + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + # Ingress controller reads `configmaps/ingress-uid` through the unsecured port. + # TODO(#46983): Change this to the ingress controller service account. + users: ["system:unsecured"] + namespaces: ["kube-system"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["configmaps"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps", "serviceaccounts/token"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + # Get responses can be large; skip them. 
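+  # ("Request" level records event metadata and the request body but not the response
+  #  body; the "RequestResponse" rules further below also record the response body.)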
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" +{% endif %} diff --git a/kubespray/roles/kubernetes/control-plane/templates/apiserver-audit-webhook-config.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/apiserver-audit-webhook-config.yaml.j2 new file mode 100644 index 0000000..cd8208e --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/apiserver-audit-webhook-config.yaml.j2 @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Config +clusters: +- cluster: + server: {{ audit_webhook_server_url }} +{% for key in audit_webhook_server_extra_args %} + {{ key }}: "{{ audit_webhook_server_extra_args[key] }}" +{% endfor %} + name: auditsink +contexts: +- context: + cluster: auditsink + user: "" + name: default-context +current-context: default-context +preferences: {} +users: [] diff --git a/kubespray/roles/kubernetes/control-plane/templates/eventratelimit.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/eventratelimit.yaml.j2 new file mode 100644 index 0000000..0d78670 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/eventratelimit.yaml.j2 @@ -0,0 +1,11 @@ +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +{% for limit in kube_apiserver_admission_event_rate_limits.values() %} +- type: {{ limit.type }} + qps: {{ limit.qps }} + burst: {{ limit.burst }} +{% if limit.cache_size is defined %} + cacheSize: {{ limit.cache_size }} +{% endif %} +{% endfor %} diff --git a/kubespray/roles/kubernetes/control-plane/templates/k8s-certs-renew.service.j2 b/kubespray/roles/kubernetes/control-plane/templates/k8s-certs-renew.service.j2 new file mode 100644 index 0000000..64610c2 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/k8s-certs-renew.service.j2 @@ -0,0 +1,6 @@ +[Unit] +Description=Renew K8S control plane certificates + +[Service] +Type=oneshot +ExecStart={{ bin_dir }}/k8s-certs-renew.sh diff --git a/kubespray/roles/kubernetes/control-plane/templates/k8s-certs-renew.sh.j2 b/kubespray/roles/kubernetes/control-plane/templates/k8s-certs-renew.sh.j2 new file mode 100644 index 0000000..53bb825 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/k8s-certs-renew.sh.j2 @@ -0,0 +1,23 @@ +#!/bin/bash + +echo "## Expiration before renewal ##" +{{ bin_dir 
}}/kubeadm certs check-expiration + +echo "## Renewing certificates managed by kubeadm ##" +{{ bin_dir }}/kubeadm certs renew all + +echo "## Restarting control plane pods managed by kubeadm ##" +{% if container_manager == "docker" %} +{{ docker_bin_dir }}/docker ps -af 'name=k8s_POD_(kube-apiserver|kube-controller-manager|kube-scheduler|etcd)-*' -q | /usr/bin/xargs {{ docker_bin_dir }}/docker rm -f +{% else %} +{{ bin_dir }}/crictl pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs {{ bin_dir }}/crictl rmp -f +{% endif %} + +echo "## Updating /root/.kube/config ##" +cp {{ kube_config_dir }}/admin.conf /root/.kube/config + +echo "## Waiting for apiserver to be up again ##" +until printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done + +echo "## Expiration after renewal ##" +{{ bin_dir }}/kubeadm certs check-expiration diff --git a/kubespray/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 b/kubespray/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 new file mode 100644 index 0000000..904f007 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 @@ -0,0 +1,8 @@ +[Unit] +Description=Timer to renew K8S control plane certificates + +[Timer] +OnCalendar={{ auto_renew_certificates_systemd_calendar }} + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 new file mode 100644 index 0000000..2fbd553 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 @@ -0,0 +1,453 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +{% if kubeadm_token is defined %} +bootstrapTokens: +- token: "{{ kubeadm_token }}" + description: "kubespray kubeadm bootstrap token" + ttl: "24h" +{% endif %} +localAPIEndpoint: + advertiseAddress: {{ ip | default(fallback_ips[inventory_hostname]) }} + bindPort: {{ kube_apiserver_port }} +{% if kubeadm_certificate_key is defined %} +certificateKey: {{ kubeadm_certificate_key }} +{% endif %} +nodeRegistration: +{% if kube_override_hostname|default('') %} + name: {{ kube_override_hostname }} +{% endif %} +{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %} + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane +{% else %} + taints: [] +{% endif %} + criSocket: {{ cri_socket }} +{% if cloud_provider is defined and cloud_provider in ["external"] %} + kubeletExtraArgs: + cloud-provider: external +{% endif %} +{% if kubeadm_patches is defined and kubeadm_patches.enabled %} +patches: + directory: {{ kubeadm_patches.dest_dir }} +{% endif %} +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +clusterName: {{ cluster_name }} +etcd: +{% if etcd_deployment_type != "kubeadm" %} + external: + endpoints: +{% for endpoint in etcd_access_addresses.split(',') %} + - {{ endpoint }} +{% endfor %} + caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }} + certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }} + keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }} +{% elif etcd_deployment_type == "kubeadm" %} + local: + imageRepository: "{{ etcd_image_repo | regex_replace("/etcd$","") }}" + imageTag: "{{ etcd_image_tag }}" + dataDir: "{{ etcd_data_dir }}" + 
extraArgs: + metrics: {{ etcd_metrics }} + election-timeout: "{{ etcd_election_timeout }}" + heartbeat-interval: "{{ etcd_heartbeat_interval }}" + auto-compaction-retention: "{{ etcd_compaction_retention }}" +{% if etcd_snapshot_count is defined %} + snapshot-count: "{{ etcd_snapshot_count }}" +{% endif %} +{% if etcd_quota_backend_bytes is defined %} + quota-backend-bytes: "{{ etcd_quota_backend_bytes }}" +{% endif %} +{% if etcd_max_request_bytes is defined %} + max-request-bytes: "{{ etcd_max_request_bytes }}" +{% endif %} +{% if etcd_log_level is defined %} + log-level: "{{ etcd_log_level }}" +{% endif %} +{% for key, value in etcd_extra_vars.items() %} + {{ key }}: "{{ value }}" +{% endfor %} + serverCertSANs: +{% for san in etcd_cert_alt_names %} + - {{ san }} +{% endfor %} +{% for san in etcd_cert_alt_ips %} + - {{ san }} +{% endfor %} + peerCertSANs: +{% for san in etcd_cert_alt_names %} + - {{ san }} +{% endfor %} +{% for san in etcd_cert_alt_ips %} + - {{ san }} +{% endfor %} +{% endif %} +dns: + imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }} + imageTag: {{ coredns_image_tag }} +networking: + dnsDomain: {{ dns_domain }} + serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" +{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} + podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" +{% endif %} +{% if kubeadm_feature_gates %} +featureGates: +{% for feature in kubeadm_feature_gates %} + {{ feature|replace("=", ": ") }} +{% endfor %} +{% endif %} +kubernetesVersion: {{ kube_version }} +{% if kubeadm_config_api_fqdn is defined %} +controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }} +{% else %} +controlPlaneEndpoint: {{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }} +{% endif %} +certificatesDir: {{ kube_cert_dir }} +imageRepository: {{ kube_image_repo }} +apiServer: + extraArgs: +{% if kube_apiserver_pod_eviction_not_ready_timeout_seconds is defined %} + default-not-ready-toleration-seconds: "{{ kube_apiserver_pod_eviction_not_ready_timeout_seconds }}" +{% endif %} +{% if kube_apiserver_pod_eviction_unreachable_timeout_seconds is defined %} + default-unreachable-toleration-seconds: "{{ kube_apiserver_pod_eviction_unreachable_timeout_seconds }}" +{% endif %} +{% if kube_api_anonymous_auth is defined %} + anonymous-auth: "{{ kube_api_anonymous_auth }}" +{% endif %} + authorization-mode: {{ authorization_modes | join(',') }} + bind-address: {{ kube_apiserver_bind_address }} +{% if kube_apiserver_enable_admission_plugins|length > 0 %} + enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }} +{% endif %} +{% if kube_apiserver_admission_control_config_file %} + admission-control-config-file: {{ kube_config_dir }}/admission-controls.yaml +{% endif %} +{% if kube_apiserver_disable_admission_plugins|length > 0 %} + disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }} +{% endif %} + apiserver-count: "{{ kube_apiserver_count }}" + endpoint-reconciler-type: lease +{% if etcd_events_cluster_enabled %} + etcd-servers-overrides: "/events#{{ etcd_events_access_addresses_semicolon }}" +{% endif %} + service-node-port-range: {{ kube_apiserver_node_port_range }} + service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if 
enable_dual_stack_networks else '' }}" + kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" + profiling: "{{ kube_profiling }}" + request-timeout: "{{ kube_apiserver_request_timeout }}" + enable-aggregator-routing: "{{ kube_api_aggregator_routing }}" +{% if kube_token_auth|default(true) %} + token-auth-file: {{ kube_token_dir }}/known_tokens.csv +{% endif %} +{% if kube_apiserver_service_account_lookup %} + service-account-lookup: "{{ kube_apiserver_service_account_lookup }}" +{% endif %} +{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} + oidc-issuer-url: "{{ kube_oidc_url }}" + oidc-client-id: "{{ kube_oidc_client_id }}" +{% if kube_oidc_ca_file is defined %} + oidc-ca-file: "{{ kube_oidc_ca_file }}" +{% endif %} +{% if kube_oidc_username_claim is defined %} + oidc-username-claim: "{{ kube_oidc_username_claim }}" +{% endif %} +{% if kube_oidc_groups_claim is defined %} + oidc-groups-claim: "{{ kube_oidc_groups_claim }}" +{% endif %} +{% if kube_oidc_username_prefix is defined %} + oidc-username-prefix: "{{ kube_oidc_username_prefix }}" +{% endif %} +{% if kube_oidc_groups_prefix is defined %} + oidc-groups-prefix: "{{ kube_oidc_groups_prefix }}" +{% endif %} +{% endif %} +{% if kube_webhook_token_auth|default(false) %} + authentication-token-webhook-config-file: {{ kube_config_dir }}/webhook-token-auth-config.yaml +{% endif %} +{% if kube_webhook_authorization|default(false) %} + authorization-webhook-config-file: {{ kube_config_dir }}/webhook-authorization-config.yaml +{% endif %} +{% if kube_encrypt_secret_data %} + encryption-provider-config: {{ kube_cert_dir }}/secrets_encryption.yaml +{% endif %} + storage-backend: {{ kube_apiserver_storage_backend }} +{% if kube_api_runtime_config|length > 0 %} + runtime-config: {{ kube_api_runtime_config | join(',') }} +{% endif %} + allow-privileged: "true" +{% if kubernetes_audit or kubernetes_audit_webhook %} + audit-policy-file: {{ audit_policy_file }} +{% endif %} +{% if kubernetes_audit %} + audit-log-path: "{{ audit_log_path }}" + audit-log-maxage: "{{ audit_log_maxage }}" + audit-log-maxbackup: "{{ audit_log_maxbackups }}" + audit-log-maxsize: "{{ audit_log_maxsize }}" +{% endif %} +{% if kubernetes_audit_webhook %} + audit-webhook-config-file: {{ audit_webhook_config_file }} + audit-webhook-mode: {{ audit_webhook_mode }} +{% if audit_webhook_mode == "batch" %} + audit-webhook-batch-max-size: "{{ audit_webhook_batch_max_size }}" + audit-webhook-batch-max-wait: "{{ audit_webhook_batch_max_wait }}" +{% endif %} +{% endif %} +{% for key in kube_kubeadm_apiserver_extra_args %} + {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}" +{% endfor %} +{% if kube_apiserver_feature_gates or kube_feature_gates %} + feature-gates: "{{ kube_apiserver_feature_gates | default(kube_feature_gates, true) | join(',') }}" +{% endif %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %} + cloud-provider: {{ cloud_provider }} + cloud-config: {{ kube_config_dir }}/cloud_config +{% endif %} +{% if tls_min_version is defined %} + tls-min-version: {{ tls_min_version }} +{% endif %} +{% if tls_cipher_suites is defined %} + tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %} + +{% endif %} +{% if event_ttl_duration is defined %} + event-ttl: {{ event_ttl_duration }} +{% endif %} +{% if kubelet_rotate_server_certificates %} + kubelet-certificate-authority: {{ kube_cert_dir 
}}/ca.crt +{% endif %} +{% if kubernetes_audit or kube_token_auth|default(true) or kube_webhook_token_auth|default(false) or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] ) or apiserver_extra_volumes or ssl_ca_dirs|length %} + extraVolumes: +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %} + - name: cloud-config + hostPath: {{ kube_config_dir }}/cloud_config + mountPath: {{ kube_config_dir }}/cloud_config +{% endif %} +{% if kube_token_auth|default(true) %} + - name: token-auth-config + hostPath: {{ kube_token_dir }} + mountPath: {{ kube_token_dir }} +{% endif %} +{% if kube_webhook_token_auth|default(false) %} + - name: webhook-token-auth-config + hostPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml + mountPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml +{% endif %} +{% if kube_webhook_authorization|default(false) %} + - name: webhook-authorization-config + hostPath: {{ kube_config_dir }}/webhook-authorization-config.yaml + mountPath: {{ kube_config_dir }}/webhook-authorization-config.yaml +{% endif %} +{% if kubernetes_audit or kubernetes_audit_webhook %} + - name: {{ audit_policy_name }} + hostPath: {{ audit_policy_hostpath }} + mountPath: {{ audit_policy_mountpath }} +{% if audit_log_path != "-" %} + - name: {{ audit_log_name }} + hostPath: {{ audit_log_hostpath }} + mountPath: {{ audit_log_mountpath }} + readOnly: false +{% endif %} +{% endif %} +{% if kube_apiserver_admission_control_config_file %} + - name: admission-control-configs + hostPath: {{ kube_config_dir }}/admission-controls + mountPath: {{ kube_config_dir }} + readOnly: false + pathType: DirectoryOrCreate +{% endif %} +{% for volume in apiserver_extra_volumes %} + - name: {{ volume.name }} + hostPath: {{ volume.hostPath }} + mountPath: {{ volume.mountPath }} + readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }} +{% endfor %} +{% if ssl_ca_dirs|length %} +{% for dir in ssl_ca_dirs %} + - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + hostPath: {{ dir }} + mountPath: {{ dir }} + readOnly: true +{% endfor %} +{% endif %} +{% endif %} + certSANs: +{% for san in apiserver_sans %} + - {{ san }} +{% endfor %} + timeoutForControlPlane: 5m0s +controllerManager: + extraArgs: + node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }} + node-monitor-period: {{ kube_controller_node_monitor_period }} +{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} + cluster-cidr: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" +{% endif %} + service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}" +{% if enable_dual_stack_networks %} + node-cidr-mask-size-ipv4: "{{ kube_network_node_prefix }}" + node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}" +{% else %} + node-cidr-mask-size: "{{ kube_network_node_prefix }}" +{% endif %} + profiling: "{{ kube_profiling }}" + terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}" + bind-address: {{ kube_controller_manager_bind_address }} + leader-elect-lease-duration: {{ kube_controller_manager_leader_elect_lease_duration }} + leader-elect-renew-deadline: {{ kube_controller_manager_leader_elect_renew_deadline }} +{% if kube_controller_feature_gates or kube_feature_gates %} + feature-gates: "{{ kube_controller_feature_gates | 
default(kube_feature_gates, true) | join(',') }}" +{% endif %} +{% for key in kube_kubeadm_controller_extra_args %} + {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}" +{% endfor %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %} + cloud-provider: {{ cloud_provider }} + cloud-config: {{ kube_config_dir }}/cloud_config +{% endif %} +{% if kube_network_plugin is defined and kube_network_plugin not in ["cloud"] %} + configure-cloud-routes: "false" +{% endif %} +{% if kubelet_flexvolumes_plugins_dir is defined %} + flex-volume-plugin-dir: {{kubelet_flexvolumes_plugins_dir}} +{% endif %} +{% if tls_min_version is defined %} + tls-min-version: {{ tls_min_version }} +{% endif %} +{% if tls_cipher_suites is defined %} + tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %} + +{% endif %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] or controller_manager_extra_volumes %} + extraVolumes: +{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %} + - name: openstackcacert + hostPath: "{{ kube_config_dir }}/openstack-cacert.pem" + mountPath: "{{ kube_config_dir }}/openstack-cacert.pem" +{% endif %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %} + - name: cloud-config + hostPath: {{ kube_config_dir }}/cloud_config + mountPath: {{ kube_config_dir }}/cloud_config +{% endif %} +{% for volume in controller_manager_extra_volumes %} + - name: {{ volume.name }} + hostPath: {{ volume.hostPath }} + mountPath: {{ volume.mountPath }} + readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }} +{% endfor %} +{% endif %} +scheduler: + extraArgs: + bind-address: {{ kube_scheduler_bind_address }} + config: {{ kube_config_dir }}/kubescheduler-config.yaml +{% if kube_scheduler_feature_gates or kube_feature_gates %} + feature-gates: "{{ kube_scheduler_feature_gates | default(kube_feature_gates, true) | join(',') }}" +{% endif %} +{% if kube_kubeadm_scheduler_extra_args|length > 0 %} +{% for key in kube_kubeadm_scheduler_extra_args %} + {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}" +{% endfor %} +{% endif %} +{% if tls_min_version is defined %} + tls-min-version: {{ tls_min_version }} +{% endif %} +{% if tls_cipher_suites is defined %} + tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %} + +{% endif %} + extraVolumes: + - name: kubescheduler-config + hostPath: {{ kube_config_dir }}/kubescheduler-config.yaml + mountPath: {{ kube_config_dir }}/kubescheduler-config.yaml + readOnly: true +{% if scheduler_extra_volumes %} +{% for volume in scheduler_extra_volumes %} + - name: {{ volume.name }} + hostPath: {{ volume.hostPath }} + mountPath: {{ volume.mountPath }} + readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }} +{% endfor %} +{% endif %} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +bindAddress: {{ kube_proxy_bind_address }} +clientConnection: + acceptContentTypes: {{ kube_proxy_client_accept_content_types }} + burst: {{ kube_proxy_client_burst }} + contentType: {{ kube_proxy_client_content_type }} + kubeconfig: {{ kube_proxy_client_kubeconfig }} + qps: {{ kube_proxy_client_qps }} +{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %} +clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + 
kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}" +{% endif %} +configSyncPeriod: {{ kube_proxy_config_sync_period }} +conntrack: + maxPerCore: {{ kube_proxy_conntrack_max_per_core }} + min: {{ kube_proxy_conntrack_min }} + tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }} + tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }} +enableProfiling: {{ kube_proxy_enable_profiling }} +healthzBindAddress: {{ kube_proxy_healthz_bind_address }} +hostnameOverride: {{ kube_override_hostname }} +iptables: + masqueradeAll: {{ kube_proxy_masquerade_all }} + masqueradeBit: {{ kube_proxy_masquerade_bit }} + minSyncPeriod: {{ kube_proxy_min_sync_period }} + syncPeriod: {{ kube_proxy_sync_period }} +ipvs: + excludeCIDRs: {{ kube_proxy_exclude_cidrs }} + minSyncPeriod: {{ kube_proxy_min_sync_period }} + scheduler: {{ kube_proxy_scheduler }} + syncPeriod: {{ kube_proxy_sync_period }} + strictARP: {{ kube_proxy_strict_arp }} + tcpTimeout: {{ kube_proxy_tcp_timeout }} + tcpFinTimeout: {{ kube_proxy_tcp_fin_timeout }} + udpTimeout: {{ kube_proxy_udp_timeout }} +metricsBindAddress: {{ kube_proxy_metrics_bind_address }} +mode: {{ kube_proxy_mode }} +nodePortAddresses: {{ kube_proxy_nodeport_addresses }} +oomScoreAdj: {{ kube_proxy_oom_score_adj }} +portRange: {{ kube_proxy_port_range }} +udpIdleTimeout: {{ kube_proxy_udp_idle_timeout }} +{% if kube_proxy_feature_gates or kube_feature_gates %} +{% set feature_gates = ( kube_proxy_feature_gates | default(kube_feature_gates, true) ) %} +featureGates: +{% for feature in feature_gates %} + {{ feature|replace("=", ": ") }} +{% endfor %} +{% endif %} +{# DNS settings for kubelet #} +{% if enable_nodelocaldns %} +{% set kubelet_cluster_dns = [nodelocaldns_ip] %} +{% elif dns_mode in ['coredns'] %} +{% set kubelet_cluster_dns = [skydns_server] %} +{% elif dns_mode == 'coredns_dual' %} +{% set kubelet_cluster_dns = [skydns_server,skydns_server_secondary] %} +{% elif dns_mode == 'manual' %} +{% set kubelet_cluster_dns = [manual_dns_server] %} +{% else %} +{% set kubelet_cluster_dns = [] %} +{% endif %} +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +clusterDNS: +{% for dns_address in kubelet_cluster_dns %} +- {{ dns_address }} +{% endfor %} +{% if kubelet_feature_gates or kube_feature_gates %} +{% set feature_gates = ( kubelet_feature_gates | default(kube_feature_gates, true) ) %} +featureGates: +{% for feature in feature_gates %} + {{ feature|replace("=", ": ") }} +{% endfor %} +{% endif %} diff --git a/kubespray/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 new file mode 100644 index 0000000..b4b3c5e --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 @@ -0,0 +1,34 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +discovery: + bootstrapToken: +{% if kubeadm_config_api_fqdn is defined %} + apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }} +{% else %} + apiServerEndpoint: {{ kubeadm_discovery_address }} +{% endif %} + token: {{ kubeadm_token }} + unsafeSkipCAVerification: true + timeout: {{ discovery_timeout }} + tlsBootstrapToken: {{ kubeadm_token }} +controlPlane: + localAPIEndpoint: + advertiseAddress: {{ kube_apiserver_address }} + bindPort: {{ kube_apiserver_port }} + certificateKey: {{ kubeadm_certificate_key }} +nodeRegistration: + 
name: {{ kube_override_hostname|default(inventory_hostname) }}
+  criSocket: {{ cri_socket }}
+{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
+  taints:
+  - effect: NoSchedule
+    key: node-role.kubernetes.io/master
+  - effect: NoSchedule
+    key: node-role.kubernetes.io/control-plane
+{% else %}
+  taints: []
+{% endif %}
+{% if kubeadm_patches is defined and kubeadm_patches.enabled %}
+patches:
+  directory: {{ kubeadm_patches.dest_dir }}
+{% endif %}
\ No newline at end of file
diff --git a/kubespray/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2
new file mode 100644
index 0000000..be41418
--- /dev/null
+++ b/kubespray/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2
@@ -0,0 +1,25 @@
+{% set kubescheduler_config_api_version = "v1beta3" %}
+apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version|d('v1') }}
+kind: KubeSchedulerConfiguration
+clientConnection:
+  kubeconfig: "{{ kube_config_dir }}/scheduler.conf"
+{% for key in kube_scheduler_client_conn_extra_opts %}
+  {{ key }}: {{ kube_scheduler_client_conn_extra_opts[key] }}
+{% endfor %}
+{% if kube_scheduler_extenders %}
+extenders:
+{{ kube_scheduler_extenders | to_nice_yaml(indent=2, width=256) }}
+{% endif %}
+leaderElection:
+  leaseDuration: {{ kube_scheduler_leader_elect_lease_duration }}
+  renewDeadline: {{ kube_scheduler_leader_elect_renew_deadline }}
+{% for key in kube_scheduler_leader_elect_extra_opts %}
+  {{ key }}: {{ kube_scheduler_leader_elect_extra_opts[key] }}
+{% endfor %}
+{% if kube_scheduler_profiles %}
+profiles:
+{{ kube_scheduler_profiles | to_nice_yaml(indent=2, width=256) }}
+{% endif %}
+{% for key in kube_scheduler_config_extra_opts %}
+{{ key }}: {{ kube_scheduler_config_extra_opts[key] }}
+{% endfor %}
diff --git a/kubespray/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2
new file mode 100644
index 0000000..5d39576
--- /dev/null
+++ b/kubespray/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2
@@ -0,0 +1,17 @@
+{% if kube_pod_security_use_default %}
+apiVersion: pod-security.admission.config.k8s.io/v1beta1
+kind: PodSecurityConfiguration
+defaults:
+  enforce: "{{ kube_pod_security_default_enforce }}"
+  enforce-version: "{{ kube_pod_security_default_enforce_version }}"
+  audit: "{{ kube_pod_security_default_audit }}"
+  audit-version: "{{ kube_pod_security_default_audit_version }}"
+  warn: "{{ kube_pod_security_default_warn }}"
+  warn-version: "{{ kube_pod_security_default_warn_version }}"
+exemptions:
+  usernames: {{ kube_pod_security_exemptions_usernames|to_json }}
+  runtimeClasses: {{ kube_pod_security_exemptions_runtime_class_names|to_json }}
+  namespaces: {{ kube_pod_security_exemptions_namespaces|to_json }}
+{% else %}
+# This file is intentionally left empty as kube_pod_security_use_default={{ kube_pod_security_use_default }}
+{% endif %}
diff --git a/kubespray/roles/kubernetes/control-plane/templates/psp-cr.yml.j2 b/kubespray/roles/kubernetes/control-plane/templates/psp-cr.yml.j2
new file mode 100644
index 0000000..d9f0e8d
--- /dev/null
+++ b/kubespray/roles/kubernetes/control-plane/templates/psp-cr.yml.j2
@@ -0,0 +1,32 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: psp:privileged
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+  - policy
+
resourceNames: + - privileged + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: psp:restricted + labels: + addonmanager.kubernetes.io/mode: Reconcile +rules: +- apiGroups: + - policy + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use diff --git a/kubespray/roles/kubernetes/control-plane/templates/psp-crb.yml.j2 b/kubespray/roles/kubernetes/control-plane/templates/psp-crb.yml.j2 new file mode 100644 index 0000000..7513c3c --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/psp-crb.yml.j2 @@ -0,0 +1,54 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: psp:any:restricted +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted +subjects: +- kind: Group + name: system:authenticated + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: psp:kube-system:privileged + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:privileged +subjects: +- kind: Group + name: system:masters + apiGroup: rbac.authorization.k8s.io +- kind: Group + name: system:serviceaccounts:kube-system + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: psp:nodes:privileged + namespace: kube-system + annotations: + kubernetes.io/description: 'Allow nodes to create privileged pods. Should + be used in combination with the NodeRestriction admission plugin to limit + nodes to mirror pods bound to themselves.' + labels: + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:privileged +subjects: + - kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:nodes + - kind: User + apiGroup: rbac.authorization.k8s.io + # Legacy node ID + name: kubelet diff --git a/kubespray/roles/kubernetes/control-plane/templates/psp.yml.j2 b/kubespray/roles/kubernetes/control-plane/templates/psp.yml.j2 new file mode 100644 index 0000000..5da5400 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/psp.yml.j2 @@ -0,0 +1,27 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: restricted + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' +{% if apparmor_enabled %} + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' +{% endif %} + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + {{ podsecuritypolicy_restricted_spec | to_yaml(indent=2, width=1337) | indent(width=2) }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: privileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + {{ podsecuritypolicy_privileged_spec | to_yaml(indent=2, width=1337) | indent(width=2) }} diff --git a/kubespray/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 new file mode 100644 index 0000000..9105bb6 --- /dev/null +++ 
b/kubespray/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 @@ -0,0 +1,11 @@ +apiVersion: apiserver.config.k8s.io/v1 +kind: EncryptionConfiguration +resources: + - resources: +{{ kube_encryption_resources|to_nice_yaml|indent(4, True) }} + providers: + - {{ kube_encryption_algorithm }}: + keys: + - name: key + secret: {{ kube_encrypt_token | b64encode }} + - identity: {} diff --git a/kubespray/roles/kubernetes/control-plane/templates/webhook-authorization-config.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/webhook-authorization-config.yaml.j2 new file mode 100644 index 0000000..b5b5530 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/webhook-authorization-config.yaml.j2 @@ -0,0 +1,18 @@ +# clusters refers to the remote service. +clusters: +- name: webhook-token-authz-cluster + cluster: + server: {{ kube_webhook_authorization_url }} + insecure-skip-tls-verify: {{ kube_webhook_authorization_url_skip_tls_verify }} + +# users refers to the API server's webhook configuration. +users: +- name: webhook-token-authz-user + +# kubeconfig files require a context. Provide one for the API server. +current-context: webhook-token-authz +contexts: +- context: + cluster: webhook-token-authz-cluster + user: webhook-token-authz-user + name: webhook-token-authz diff --git a/kubespray/roles/kubernetes/control-plane/templates/webhook-token-auth-config.yaml.j2 b/kubespray/roles/kubernetes/control-plane/templates/webhook-token-auth-config.yaml.j2 new file mode 100644 index 0000000..f152d11 --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/templates/webhook-token-auth-config.yaml.j2 @@ -0,0 +1,21 @@ +# clusters refers to the remote service. +clusters: +- name: webhook-token-auth-cluster + cluster: + server: {{ kube_webhook_token_auth_url }} + insecure-skip-tls-verify: {{ kube_webhook_token_auth_url_skip_tls_verify }} +{% if kube_webhook_token_auth_ca_data is defined %} + certificate-authority-data: {{ kube_webhook_token_auth_ca_data }} +{% endif %} + +# users refers to the API server's webhook configuration. +users: +- name: webhook-token-auth-user + +# kubeconfig files require a context. Provide one for the API server. 
+current-context: webhook-token-auth +contexts: +- context: + cluster: webhook-token-auth-cluster + user: webhook-token-auth-user + name: webhook-token-auth diff --git a/kubespray/roles/kubernetes/control-plane/vars/main.yaml b/kubespray/roles/kubernetes/control-plane/vars/main.yaml new file mode 100644 index 0000000..f888d6b --- /dev/null +++ b/kubespray/roles/kubernetes/control-plane/vars/main.yaml @@ -0,0 +1,3 @@ +--- +# list of admission plugins that needs to be configured +kube_apiserver_admission_plugins_needs_configuration: [EventRateLimit, PodSecurity] diff --git a/kubespray/roles/kubernetes/kubeadm/defaults/main.yml b/kubespray/roles/kubernetes/kubeadm/defaults/main.yml new file mode 100644 index 0000000..0449b8a --- /dev/null +++ b/kubespray/roles/kubernetes/kubeadm/defaults/main.yml @@ -0,0 +1,12 @@ +--- +# discovery_timeout modifies the discovery timeout +# This value must be smaller than kubeadm_join_timeout +discovery_timeout: 60s +kubeadm_join_timeout: 120s + +# If non-empty, will use this string as identification instead of the actual hostname +kube_override_hostname: >- + {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- else -%} + {{ inventory_hostname }} + {%- endif -%} diff --git a/kubespray/roles/kubernetes/kubeadm/handlers/main.yml b/kubespray/roles/kubernetes/kubeadm/handlers/main.yml new file mode 100644 index 0000000..4c2b125 --- /dev/null +++ b/kubespray/roles/kubernetes/kubeadm/handlers/main.yml @@ -0,0 +1,15 @@ +--- +- name: Kubeadm | restart kubelet + command: /bin/true + notify: + - Kubeadm | reload systemd + - Kubeadm | reload kubelet + +- name: Kubeadm | reload systemd + systemd: + daemon_reload: true + +- name: Kubeadm | reload kubelet + service: + name: kubelet + state: restarted diff --git a/kubespray/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml b/kubespray/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml new file mode 100644 index 0000000..c87b840 --- /dev/null +++ b/kubespray/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml @@ -0,0 +1,61 @@ +--- +- name: Parse certificate key if not set + set_fact: + kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_certificate_key'] }}" + when: kubeadm_certificate_key is undefined + +- name: Pull control plane certs down + shell: >- + {{ bin_dir }}/kubeadm join phase + control-plane-prepare download-certs + --certificate-key {{ kubeadm_certificate_key }} + --control-plane + --token {{ kubeadm_token }} + --discovery-token-unsafe-skip-ca-verification + {{ kubeadm_discovery_address }} + && + {{ bin_dir }}/kubeadm join phase + control-plane-prepare certs + --control-plane + --token {{ kubeadm_token }} + --discovery-token-unsafe-skip-ca-verification + {{ kubeadm_discovery_address }} + args: + creates: "{{ kube_cert_dir }}/apiserver-etcd-client.key" + +- name: Delete unneeded certificates + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ kube_cert_dir }}/apiserver.crt" + - "{{ kube_cert_dir }}/apiserver.key" + - "{{ kube_cert_dir }}/ca.key" + - "{{ kube_cert_dir }}/etcd/ca.key" + - "{{ kube_cert_dir }}/etcd/healthcheck-client.crt" + - "{{ kube_cert_dir }}/etcd/healthcheck-client.key" + - "{{ kube_cert_dir }}/etcd/peer.crt" + - "{{ kube_cert_dir }}/etcd/peer.key" + - "{{ kube_cert_dir }}/etcd/server.crt" + - "{{ kube_cert_dir }}/etcd/server.key" + - "{{ kube_cert_dir }}/front-proxy-ca.crt" + - "{{ kube_cert_dir }}/front-proxy-ca.key" + - "{{ kube_cert_dir }}/front-proxy-client.crt" + - "{{ kube_cert_dir }}/front-proxy-client.key" + - "{{ 
kube_cert_dir }}/sa.key" + - "{{ kube_cert_dir }}/sa.pub" + +- name: Calculate etcd cert serial + command: "openssl x509 -in {{ kube_cert_dir }}/apiserver-etcd-client.crt -noout -serial" + register: "etcd_client_cert_serial_result" + changed_when: false + when: + - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort + tags: + - network + +- name: Set etcd_client_cert_serial + set_fact: + etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}" + tags: + - network diff --git a/kubespray/roles/kubernetes/kubeadm/tasks/main.yml b/kubespray/roles/kubernetes/kubeadm/tasks/main.yml new file mode 100644 index 0000000..a3cc862 --- /dev/null +++ b/kubespray/roles/kubernetes/kubeadm/tasks/main.yml @@ -0,0 +1,176 @@ +--- +- name: Set kubeadm_discovery_address + set_fact: + kubeadm_discovery_address: >- + {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%} + {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} + {%- else -%} + {{ kube_apiserver_endpoint | replace("https://", "") }} + {%- endif %} + tags: + - facts + +- name: Check if kubelet.conf exists + stat: + path: "{{ kube_config_dir }}/kubelet.conf" + get_attributes: no + get_checksum: no + get_mime: no + register: kubelet_conf + +- name: Check if kubeadm CA cert is accessible + stat: + path: "{{ kube_cert_dir }}/ca.crt" + get_attributes: no + get_checksum: no + get_mime: no + register: kubeadm_ca_stat + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + +- name: Calculate kubeadm CA cert hash + shell: set -o pipefail && openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //' + args: + executable: /bin/bash + register: kubeadm_ca_hash + when: + - kubeadm_ca_stat.stat is defined + - kubeadm_ca_stat.stat.exists + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + changed_when: false + +- name: Create kubeadm token for joining nodes with 24h expiration (default) + command: "{{ bin_dir }}/kubeadm token create" + register: temp_token + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kubeadm_token is not defined + changed_when: false + +- name: Set kubeadm_token to generated token + set_fact: + kubeadm_token: "{{ temp_token.stdout }}" + when: kubeadm_token is not defined + +- name: Set kubeadm api version to v1beta3 + set_fact: + kubeadmConfig_api_version: v1beta3 + +- name: Create kubeadm client config + template: + src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2" + dest: "{{ kube_config_dir }}/kubeadm-client.conf" + backup: yes + mode: 0640 + when: not is_kube_master + +- name: kubeadm | Create directory to store kubeadm patches + file: + path: "{{ kubeadm_patches.dest_dir }}" + state: directory + mode: 0640 + when: kubeadm_patches is defined and kubeadm_patches.enabled + +- name: kubeadm | Copy kubeadm patches from inventory files + copy: + src: "{{ kubeadm_patches.source_dir }}/" + dest: "{{ kubeadm_patches.dest_dir }}" + owner: "root" + mode: 0644 + when: kubeadm_patches is defined and kubeadm_patches.enabled + +- name: Join to cluster if needed + environment: + PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin" + when: not is_kube_master and (not kubelet_conf.stat.exists) + block: + + - name: Join to cluster + command: >- + timeout -k {{ kubeadm_join_timeout }} {{ kubeadm_join_timeout }} + {{ bin_dir }}/kubeadm join + --config {{ kube_config_dir }}/kubeadm-client.conf + 
--ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests + --skip-phases={{ kubeadm_join_phases_skip | join(',') }} + register: kubeadm_join + changed_when: kubeadm_join is success + + rescue: + + - name: Join to cluster with ignores + command: >- + timeout -k {{ kubeadm_join_timeout }} {{ kubeadm_join_timeout }} + {{ bin_dir }}/kubeadm join + --config {{ kube_config_dir }}/kubeadm-client.conf + --ignore-preflight-errors=all + --skip-phases={{ kubeadm_join_phases_skip | join(',') }} + register: kubeadm_join + changed_when: kubeadm_join is success + + always: + + - name: Display kubeadm join stderr if any + when: kubeadm_join is failed + debug: + msg: | + Joined with warnings + {{ kubeadm_join.stderr_lines }} + +- name: Update server field in kubelet kubeconfig + lineinfile: + dest: "{{ kube_config_dir }}/kubelet.conf" + regexp: 'server:' + line: ' server: {{ kube_apiserver_endpoint }}' + backup: yes + when: + - kubeadm_config_api_fqdn is not defined + - not is_kube_master + - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "") + notify: Kubeadm | restart kubelet + +# FIXME(mattymo): Need to point to localhost, otherwise masters will all point +# incorrectly to first master, creating SPoF. +- name: Update server field in kube-proxy kubeconfig + shell: >- + set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml + | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g' + | {{ kubectl }} replace -f - + args: + executable: /bin/bash + run_once: true + delegate_to: "{{ groups['kube_control_plane']|first }}" + delegate_facts: false + when: + - kubeadm_config_api_fqdn is not defined + - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "") + - kube_proxy_deployed + - loadbalancer_apiserver_localhost + tags: + - kube-proxy + +- name: Set ca.crt file permission + file: + path: "{{ kube_cert_dir }}/ca.crt" + owner: root + group: root + mode: "0644" + +- name: Restart all kube-proxy pods to ensure that they load the new configmap + command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0" + run_once: true + delegate_to: "{{ groups['kube_control_plane']|first }}" + delegate_facts: false + when: + - kubeadm_config_api_fqdn is not defined + - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "") + - kube_proxy_deployed + tags: + - kube-proxy + +- name: Extract etcd certs from control plane if using etcd kubeadm mode + include_tasks: kubeadm_etcd_node.yml + when: + - etcd_deployment_type == "kubeadm" + - inventory_hostname not in groups['kube_control_plane'] + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" diff --git a/kubespray/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta3.j2 b/kubespray/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta3.j2 new file mode 100644 index 0000000..64c3db9 --- /dev/null +++ b/kubespray/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta3.j2 @@ -0,0 +1,32 @@ +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +discovery: + bootstrapToken: +{% if kubeadm_config_api_fqdn is defined %} + apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }} +{% else %} + apiServerEndpoint: {{ kubeadm_discovery_address }} +{% endif %} + token: {{ kubeadm_token }} +{% if 
kubeadm_ca_hash.stdout is defined %} + caCertHashes: + - sha256:{{ kubeadm_ca_hash.stdout }} +{% else %} + unsafeSkipCAVerification: true +{% endif %} + timeout: {{ discovery_timeout }} + tlsBootstrapToken: {{ kubeadm_token }} +caCertPath: {{ kube_cert_dir }}/ca.crt +nodeRegistration: + name: '{{ kube_override_hostname }}' + criSocket: {{ cri_socket }} +{% if 'calico_rr' in group_names and 'kube_node' not in group_names %} + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/calico-rr +{% endif %} +{% if kubeadm_patches is defined and kubeadm_patches.enabled %} +patches: + directory: {{ kubeadm_patches.dest_dir }} +{% endif %} diff --git a/kubespray/roles/kubernetes/node-label/tasks/main.yml b/kubespray/roles/kubernetes/node-label/tasks/main.yml new file mode 100644 index 0000000..f91e7f4 --- /dev/null +++ b/kubespray/roles/kubernetes/node-label/tasks/main.yml @@ -0,0 +1,49 @@ +--- +- name: Kubernetes Apps | Wait for kube-apiserver + uri: + url: "{{ kube_apiserver_endpoint }}/healthz" + validate_certs: no + client_cert: "{{ kube_apiserver_client_cert }}" + client_key: "{{ kube_apiserver_client_key }}" + register: result + until: result.status == 200 + retries: 10 + delay: 6 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Set role node label to empty list + set_fact: + role_node_labels: [] + +- name: Node label for nvidia GPU nodes + set_fact: + role_node_labels: "{{ role_node_labels + [ 'nvidia.com/gpu=true' ] }}" + when: + - nvidia_gpu_nodes is defined + - nvidia_accelerator_enabled|bool + - inventory_hostname in nvidia_gpu_nodes + +- name: Set inventory node label to empty list + set_fact: + inventory_node_labels: [] + +- name: Populate inventory node label + set_fact: + inventory_node_labels: "{{ inventory_node_labels + [ '%s=%s'|format(item.key, item.value) ] }}" + loop: "{{ node_labels|d({})|dict2items }}" + when: + - node_labels is defined + - node_labels is mapping + +- debug: # noqa unnamed-task + var: role_node_labels +- debug: # noqa unnamed-task + var: inventory_node_labels + +- name: Set label to node + command: >- + {{ kubectl }} label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true + loop: "{{ role_node_labels + inventory_node_labels }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + changed_when: false +... diff --git a/kubespray/roles/kubernetes/node/defaults/main.yml b/kubespray/roles/kubernetes/node/defaults/main.yml new file mode 100644 index 0000000..8be6174 --- /dev/null +++ b/kubespray/roles/kubernetes/node/defaults/main.yml @@ -0,0 +1,235 @@ +--- +# advertised host IP for kubelet. This affects network plugin config. Take caution +kubelet_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}{{ (',' + ip6) if enable_dual_stack_networks and ip6 is defined else '' }}" + +# bind address for kubelet. 
Set to 0.0.0.0 to listen on all interfaces +kubelet_bind_address: "{{ ip | default('0.0.0.0') }}" + +# resolv.conf to base dns config +kube_resolv_conf: "/etc/resolv.conf" + +# Set to empty to avoid cgroup creation +kubelet_enforce_node_allocatable: "\"\"" + +# Set runtime and kubelet cgroups when using systemd as cgroup driver (default) +kubelet_runtime_cgroups: "/systemd/system.slice" +kubelet_kubelet_cgroups: "/systemd/system.slice" + +# Set runtime and kubelet cgroups when using cgroupfs as cgroup driver +kubelet_runtime_cgroups_cgroupfs: "/system.slice/containerd.service" +kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service" + +### fail with swap on (default true) +kubelet_fail_swap_on: true + +# Set systemd service hardening features +kubelet_systemd_hardening: false + +# List of secure IPs for kubelet +kubelet_secure_addresses: >- + {%- for host in groups['kube_control_plane'] -%} + {{ hostvars[host]['ip'] | default(fallback_ips[host]) }}{{ ' ' if not loop.last else '' }} + {%- endfor -%} + +# Reserve this space for kube resources +kube_memory_reserved: 256Mi +kube_cpu_reserved: 100m +# kube_ephemeral_storage_reserved: 2Gi +# kube_pid_reserved: "1000" +# Reservation for master hosts +kube_master_memory_reserved: 512Mi +kube_master_cpu_reserved: 200m +# kube_master_ephemeral_storage_reserved: 2Gi +# kube_master_pid_reserved: "1000" + +# Set to true to reserve resources for system daemons +system_reserved: false +system_memory_reserved: 512Mi +system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +# system_pid_reserved: "1000" +# Reservation for master hosts +system_master_memory_reserved: 256Mi +system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi +# system_master_pid_reserved: "1000" + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +eviction_hard: {} +eviction_hard_control_plane: {} + +kubelet_status_update_frequency: 10s + +# kube-vip +kube_vip_version: v0.5.5 + +kube_vip_arp_enabled: false +kube_vip_interface: +kube_vip_services_interface: +kube_vip_cidr: 32 +kube_vip_controlplane_enabled: false +kube_vip_ddns_enabled: false +kube_vip_services_enabled: false +kube_vip_leader_election_enabled: "{{ kube_vip_arp_enabled }}" +kube_vip_bgp_enabled: false +kube_vip_bgp_routerid: +kube_vip_local_as: 65000 +kube_vip_bgp_peeraddress: +kube_vip_bgp_peerpass: +kube_vip_bgp_peeras: 65000 +kube_vip_bgppeers: +kube_vip_address: + +# Requests for load balancer app +loadbalancer_apiserver_memory_requests: 32M +loadbalancer_apiserver_cpu_requests: 25m + +loadbalancer_apiserver_keepalive_timeout: 5m + +# Uncomment if you need to enable deprecated runtimes +# kube_api_runtime_config: +# - apps/v1beta1=true +# - apps/v1beta2=true +# - extensions/v1beta1/daemonsets=true +# - extensions/v1beta1/deployments=true +# - extensions/v1beta1/replicasets=true +# - extensions/v1beta1/networkpolicies=true +# - extensions/v1beta1/podsecuritypolicies=true + +# A port range to reserve for services with NodePort visibility. +# Inclusive at both ends of the range. 
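+# Note: tasks/main.yml in this role also writes this range into the
+# net.ipv4.ip_local_reserved_ports sysctl ("Ensure nodePort range is reserved"),
+# which keeps the kernel from handing these ports out as ephemeral source ports.
+# An inventory override is a one-line sketch like the following (illustrative value only,
+# not a recommendation):
+# kube_apiserver_node_port_range: "30000-31999"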
+kube_apiserver_node_port_range: "30000-32767" + +# Configure the amount of pods able to run on single node +# default is equal to application default +kubelet_max_pods: 110 + +# Sets the maximum number of processes running per Pod +# Default value -1 = unlimited +kubelet_pod_pids_limit: -1 + +## Support parameters to be passed to kubelet via kubelet-config.yaml +kubelet_config_extra_args: {} + +## Parameters to be passed to kubelet via kubelet-config.yaml when cgroupfs is used as cgroup driver +kubelet_config_extra_args_cgroupfs: + systemCgroups: /system.slice + cgroupRoot: / + +## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not masters +kubelet_node_config_extra_args: {} + +# Maximum number of container log files that can be present for a container. +kubelet_logfiles_max_nr: 5 + +# Maximum size of the container log file before it is rotated +kubelet_logfiles_max_size: 10Mi + +## Support custom flags to be passed to kubelet +kubelet_custom_flags: [] + +## Support custom flags to be passed to kubelet only on nodes, not masters +kubelet_node_custom_flags: [] + +# If non-empty, will use this string as identification instead of the actual hostname +kube_override_hostname: >- + {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- else -%} + {{ inventory_hostname }} + {%- endif -%} + +# The read-only port for the Kubelet to serve on with no authentication/authorization. +kube_read_only_port: 0 + +# Port for healthz for Kubelet +kubelet_healthz_port: 10248 + +# Bind address for healthz for Kubelet +kubelet_healthz_bind_address: 127.0.0.1 + +# sysctl_file_path to add sysctl conf to +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +# For the openstack integration kubelet will need credentials to access +# openstack apis like nova and cinder. Per default this values will be +# read from the environment. +openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +openstack_username: "{{ lookup('env','OS_USERNAME') }}" +openstack_password: "{{ lookup('env','OS_PASSWORD') }}" +openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" +openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true),true) }}" +openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" +openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" +openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" + +# For the vsphere integration, kubelet will need credentials to access +# vsphere apis +# Documentation regarding these values can be found +# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105 +vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}" +vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}" +vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}" +vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}" +vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}" +vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}" +vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}" +vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}" +vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}" + +vsphere_scsi_controller_type: pvscsi +# vsphere_public_network is name of the network the VMs are joined to +vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}" + +## When azure is used, you need to also set the following variables. 
+## see docs/azure.md for details on how to get these values +# azure_tenant_id: +# azure_subscription_id: +# azure_aad_client_id: +# azure_aad_client_secret: +# azure_resource_group: +# azure_location: +# azure_subnet_name: +# azure_security_group_name: +# azure_vnet_name: +# azure_route_table_name: +# supported values are 'standard' or 'vmss' +# azure_vmtype: standard +# Sku of Load Balancer and Public IP. Candidate values are: basic and standard. +azure_loadbalancer_sku: basic +# excludes master nodes from standard load balancer. +azure_exclude_master_from_standard_lb: true +# disables the outbound SNAT for public load balancer rules +azure_disable_outbound_snat: false +# use instance metadata service where possible +azure_use_instance_metadata: true +# use specific Azure API endpoints +azure_cloud: AzurePublicCloud + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA diff --git a/kubespray/roles/kubernetes/node/handlers/main.yml b/kubespray/roles/kubernetes/node/handlers/main.yml new file mode 100644 index 0000000..512b4e8 --- /dev/null +++ b/kubespray/roles/kubernetes/node/handlers/main.yml @@ -0,0 +1,15 @@ +--- +- name: Node | restart kubelet + command: /bin/true + notify: + - Kubelet | reload systemd + - Kubelet | restart kubelet + +- name: Kubelet | reload systemd + systemd: + daemon_reload: true + +- name: Kubelet | restart kubelet + service: + name: kubelet + state: restarted diff --git a/kubespray/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml b/kubespray/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml new file mode 100644 index 0000000..62337fc --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml @@ -0,0 +1,82 @@ +--- +- name: check azure_tenant_id value + fail: + msg: "azure_tenant_id is missing" + when: azure_tenant_id is not defined or not azure_tenant_id + +- name: check azure_subscription_id value + fail: + msg: "azure_subscription_id is missing" + when: azure_subscription_id is not defined or not azure_subscription_id + +- name: check azure_aad_client_id value + fail: + msg: "azure_aad_client_id is missing" + when: azure_aad_client_id is not defined or not azure_aad_client_id + +- name: check azure_aad_client_secret value + fail: + msg: "azure_aad_client_secret is missing" + when: azure_aad_client_secret is not defined or not azure_aad_client_secret + +- name: check azure_resource_group value + fail: + msg: "azure_resource_group is missing" + when: azure_resource_group is not defined or not 
azure_resource_group + +- name: check azure_location value + fail: + msg: "azure_location is missing" + when: azure_location is not defined or not azure_location + +- name: check azure_subnet_name value + fail: + msg: "azure_subnet_name is missing" + when: azure_subnet_name is not defined or not azure_subnet_name + +- name: check azure_security_group_name value + fail: + msg: "azure_security_group_name is missing" + when: azure_security_group_name is not defined or not azure_security_group_name + +- name: check azure_vnet_name value + fail: + msg: "azure_vnet_name is missing" + when: azure_vnet_name is not defined or not azure_vnet_name + +- name: check azure_vnet_resource_group value + fail: + msg: "azure_vnet_resource_group is missing" + when: azure_vnet_resource_group is not defined or not azure_vnet_resource_group + +- name: check azure_route_table_name value + fail: + msg: "azure_route_table_name is missing" + when: azure_route_table_name is not defined or not azure_route_table_name + +- name: check azure_loadbalancer_sku value + fail: + msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. Supported values are 'basic', 'standard'" + when: azure_loadbalancer_sku not in ["basic", "standard"] + +- name: "check azure_exclude_master_from_standard_lb is a bool" + assert: + that: azure_exclude_master_from_standard_lb |type_debug == 'bool' + +- name: "check azure_disable_outbound_snat is a bool" + assert: + that: azure_disable_outbound_snat |type_debug == 'bool' + +- name: "check azure_use_instance_metadata is a bool" + assert: + that: azure_use_instance_metadata |type_debug == 'bool' + +- name: check azure_vmtype value + fail: + msg: "azure_vmtype is missing. Supported values are 'standard' or 'vmss'" + when: azure_vmtype is not defined or not azure_vmtype + +- name: check azure_cloud value + fail: + msg: "azure_cloud has an invalid value '{{ azure_cloud }}'. Supported values are 'AzureChinaCloud', 'AzureGermanCloud', 'AzurePublicCloud', 'AzureUSGovernmentCloud'." 
+ when: azure_cloud not in ["AzureChinaCloud", "AzureGermanCloud", "AzurePublicCloud", "AzureUSGovernmentCloud"] diff --git a/kubespray/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml b/kubespray/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml new file mode 100644 index 0000000..6ff1732 --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml @@ -0,0 +1,34 @@ +--- +- name: check openstack_auth_url value + fail: + msg: "openstack_auth_url is missing" + when: openstack_auth_url is not defined or not openstack_auth_url + +- name: check openstack_username value + fail: + msg: "openstack_username is missing" + when: openstack_username is not defined or not openstack_username + +- name: check openstack_password value + fail: + msg: "openstack_password is missing" + when: openstack_password is not defined or not openstack_password + +- name: check openstack_region value + fail: + msg: "openstack_region is missing" + when: openstack_region is not defined or not openstack_region + +- name: check openstack_tenant_id value + fail: + msg: "one of openstack_tenant_id or openstack_trust_id must be specified" + when: + - openstack_tenant_id is not defined or not openstack_tenant_id + - openstack_trust_id is not defined + +- name: check openstack_trust_id value + fail: + msg: "one of openstack_tenant_id or openstack_trust_id must be specified" + when: + - openstack_trust_id is not defined or not openstack_trust_id + - openstack_tenant_id is not defined diff --git a/kubespray/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml b/kubespray/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml new file mode 100644 index 0000000..873eb71 --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml @@ -0,0 +1,22 @@ +--- +- name: check vsphere environment variables + fail: + msg: "{{ item.name }} is missing" + when: item.value is not defined or not item.value + with_items: + - name: vsphere_vcenter_ip + value: "{{ vsphere_vcenter_ip }}" + - name: vsphere_vcenter_port + value: "{{ vsphere_vcenter_port }}" + - name: vsphere_user + value: "{{ vsphere_user }}" + - name: vsphere_password + value: "{{ vsphere_password }}" + - name: vsphere_datacenter + value: "{{ vsphere_datacenter }}" + - name: vsphere_datastore + value: "{{ vsphere_datastore }}" + - name: vsphere_working_dir + value: "{{ vsphere_working_dir }}" + - name: vsphere_insecure + value: "{{ vsphere_insecure }}" diff --git a/kubespray/roles/kubernetes/node/tasks/facts.yml b/kubespray/roles/kubernetes/node/tasks/facts.yml new file mode 100644 index 0000000..97d52e8 --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/facts.yml @@ -0,0 +1,57 @@ +--- +- block: + - name: look up docker cgroup driver + shell: "docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'" + register: docker_cgroup_driver_result + changed_when: false + check_mode: no + + - name: set kubelet_cgroup_driver_detected fact for docker + set_fact: + kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}" + when: container_manager == 'docker' + +- block: + - name: look up crio cgroup driver + shell: "{{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'" + register: crio_cgroup_driver_result + changed_when: false + + - name: set kubelet_cgroup_driver_detected fact for crio + set_fact: + kubelet_cgroup_driver_detected: "{{ 
crio_cgroup_driver_result.stdout }}" + when: container_manager == 'crio' + +- name: set kubelet_cgroup_driver_detected fact for containerd + set_fact: + kubelet_cgroup_driver_detected: >- + {%- if containerd_use_systemd_cgroup -%}systemd{%- else -%}cgroupfs{%- endif -%} + when: container_manager == 'containerd' + +- name: set kubelet_cgroup_driver + set_fact: + kubelet_cgroup_driver: "{{ kubelet_cgroup_driver_detected }}" + when: kubelet_cgroup_driver is undefined + +- name: set kubelet_cgroups options when cgroupfs is used + set_fact: + kubelet_runtime_cgroups: "{{ kubelet_runtime_cgroups_cgroupfs }}" + kubelet_kubelet_cgroups: "{{ kubelet_kubelet_cgroups_cgroupfs }}" + when: kubelet_cgroup_driver == 'cgroupfs' + +- name: set kubelet_config_extra_args options when cgroupfs is used + vars: + set_fact: + kubelet_config_extra_args: "{{ kubelet_config_extra_args | combine(kubelet_config_extra_args_cgroupfs) }}" + when: kubelet_cgroup_driver == 'cgroupfs' + +- name: os specific vars + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + skip: true diff --git a/kubespray/roles/kubernetes/node/tasks/install.yml b/kubespray/roles/kubernetes/node/tasks/install.yml new file mode 100644 index 0000000..cf7a1d8 --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/install.yml @@ -0,0 +1,22 @@ +--- +- name: install | Copy kubeadm binary from download dir + copy: + src: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}" + dest: "{{ bin_dir }}/kubeadm" + mode: 0755 + remote_src: true + tags: + - kubeadm + when: + - not inventory_hostname in groups['kube_control_plane'] + +- name: install | Copy kubelet binary from download dir + copy: + src: "{{ local_release_dir }}/kubelet-{{ kube_version }}-{{ image_arch }}" + dest: "{{ bin_dir }}/kubelet" + mode: 0755 + remote_src: true + tags: + - kubelet + - upgrade + notify: Node | restart kubelet diff --git a/kubespray/roles/kubernetes/node/tasks/kubelet.yml b/kubespray/roles/kubernetes/node/tasks/kubelet.yml new file mode 100644 index 0000000..c551f77 --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/kubelet.yml @@ -0,0 +1,52 @@ +--- +- name: Set kubelet api version to v1beta1 + set_fact: + kubeletConfig_api_version: v1beta1 + tags: + - kubelet + - kubeadm + +- name: Write kubelet environment config file (kubeadm) + template: + src: "kubelet.env.{{ kubeletConfig_api_version }}.j2" + dest: "{{ kube_config_dir }}/kubelet.env" + setype: "{{ (preinstall_selinux_state != 'disabled') | ternary('etc_t', omit) }}" + backup: yes + mode: 0640 + notify: Node | restart kubelet + tags: + - kubelet + - kubeadm + +- name: Write kubelet config file + template: + src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2" + dest: "{{ kube_config_dir }}/kubelet-config.yaml" + mode: 0640 + notify: Kubelet | restart kubelet + tags: + - kubelet + - kubeadm + +- name: Write kubelet systemd init file + template: + src: "kubelet.service.j2" + dest: "/etc/systemd/system/kubelet.service" + backup: "yes" + mode: 0644 + notify: Node | restart kubelet + tags: + - kubelet + - kubeadm + +- name: flush_handlers and reload-systemd + meta: flush_handlers + +- name: Enable kubelet + service: 
+ name: kubelet + enabled: yes + state: started + tags: + - kubelet + notify: Kubelet | restart kubelet diff --git a/kubespray/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml b/kubespray/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml new file mode 100644 index 0000000..c8e0108 --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml @@ -0,0 +1,34 @@ +--- +- name: haproxy | Cleanup potentially deployed nginx-proxy + file: + path: "{{ kube_manifest_dir }}/nginx-proxy.yml" + state: absent + +- name: haproxy | Make haproxy directory + file: + path: "{{ haproxy_config_dir }}" + state: directory + mode: 0755 + owner: root + +- name: haproxy | Write haproxy configuration + template: + src: "loadbalancer/haproxy.cfg.j2" + dest: "{{ haproxy_config_dir }}/haproxy.cfg" + owner: root + mode: 0755 + backup: yes + +- name: haproxy | Get checksum from config + stat: + path: "{{ haproxy_config_dir }}/haproxy.cfg" + get_attributes: no + get_checksum: yes + get_mime: no + register: haproxy_stat + +- name: haproxy | Write static pod + template: + src: manifests/haproxy.manifest.j2 + dest: "{{ kube_manifest_dir }}/haproxy.yml" + mode: 0640 diff --git a/kubespray/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml b/kubespray/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml new file mode 100644 index 0000000..e12bd9b --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml @@ -0,0 +1,13 @@ +--- +- name: kube-vip | Check cluster settings for kube-vip + fail: + msg: "kube-vip require kube_proxy_strict_arp = true, see https://github.com/kube-vip/kube-vip/blob/main/docs/kubernetes/arp/index.md" + when: + - kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp + - kube_vip_arp_enabled + +- name: kube-vip | Write static pod + template: + src: manifests/kube-vip.manifest.j2 + dest: "{{ kube_manifest_dir }}/kube-vip.yml" + mode: 0640 diff --git a/kubespray/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml b/kubespray/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml new file mode 100644 index 0000000..e176cb9 --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml @@ -0,0 +1,34 @@ +--- +- name: haproxy | Cleanup potentially deployed haproxy + file: + path: "{{ kube_manifest_dir }}/haproxy.yml" + state: absent + +- name: nginx-proxy | Make nginx directory + file: + path: "{{ nginx_config_dir }}" + state: directory + mode: 0700 + owner: root + +- name: nginx-proxy | Write nginx-proxy configuration + template: + src: "loadbalancer/nginx.conf.j2" + dest: "{{ nginx_config_dir }}/nginx.conf" + owner: root + mode: 0755 + backup: yes + +- name: nginx-proxy | Get checksum from config + stat: + path: "{{ nginx_config_dir }}/nginx.conf" + get_attributes: no + get_checksum: yes + get_mime: no + register: nginx_stat + +- name: nginx-proxy | Write static pod + template: + src: manifests/nginx-proxy.manifest.j2 + dest: "{{ kube_manifest_dir }}/nginx-proxy.yml" + mode: 0640 diff --git a/kubespray/roles/kubernetes/node/tasks/main.yml b/kubespray/roles/kubernetes/node/tasks/main.yml new file mode 100644 index 0000000..59dc300 --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/main.yml @@ -0,0 +1,193 @@ +--- +- import_tasks: facts.yml + tags: + - facts + +- import_tasks: pre_upgrade.yml + tags: + - kubelet + +- name: Ensure /var/lib/cni exists + file: + path: /var/lib/cni + state: directory + mode: 0755 + +- import_tasks: install.yml + tags: + - kubelet + +- import_tasks: loadbalancer/kube-vip.yml + when: + - 
is_kube_master + - kube_vip_enabled + tags: + - kube-vip + +- import_tasks: loadbalancer/nginx-proxy.yml + when: + - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0' + - loadbalancer_apiserver_localhost + - loadbalancer_apiserver_type == 'nginx' + tags: + - nginx + +- import_tasks: loadbalancer/haproxy.yml + when: + - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0' + - loadbalancer_apiserver_localhost + - loadbalancer_apiserver_type == 'haproxy' + tags: + - haproxy + +- name: Ensure nodePort range is reserved + sysctl: + name: net.ipv4.ip_local_reserved_ports + value: "{{ kube_apiserver_node_port_range }}" + sysctl_set: yes + sysctl_file: "{{ sysctl_file_path }}" + state: present + reload: yes + when: kube_apiserver_node_port_range is defined + tags: + - kube-proxy + +- name: Verify if br_netfilter module exists + command: "modinfo br_netfilter" + environment: + PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH's conservative path management + register: modinfo_br_netfilter + failed_when: modinfo_br_netfilter.rc not in [0, 1] + changed_when: false + check_mode: no + +- name: Verify br_netfilter module path exists + file: + path: /etc/modules-load.d + state: directory + mode: 0755 + +- name: Enable br_netfilter module + modprobe: + name: br_netfilter + state: present + when: modinfo_br_netfilter.rc == 0 + +- name: Persist br_netfilter module + copy: + dest: /etc/modules-load.d/kubespray-br_netfilter.conf + content: br_netfilter + mode: 0644 + when: modinfo_br_netfilter.rc == 0 + +# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module +- name: Check if bridge-nf-call-iptables key exists + command: "sysctl net.bridge.bridge-nf-call-iptables" + failed_when: false + changed_when: false + check_mode: no + register: sysctl_bridge_nf_call_iptables + +- name: Enable bridge-nf-call tables + sysctl: + name: "{{ item }}" + state: present + sysctl_file: "{{ sysctl_file_path }}" + value: "1" + reload: yes + when: sysctl_bridge_nf_call_iptables.rc == 0 + with_items: + - net.bridge.bridge-nf-call-iptables + - net.bridge.bridge-nf-call-arptables + - net.bridge.bridge-nf-call-ip6tables + +- name: Modprobe Kernel Module for IPVS + modprobe: + name: "{{ item }}" + state: present + with_items: + - ip_vs + - ip_vs_rr + - ip_vs_wrr + - ip_vs_sh + when: kube_proxy_mode == 'ipvs' + tags: + - kube-proxy + +- name: Modprobe nf_conntrack_ipv4 + modprobe: + name: nf_conntrack_ipv4 + state: present + register: modprobe_nf_conntrack_ipv4 + ignore_errors: true # noqa ignore-errors + when: + - kube_proxy_mode == 'ipvs' + tags: + - kube-proxy + +- name: Persist ip_vs modules + copy: + dest: /etc/modules-load.d/kube_proxy-ipvs.conf + mode: 0644 + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + {% if modprobe_nf_conntrack_ipv4 is success -%} + nf_conntrack_ipv4 + {%- endif -%} + when: kube_proxy_mode == 'ipvs' + tags: + - kube-proxy + +- include_tasks: "cloud-credentials/{{ cloud_provider }}-credential-check.yml" + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] + tags: + - cloud-provider + - facts + +- name: Test if openstack_cacert is a base64 string + set_fact: + openstack_cacert_is_base64: "{% if openstack_cacert is search ('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}" + when: + - cloud_provider is defined + - cloud_provider == 'openstack' + - openstack_cacert is defined + - openstack_cacert | length > 0 + + +- name: 
Write cacert file + copy: + src: "{{ openstack_cacert if not openstack_cacert_is_base64 else omit }}" + content: "{{ openstack_cacert | b64decode if openstack_cacert_is_base64 else omit }}" + dest: "{{ kube_config_dir }}/openstack-cacert.pem" + group: "{{ kube_cert_group }}" + mode: 0640 + when: + - cloud_provider is defined + - cloud_provider == 'openstack' + - openstack_cacert is defined + - openstack_cacert | length > 0 + tags: + - cloud-provider + +- name: Write cloud-config + template: + src: "cloud-configs/{{ cloud_provider }}-cloud-config.j2" + dest: "{{ kube_config_dir }}/cloud_config" + group: "{{ kube_cert_group }}" + mode: 0640 + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack', 'azure', 'vsphere', 'aws', 'gce' ] + notify: Node | restart kubelet + tags: + - cloud-provider + +- import_tasks: kubelet.yml + tags: + - kubelet + - kubeadm diff --git a/kubespray/roles/kubernetes/node/tasks/pre_upgrade.yml b/kubespray/roles/kubernetes/node/tasks/pre_upgrade.yml new file mode 100644 index 0000000..d9c2d07 --- /dev/null +++ b/kubespray/roles/kubernetes/node/tasks/pre_upgrade.yml @@ -0,0 +1,48 @@ +--- +- name: "Pre-upgrade | check if kubelet container exists" + shell: >- + set -o pipefail && + {% if container_manager in ['crio', 'docker'] %} + {{ docker_bin_dir }}/docker ps -af name=kubelet | grep kubelet + {% elif container_manager == 'containerd' %} + {{ bin_dir }}/crictl ps --all --name kubelet | grep kubelet + {% endif %} + args: + executable: /bin/bash + failed_when: false + changed_when: false + check_mode: no + register: kubelet_container_check + +- name: "Pre-upgrade | copy /var/lib/cni from kubelet" + command: >- + {% if container_manager in ['crio', 'docker'] %} + docker cp kubelet:/var/lib/cni /var/lib/cni + {% elif container_manager == 'containerd' %} + ctr run --rm --mount type=bind,src=/var/lib/cni,dst=/cnilibdir,options=rbind:rw kubelet kubelet-tmp sh -c 'cp /var/lib/cni/* /cnilibdir/' + {% endif %} + args: + creates: "/var/lib/cni" + failed_when: false + when: kubelet_container_check.rc == 0 + +- name: "Pre-upgrade | ensure kubelet container service is stopped if using host deployment" + service: + name: kubelet + state: stopped + when: kubelet_container_check.rc == 0 + +- name: "Pre-upgrade | ensure kubelet container is removed if using host deployment" + shell: >- + {% if container_manager in ['crio', 'docker'] %} + {{ docker_bin_dir }}/docker rm -fv kubelet + {% elif container_manager == 'containerd' %} + {{ bin_dir }}/crictl stop kubelet && {{ bin_dir }}/crictl rm kubelet + {% endif %} + failed_when: false + changed_when: false + register: remove_kubelet_container + retries: 4 + until: remove_kubelet_container.rc == 0 + delay: 5 + when: kubelet_container_check.rc == 0 diff --git a/kubespray/roles/kubernetes/node/templates/cloud-configs/aws-cloud-config.j2 b/kubespray/roles/kubernetes/node/templates/cloud-configs/aws-cloud-config.j2 new file mode 100644 index 0000000..f6d0c3d --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/cloud-configs/aws-cloud-config.j2 @@ -0,0 +1,11 @@ +[Global] +zone={{ aws_zone|default("") }} +vpc={{ aws_vpc|default("") }} +subnetId={{ aws_subnet_id|default("") }} +routeTableId={{ aws_route_table_id|default("") }} +roleArn={{ aws_role_arn|default("") }} +kubernetesClusterTag={{ aws_kubernetes_cluster_tag|default("") }} +kubernetesClusterId={{ aws_kubernetes_cluster_id|default("") }} +disableSecurityGroupIngress={{ "true" if aws_disable_security_group_ingress|default(False) else "false" }} 
+disableStrictZoneCheck={{ "true" if aws_disable_strict_zone_check|default(False) else "false" }} +elbSecurityGroup={{ aws_elb_security_group|default("") }} diff --git a/kubespray/roles/kubernetes/node/templates/cloud-configs/azure-cloud-config.j2 b/kubespray/roles/kubernetes/node/templates/cloud-configs/azure-cloud-config.j2 new file mode 100644 index 0000000..2b1c101 --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/cloud-configs/azure-cloud-config.j2 @@ -0,0 +1,26 @@ +{ + "cloud": "{{ azure_cloud }}", + "tenantId": "{{ azure_tenant_id }}", + "subscriptionId": "{{ azure_subscription_id }}", + "aadClientId": "{{ azure_aad_client_id }}", + "aadClientSecret": "{{ azure_aad_client_secret }}", + "resourceGroup": "{{ azure_resource_group }}", + "location": "{{ azure_location }}", + "subnetName": "{{ azure_subnet_name }}", + "securityGroupName": "{{ azure_security_group_name }}", + "securityGroupResourceGroup": "{{ azure_security_group_resource_group | default(azure_vnet_resource_group) }}", + "vnetName": "{{ azure_vnet_name }}", + "vnetResourceGroup": "{{ azure_vnet_resource_group }}", + "routeTableName": "{{ azure_route_table_name }}", + "routeTableResourceGroup": "{{ azure_route_table_resource_group | default(azure_vnet_resource_group) }}", + "vmType": "{{ azure_vmtype }}", +{% if azure_primary_availability_set_name is defined %} + "primaryAvailabilitySetName": "{{ azure_primary_availability_set_name }}", +{%endif%} + "useInstanceMetadata": {{azure_use_instance_metadata | lower }}, +{% if azure_loadbalancer_sku == "standard" %} + "excludeMasterFromStandardLB": {{ azure_exclude_master_from_standard_lb | lower }}, + "disableOutboundSNAT": {{ azure_disable_outbound_snat | lower }}, +{% endif%} + "loadBalancerSku": "{{ azure_loadbalancer_sku }}" +} diff --git a/kubespray/roles/kubernetes/node/templates/cloud-configs/gce-cloud-config.j2 b/kubespray/roles/kubernetes/node/templates/cloud-configs/gce-cloud-config.j2 new file mode 100644 index 0000000..f4cac50 --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/cloud-configs/gce-cloud-config.j2 @@ -0,0 +1,3 @@ +[global] +node-tags = {{ gce_node_tags }} + diff --git a/kubespray/roles/kubernetes/node/templates/cloud-configs/openstack-cloud-config.j2 b/kubespray/roles/kubernetes/node/templates/cloud-configs/openstack-cloud-config.j2 new file mode 100644 index 0000000..b1f8e0a --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/cloud-configs/openstack-cloud-config.j2 @@ -0,0 +1,54 @@ +[Global] +auth-url="{{ openstack_auth_url }}" +username="{{ openstack_username }}" +password="{{ openstack_password }}" +region="{{ openstack_region }}" +{% if openstack_trust_id is defined and openstack_trust_id != "" %} +trust-id="{{ openstack_trust_id }}" +{% else %} +tenant-id="{{ openstack_tenant_id }}" +{% endif %} +{% if openstack_tenant_name is defined and openstack_tenant_name != "" %} +tenant-name="{{ openstack_tenant_name }}" +{% endif %} +{% if openstack_domain_name is defined and openstack_domain_name != "" %} +domain-name="{{ openstack_domain_name }}" +{% elif openstack_domain_id is defined and openstack_domain_id != "" %} +domain-id ="{{ openstack_domain_id }}" +{% endif %} +{% if openstack_cacert is defined and openstack_cacert != "" %} +ca-file="{{ kube_config_dir }}/openstack-cacert.pem" +{% endif %} + +[BlockStorage] +{% if openstack_blockstorage_version is defined %} +bs-version={{ openstack_blockstorage_version }} +{% endif %} +{% if openstack_blockstorage_ignore_volume_az is defined and 
openstack_blockstorage_ignore_volume_az|bool %} +ignore-volume-az={{ openstack_blockstorage_ignore_volume_az }} +{% endif %} +{% if node_volume_attach_limit is defined and node_volume_attach_limit != "" %} +node-volume-attach-limit="{{ node_volume_attach_limit }}" +{% endif %} + +{% if openstack_lbaas_enabled and openstack_lbaas_subnet_id is defined %} +[LoadBalancer] +subnet-id={{ openstack_lbaas_subnet_id }} +{% if openstack_lbaas_floating_network_id is defined %} +floating-network-id={{ openstack_lbaas_floating_network_id }} +{% endif %} +{% if openstack_lbaas_use_octavia is defined %} +use-octavia={{ openstack_lbaas_use_octavia }} +{% endif %} +{% if openstack_lbaas_method is defined %} +lb-method={{ openstack_lbaas_method }} +{% endif %} +{% if openstack_lbaas_provider is defined %} +lb-provider={{ openstack_lbaas_provider }} +{% endif %} + +create-monitor={{ openstack_lbaas_create_monitor }} +monitor-delay={{ openstack_lbaas_monitor_delay }} +monitor-timeout={{ openstack_lbaas_monitor_timeout }} +monitor-max-retries={{ openstack_lbaas_monitor_max_retries }} +{% endif %} diff --git a/kubespray/roles/kubernetes/node/templates/cloud-configs/vsphere-cloud-config.j2 b/kubespray/roles/kubernetes/node/templates/cloud-configs/vsphere-cloud-config.j2 new file mode 100644 index 0000000..2cda7f6 --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/cloud-configs/vsphere-cloud-config.j2 @@ -0,0 +1,36 @@ +[Global] +user = "{{ vsphere_user }}" +password = "{{ vsphere_password }}" +port = {{ vsphere_vcenter_port }} +insecure-flag = {{ vsphere_insecure }} + +datacenters = "{{ vsphere_datacenter }}" + +[VirtualCenter "{{ vsphere_vcenter_ip }}"] + + +[Workspace] +server = "{{ vsphere_vcenter_ip }}" +datacenter = "{{ vsphere_datacenter }}" +folder = "{{ vsphere_working_dir }}" +default-datastore = "{{ vsphere_datastore }}" +{% if vsphere_resource_pool is defined and vsphere_resource_pool != "" %} +resourcepool-path = "{{ vsphere_resource_pool }}" +{% endif %} + + +[Disk] +scsicontrollertype = {{ vsphere_scsi_controller_type }} + +{% if vsphere_public_network is defined and vsphere_public_network != "" %} +[Network] +public-network = {{ vsphere_public_network }} +{% endif %} + +[Labels] +{% if vsphere_zone_category is defined and vsphere_zone_category != "" %} +zone = {{ vsphere_zone_category }} +{% endif %} +{% if vsphere_region_category is defined and vsphere_region_category != "" %} +region = {{ vsphere_region_category }} +{% endif %} diff --git a/kubespray/roles/kubernetes/node/templates/http-proxy.conf.j2 b/kubespray/roles/kubernetes/node/templates/http-proxy.conf.j2 new file mode 100644 index 0000000..e790477 --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/http-proxy.conf.j2 @@ -0,0 +1,2 @@ +[Service] +Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %} diff --git a/kubespray/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 b/kubespray/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 new file mode 100644 index 0000000..9982f62 --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 @@ -0,0 +1,151 @@ +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +nodeStatusUpdateFrequency: "{{ kubelet_status_update_frequency }}" +failSwapOn: {{ kubelet_fail_swap_on|default(true) }} +authentication: + anonymous: + enabled: false + webhook: + enabled: {{ 
kubelet_authentication_token_webhook }} + x509: + clientCAFile: {{ kube_cert_dir }}/ca.crt +authorization: +{% if kubelet_authorization_mode_webhook %} + mode: Webhook +{% else %} + mode: AlwaysAllow +{% endif %} +{% if kubelet_enforce_node_allocatable is defined and kubelet_enforce_node_allocatable != "\"\"" %} +{% set kubelet_enforce_node_allocatable_list = kubelet_enforce_node_allocatable.split() %} +enforceNodeAllocatable: +{% for item in kubelet_enforce_node_allocatable_list %} +- {{ item }} +{% endfor %} +{% endif %} +staticPodPath: {{ kube_manifest_dir }} +cgroupDriver: {{ kubelet_cgroup_driver | default('systemd') }} +containerLogMaxFiles: {{ kubelet_logfiles_max_nr }} +containerLogMaxSize: {{ kubelet_logfiles_max_size }} +maxPods: {{ kubelet_max_pods }} +podPidsLimit: {{ kubelet_pod_pids_limit }} +address: {{ kubelet_bind_address }} +readOnlyPort: {{ kube_read_only_port }} +healthzPort: {{ kubelet_healthz_port }} +healthzBindAddress: {{ kubelet_healthz_bind_address }} +kubeletCgroups: {{ kubelet_kubelet_cgroups }} +clusterDomain: {{ dns_domain }} +{% if kubelet_protect_kernel_defaults|bool %} +protectKernelDefaults: true +{% endif %} +{% if kubelet_rotate_certificates|bool %} +rotateCertificates: true +{% endif %} +{% if kubelet_rotate_server_certificates|bool %} +serverTLSBootstrap: true +{% endif %} +{# DNS settings for kubelet #} +{% if enable_nodelocaldns %} +{% set kubelet_cluster_dns = [nodelocaldns_ip] %} +{% elif dns_mode in ['coredns'] %} +{% set kubelet_cluster_dns = [skydns_server] %} +{% elif dns_mode == 'coredns_dual' %} +{% set kubelet_cluster_dns = [skydns_server,skydns_server_secondary] %} +{% elif dns_mode == 'manual' %} +{% set kubelet_cluster_dns = [manual_dns_server] %} +{% else %} +{% set kubelet_cluster_dns = [] %} +{% endif %} +clusterDNS: +{% for dns_address in kubelet_cluster_dns %} +- {{ dns_address }} +{% endfor %} +{# Node reserved CPU/memory #} +kubeReserved: +{% if is_kube_master|bool %} + cpu: {{ kube_master_cpu_reserved }} + memory: {{ kube_master_memory_reserved }} +{% if kube_master_ephemeral_storage_reserved is defined %} + ephemeral-storage: {{ kube_master_ephemeral_storage_reserved }} +{% endif %} +{% if kube_master_pid_reserved is defined %} + pid: "{{ kube_master_pid_reserved }}" +{% endif %} +{% else %} + cpu: {{ kube_cpu_reserved }} + memory: {{ kube_memory_reserved }} +{% if kube_ephemeral_storage_reserved is defined %} + ephemeral-storage: {{ kube_ephemeral_storage_reserved }} +{% endif %} +{% if kube_pid_reserved is defined %} + pid: "{{ kube_pid_reserved }}" +{% endif %} +{% endif %} +{% if system_reserved is defined and system_reserved %} +systemReserved: +{% if is_kube_master|bool %} + cpu: {{ system_master_cpu_reserved }} + memory: {{ system_master_memory_reserved }} +{% if system_master_ephemeral_storage_reserved is defined %} + ephemeral-storage: {{ system_master_ephemeral_storage_reserved }} +{% endif %} +{% if system_master_pid_reserved is defined %} + pid: "{{ system_master_pid_reserved }}" +{% endif %} +{% else %} + cpu: {{ system_cpu_reserved }} + memory: {{ system_memory_reserved }} +{% if system_ephemeral_storage_reserved is defined %} + ephemeral-storage: {{ system_ephemeral_storage_reserved }} +{% endif %} +{% if system_pid_reserved is defined %} + pid: "{{ system_pid_reserved }}" +{% endif %} +{% endif %} +{% endif %} +{% if is_kube_master|bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %} +evictionHard: + {{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }} +{% elif not 
is_kube_master|bool and eviction_hard is defined and eviction_hard %} +evictionHard: + {{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }} +{% endif %} +resolvConf: "{{ kube_resolv_conf }}" +{% if kubelet_config_extra_args %} +{{ kubelet_config_extra_args | to_nice_yaml(indent=2) }} +{% endif %} +{% if inventory_hostname in groups['kube_node'] and kubelet_node_config_extra_args %} +{{ kubelet_node_config_extra_args | to_nice_yaml(indent=2) }} +{% endif %} +{% if kubelet_feature_gates or kube_feature_gates %} +featureGates: +{% for feature in (kubelet_feature_gates | default(kube_feature_gates, true)) %} + {{ feature|replace("=", ": ") }} +{% endfor %} +{% endif %} +{% if tls_min_version is defined %} +tlsMinVersion: {{ tls_min_version }} +{% endif %} +{% if tls_cipher_suites is defined %} +tlsCipherSuites: +{% for tls in tls_cipher_suites %} +- {{ tls }} +{% endfor %} +{% endif %} +{% if kubelet_event_record_qps %} +eventRecordQPS: {{ kubelet_event_record_qps }} +{% endif %} +shutdownGracePeriod: {{ kubelet_shutdown_grace_period }} +shutdownGracePeriodCriticalPods: {{ kubelet_shutdown_grace_period_critical_pods }} +{% if not kubelet_fail_swap_on|default(true) %} +memorySwap: + swapBehavior: {{ kubelet_swap_behavior|default("LimitedSwap") }} +{% endif %} +{% if kubelet_streaming_connection_idle_timeout is defined %} +streamingConnectionIdleTimeout: {{ kubelet_streaming_connection_idle_timeout }} +{% endif %} +{% if kubelet_make_iptables_util_chains is defined %} +makeIPTablesUtilChains: {{ kubelet_make_iptables_util_chains | bool }} +{% endif %} +{% if kubelet_seccomp_default is defined %} +seccompDefault: {{ kubelet_seccomp_default | bool }} +{% endif %} diff --git a/kubespray/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 b/kubespray/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 new file mode 100644 index 0000000..9397d7a --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 @@ -0,0 +1,43 @@ +KUBE_LOGTOSTDERR="--logtostderr=true" +KUBE_LOG_LEVEL="--v={{ kube_log_level }}" +KUBELET_ADDRESS="--node-ip={{ kubelet_address }}" +{% if kube_override_hostname|default('') %} +KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" +{% endif %} + +{# Base kubelet args #} +{% set kubelet_args_base -%} +{# start kubeadm specific settings #} +--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ +--config={{ kube_config_dir }}/kubelet-config.yaml \ +--kubeconfig={{ kube_config_dir }}/kubelet.conf \ +{# end kubeadm specific settings #} +--container-runtime=remote \ +--container-runtime-endpoint={{ cri_socket }} \ +--runtime-cgroups={{ kubelet_runtime_cgroups }} \ +{% endset %} + +{# Kubelet node taints for gpu #} +{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %} +{% if inventory_hostname in nvidia_gpu_nodes and node_taints is defined %} +{% set dummy = node_taints.append('nvidia.com/gpu=:NoSchedule') %} +{% elif inventory_hostname in nvidia_gpu_nodes and node_taints is not defined %} +{% set node_taints = [] %} +{% set dummy = node_taints.append('nvidia.com/gpu=:NoSchedule') %} +{% endif %} +{% endif %} + +KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube_node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} 
{% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}" +{% if kubelet_flexvolumes_plugins_dir is defined %} +KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}" +{% endif %} +{% if kube_network_plugin is defined and kube_network_plugin == "cloud" %} +KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet" +{% endif %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce", "external"] %} +KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config" +{% else %} +KUBELET_CLOUDPROVIDER="" +{% endif %} + +PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/kubespray/roles/kubernetes/node/templates/kubelet.service.j2 b/kubespray/roles/kubernetes/node/templates/kubelet.service.j2 new file mode 100644 index 0000000..feb8374 --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/kubelet.service.j2 @@ -0,0 +1,34 @@ +[Unit] +Description=Kubernetes Kubelet Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After={{ container_manager }}.service +{% if container_manager == 'docker' %} +Wants=docker.socket +{% else %} +Wants={{ container_manager }}.service +{% endif %} + +[Service] +EnvironmentFile=-{{ kube_config_dir }}/kubelet.env +ExecStart={{ bin_dir }}/kubelet \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBELET_API_SERVER \ + $KUBELET_ADDRESS \ + $KUBELET_PORT \ + $KUBELET_HOSTNAME \ + $KUBELET_ARGS \ + $DOCKER_SOCKET \ + $KUBELET_NETWORK_PLUGIN \ + $KUBELET_VOLUME_PLUGIN \ + $KUBELET_CLOUDPROVIDER +Restart=always +RestartSec=10s +{% if kubelet_systemd_hardening %} +# Hardening setup +IPAddressDeny=any +IPAddressAllow={{ kubelet_secure_addresses }} +{% endif %} + +[Install] +WantedBy=multi-user.target diff --git a/kubespray/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 b/kubespray/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 new file mode 100644 index 0000000..1d5d7d9 --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 @@ -0,0 +1,43 @@ +global + maxconn 4000 + log 127.0.0.1 local0 + +defaults + mode http + log global + option httplog + option dontlognull + option http-server-close + option redispatch + retries 5 + timeout http-request 5m + timeout queue 5m + timeout connect 30s + timeout client {{ loadbalancer_apiserver_keepalive_timeout }} + timeout server 15m + timeout http-keep-alive 30s + timeout check 30s + maxconn 4000 + +{% if loadbalancer_apiserver_healthcheck_port is defined -%} +frontend healthz + bind *:{{ loadbalancer_apiserver_healthcheck_port }} + mode http + monitor-uri /healthz +{% endif %} + +frontend kube_api_frontend + bind 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} + mode tcp + option tcplog + default_backend kube_api_backend + +backend kube_api_backend + mode tcp + balance leastconn + default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100 + option httpchk GET /healthz + http-check expect status 200 + {% for host in groups['kube_control_plane'] -%} + server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }} check check-ssl verify none + {% endfor -%} diff --git a/kubespray/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 
b/kubespray/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 new file mode 100644 index 0000000..fd3e574 --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 @@ -0,0 +1,60 @@ +error_log stderr notice; + +worker_processes 2; +worker_rlimit_nofile 130048; +worker_shutdown_timeout 10s; + +events { + multi_accept on; + use epoll; + worker_connections 16384; +} + +stream { + upstream kube_apiserver { + least_conn; + {% for host in groups['kube_control_plane'] -%} + server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }}; + {% endfor -%} + } + + server { + listen 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}; + {% if enable_dual_stack_networks -%} + listen [::]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}; + {% endif -%} + proxy_pass kube_apiserver; + proxy_timeout 10m; + proxy_connect_timeout 1s; + } +} + +http { + aio threads; + aio_write on; + tcp_nopush on; + tcp_nodelay on; + + keepalive_timeout {{ loadbalancer_apiserver_keepalive_timeout }}; + keepalive_requests 100; + reset_timedout_connection on; + server_tokens off; + autoindex off; + + {% if loadbalancer_apiserver_healthcheck_port is defined -%} + server { + listen {{ loadbalancer_apiserver_healthcheck_port }}; + {% if enable_dual_stack_networks -%} + listen [::]:{{ loadbalancer_apiserver_healthcheck_port }}; + {% endif -%} + location /healthz { + access_log off; + return 200; + } + location /stub_status { + stub_status on; + access_log off; + } + } + {% endif %} +} diff --git a/kubespray/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 b/kubespray/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 new file mode 100644 index 0000000..1efcbae --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + name: haproxy + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-haproxy + annotations: + haproxy-cfg-checksum: "{{ haproxy_stat.stat.checksum }}" +spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + containers: + - name: haproxy + image: {{ haproxy_image_repo }}:{{ haproxy_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: {{ loadbalancer_apiserver_cpu_requests }} + memory: {{ loadbalancer_apiserver_memory_requests }} + {% if loadbalancer_apiserver_healthcheck_port is defined -%} + livenessProbe: + httpGet: + path: /healthz + port: {{ loadbalancer_apiserver_healthcheck_port }} + readinessProbe: + httpGet: + path: /healthz + port: {{ loadbalancer_apiserver_healthcheck_port }} + {% endif -%} + volumeMounts: + - mountPath: /usr/local/etc/haproxy/ + name: etc-haproxy + readOnly: true + volumes: + - name: etc-haproxy + hostPath: + path: {{ haproxy_config_dir }} diff --git a/kubespray/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 b/kubespray/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 new file mode 100644 index 0000000..02887cf --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 @@ -0,0 +1,93 @@ +# Inspired by https://github.com/kube-vip/kube-vip/blob/v0.5.5/pkg/kubevip/config_generator.go#L13 +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system +spec: + containers: 
+ - args: + - manager + env: + - name: vip_arp + value: {{ kube_vip_arp_enabled | string | to_json }} + - name: port + value: {{ kube_apiserver_port | string | to_json }} +{% if kube_vip_interface %} + - name: vip_interface + value: {{ kube_vip_interface | string | to_json }} +{% endif %} +{% if kube_vip_services_interface %} + - name: vip_servicesinterface + value: {{ kube_vip_services_interface | string | to_json }} +{% endif %} +{% if kube_vip_cidr %} + - name: vip_cidr + value: {{ kube_vip_cidr | string | to_json }} +{% endif %} +{% if kube_vip_controlplane_enabled %} + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: {{ kube_vip_ddns_enabled | string | to_json }} +{% endif %} +{% if kube_vip_services_enabled %} + - name: svc_enable + value: "true" +{% endif %} +{% if kube_vip_leader_election_enabled %} + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" +{% endif %} +{% if kube_vip_bgp_enabled %} + - name: bgp_enable + value: "true" + - name: bgp_routerid + value: {{ kube_vip_bgp_routerid | string | to_json }} + - name: bgp_as + value: {{ kube_vip_local_as | string | to_json }} + - name: bgp_peeraddress + value: {{ kube_vip_bgp_peeraddress | to_json }} + - name: bgp_peerpass + value: {{ kube_vip_bgp_peerpass | to_json }} + - name: bgp_peeras + value: {{ kube_vip_bgp_peeras | string | to_json }} +{% if kube_vip_bgppeers %} + - name: bgp_peers + value: {{ kube_vip_bgppeers | join(',') | to_json }} +{% endif %} +{% endif %} + - name: address + value: {{ kube_vip_address | to_json }} + image: {{ kube_vip_image_repo }}:{{ kube_vip_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig +status: {} + diff --git a/kubespray/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/kubespray/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 new file mode 100644 index 0000000..04b9b73 --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx-proxy + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-nginx + annotations: + nginx-cfg-checksum: "{{ nginx_stat.stat.checksum }}" +spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + containers: + - name: nginx-proxy + image: {{ nginx_image_repo }}:{{ nginx_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: {{ loadbalancer_apiserver_cpu_requests }} + memory: {{ loadbalancer_apiserver_memory_requests }} + {% if loadbalancer_apiserver_healthcheck_port is defined -%} + livenessProbe: + httpGet: + path: /healthz + port: {{ loadbalancer_apiserver_healthcheck_port }} + readinessProbe: + httpGet: + path: /healthz + port: {{ loadbalancer_apiserver_healthcheck_port }} + {% endif -%} + volumeMounts: + - mountPath: /etc/nginx + name: etc-nginx + readOnly: true + volumes: + - name: etc-nginx + hostPath: + path: {{ nginx_config_dir }} diff --git 
a/kubespray/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 b/kubespray/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 new file mode 100644 index 0000000..4b8af60 --- /dev/null +++ b/kubespray/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kubelet + user: + client-certificate: {{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem + client-key: {{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem +contexts: +- context: + cluster: local + user: kubelet + name: kubelet-{{ cluster_name }} +current-context: kubelet-{{ cluster_name }} diff --git a/kubespray/roles/kubernetes/node/vars/fedora.yml b/kubespray/roles/kubernetes/node/vars/fedora.yml new file mode 100644 index 0000000..59bc55d --- /dev/null +++ b/kubespray/roles/kubernetes/node/vars/fedora.yml @@ -0,0 +1,2 @@ +--- +kube_resolv_conf: "/run/systemd/resolve/resolv.conf" diff --git a/kubespray/roles/kubernetes/node/vars/ubuntu-18.yml b/kubespray/roles/kubernetes/node/vars/ubuntu-18.yml new file mode 100644 index 0000000..59bc55d --- /dev/null +++ b/kubespray/roles/kubernetes/node/vars/ubuntu-18.yml @@ -0,0 +1,2 @@ +--- +kube_resolv_conf: "/run/systemd/resolve/resolv.conf" diff --git a/kubespray/roles/kubernetes/node/vars/ubuntu-20.yml b/kubespray/roles/kubernetes/node/vars/ubuntu-20.yml new file mode 100644 index 0000000..59bc55d --- /dev/null +++ b/kubespray/roles/kubernetes/node/vars/ubuntu-20.yml @@ -0,0 +1,2 @@ +--- +kube_resolv_conf: "/run/systemd/resolve/resolv.conf" diff --git a/kubespray/roles/kubernetes/node/vars/ubuntu-22.yml b/kubespray/roles/kubernetes/node/vars/ubuntu-22.yml new file mode 100644 index 0000000..59bc55d --- /dev/null +++ b/kubespray/roles/kubernetes/node/vars/ubuntu-22.yml @@ -0,0 +1,2 @@ +--- +kube_resolv_conf: "/run/systemd/resolve/resolv.conf" diff --git a/kubespray/roles/kubernetes/preinstall/defaults/main.yml b/kubespray/roles/kubernetes/preinstall/defaults/main.yml new file mode 100644 index 0000000..5537b52 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/defaults/main.yml @@ -0,0 +1,107 @@ +--- +# Set to true to allow pre-checks to fail and continue deployment +ignore_assert_errors: false + +epel_enabled: false +# Kubespray sets this to true after clusterDNS is running to apply changes to the host resolv.conf +dns_late: false + +common_required_pkgs: + - "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1', 'openssl') }}" + - curl + - rsync + - socat + - unzip + - e2fsprogs + - xfsprogs + - ebtables + - bash-completion + - tar + +# Set to true if your network does not support IPv6 +# This maybe necessary for pulling Docker images from +# GCE docker repository +disable_ipv6_dns: false + +kube_owner: kube +kube_cert_group: kube-cert +kube_config_dir: /etc/kubernetes +kube_cert_dir: "{{ kube_config_dir }}/ssl" +kube_cert_compat_dir: /etc/kubernetes/pki +kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +# Flatcar Container Linux by Kinvolk cloud init config file to define /etc/resolv.conf content +# for hostnet pods and infra needs +resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf + +# All inventory hostnames will be written into each /etc/hosts file. +populate_inventory_to_hosts_file: true +# K8S Api FQDN will be written into /etc/hosts file. 
+populate_loadbalancer_apiserver_to_hosts_file: true + +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +etc_hosts_localhost_entries: + 127.0.0.1: + expected: + - localhost + - localhost.localdomain + ::1: + expected: + - localhost6 + - localhost6.localdomain + unexpected: + - localhost + - localhost.localdomain + +# Minimal memory requirement in MB for safety checks +minimal_node_memory_mb: 1024 +minimal_master_memory_mb: 1500 + +yum_repo_dir: /etc/yum.repos.d + +# number of times package install task should be retried +pkg_install_retries: 4 + +# Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +ping_access_ip: true + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. +ntp_enabled: false +# The package to install which provides NTP functionality. +# The default is ntp for most platforms, or chrony on RHEL/CentOS 7 and later. +# The ntp_package can be one of ['ntp','chrony'] +ntp_package: >- + {% if ansible_os_family == "RedHat" -%} + chrony + {%- else -%} + ntp + {%- endif -%} + +# Manage the NTP configuration file. +ntp_manage_config: false +# Specify the NTP servers +# Only takes effect when ntp_manage_config is true. +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" +# Restrict NTP access to these hosts. +# Only takes effect when ntp_manage_config is true. +ntp_restrict: + - "127.0.0.1" + - "::1" +# The NTP driftfile path +# Only takes effect when ntp_manage_config is true. +ntp_driftfile: /var/lib/ntp/ntp.drift +# Enabling tinker panic is useful when running NTP in a VM environment. +# Only takes effect when ntp_manage_config is true. +ntp_tinker_panic: false + +# Force time sync immediately after NTP is installed, which is useful on a newly installed system. +ntp_force_sync_immediately: false + +# Set the timezone for your server. eg: "Etc/UTC","Etc/GMT-8". If not set, the timezone will not change.
+ntp_timezone: "" diff --git a/kubespray/roles/kubernetes/preinstall/files/dhclient_nodnsupdate b/kubespray/roles/kubernetes/preinstall/files/dhclient_nodnsupdate new file mode 100644 index 0000000..03c7c99 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/files/dhclient_nodnsupdate @@ -0,0 +1,4 @@ +#!/bin/sh +make_resolv_conf() { + : +} diff --git a/kubespray/roles/kubernetes/preinstall/gen-gitinfos.sh b/kubespray/roles/kubernetes/preinstall/gen-gitinfos.sh new file mode 100755 index 0000000..bfab5a4 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/gen-gitinfos.sh @@ -0,0 +1,73 @@ +#!/bin/sh +set -e + +# Text color variables +txtbld=$(tput bold) # Bold +bldred=${txtbld}$(tput setaf 1) # red +bldgre=${txtbld}$(tput setaf 2) # green +bldylw=${txtbld}$(tput setaf 3) # yellow +txtrst=$(tput sgr0) # Reset +err=${bldred}ERROR${txtrst} +info=${bldgre}INFO${txtrst} +warn=${bldylw}WARNING${txtrst} + +usage() +{ + cat << EOF +Generates a file which contains useful git information + +Usage : $(basename $0) [global|diff] + ex : + Generate git information + $(basename $0) global + Generate diff from latest tag + $(basename $0) diff +EOF +} + +if [ $# != 1 ]; then + printf "\n$err : Needs 1 argument\n" + usage + exit 2 +fi; + +current_commit=$(git rev-parse HEAD) +latest_tag=$(git describe --abbrev=0 --tags) +latest_tag_commit=$(git show-ref -s ${latest_tag}) +tags_list=$(git tag --points-at "${latest_tag}") + +case ${1} in + "global") +cat<=') + msg: "The current release of Kubespray only supports versions of Kubernetes newer than {{ kube_version_min_required }} - You are trying to apply {{ kube_version }}" + when: not ignore_assert_errors + +# simplify this items-list when https://github.com/ansible/ansible/issues/15753 is resolved +- name: "Stop if known booleans are set as strings (Use JSON format on CLI: -e \"{'key': true }\")" + assert: + that: item.value|type_debug == 'bool' + msg: "{{ item.value }} isn't a bool" + run_once: yes + with_items: + - { name: download_run_once, value: "{{ download_run_once }}" } + - { name: deploy_netchecker, value: "{{ deploy_netchecker }}" } + - { name: download_always_pull, value: "{{ download_always_pull }}" } + - { name: helm_enabled, value: "{{ helm_enabled }}" } + - { name: openstack_lbaas_enabled, value: "{{ openstack_lbaas_enabled }}" } + when: not ignore_assert_errors + +- name: Stop if even number of etcd hosts + assert: + that: groups.etcd|length is not divisibleby 2 + when: + - not ignore_assert_errors + - inventory_hostname in groups.get('etcd',[]) + +- name: Stop if memory is too small for masters + assert: + that: ansible_memtotal_mb >= minimal_master_memory_mb + when: + - not ignore_assert_errors + - inventory_hostname in groups['kube_control_plane'] + +- name: Stop if memory is too small for nodes + assert: + that: ansible_memtotal_mb >= minimal_node_memory_mb + when: + - not ignore_assert_errors + - inventory_hostname in groups['kube_node'] + +# This assertion will fail on the safe side: One can indeed schedule more pods +# on a node than the CIDR-range has space for when additional pods use the host +# network namespace. It is impossible to ascertain the number of such pods at +# provisioning time, so to establish a guarantee, we factor these out.
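+# For example, with a node prefix of /24 (the common default for kube_network_node_prefix) each node
+# gets 2^(32-24) - 2 = 254 usable pod addresses, comfortably above the default kubelet_max_pods of 110.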
+# NOTICE: the check blatantly ignores the inet6-case +- name: Guarantee that enough network address space is available for all pods + assert: + that: "{{ (kubelet_max_pods | default(110)) | int <= (2 ** (32 - kube_network_node_prefix | int)) - 2 }}" + msg: "Do not schedule more pods on a node than inet addresses are available." + when: + - not ignore_assert_errors + - inventory_hostname in groups['k8s_cluster'] + - kube_network_node_prefix is defined + - kube_network_plugin != 'calico' + +- name: Stop if ip var does not match local ips + assert: + that: (ip in ansible_all_ipv4_addresses) or (ip in ansible_all_ipv6_addresses) + msg: "IPv4: '{{ ansible_all_ipv4_addresses }}' and IPv6: '{{ ansible_all_ipv6_addresses }}' do not contain '{{ ip }}'" + when: + - not ignore_assert_errors + - ip is defined + +- name: Ensure ping package + package: + name: >- + {%- if ansible_os_family == 'Debian' -%} + iputils-ping + {%- else -%} + iputils + {%- endif -%} + state: present + when: + - access_ip is defined + - not ignore_assert_errors + - ping_access_ip + - not is_fedora_coreos + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: Stop if access_ip is not pingable + command: ping -c1 {{ access_ip }} + when: + - access_ip is defined + - not ignore_assert_errors + - ping_access_ip + +- name: Stop if RBAC is not enabled when dashboard is enabled + assert: + that: rbac_enabled + when: + - dashboard_enabled + - not ignore_assert_errors + +- name: Stop if RBAC is not enabled when OCI cloud controller is enabled + assert: + that: rbac_enabled + when: + - cloud_provider is defined and cloud_provider == "oci" + - not ignore_assert_errors + +- name: Stop if kernel version is too low + assert: + that: ansible_kernel.split('-')[0] is version('4.9.17', '>=') + when: + - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool + - not ignore_assert_errors + +- name: Stop if bad hostname + assert: + that: inventory_hostname is match("[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") + msg: "Hostname must consist of lower case alphanumeric characters, '.' 
or '-', and must start and end with an alphanumeric character" + when: not ignore_assert_errors + +- name: check cloud_provider value + assert: + that: cloud_provider in ['gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', 'external'] + msg: "If set the 'cloud_provider' var must be set either to 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci' or 'external'" + when: + - cloud_provider is defined + - not ignore_assert_errors + tags: + - cloud-provider + - facts + +- name: "Check that kube_service_addresses is a network range" + assert: + that: + - kube_service_addresses | ipaddr('net') + msg: "kube_service_addresses = '{{ kube_service_addresses }}' is not a valid network range" + run_once: yes + +- name: "Check that kube_pods_subnet is a network range" + assert: + that: + - kube_pods_subnet | ipaddr('net') + msg: "kube_pods_subnet = '{{ kube_pods_subnet }}' is not a valid network range" + run_once: yes + +- name: "Check that kube_pods_subnet does not collide with kube_service_addresses" + assert: + that: + - kube_pods_subnet | ipaddr(kube_service_addresses) | string == 'None' + msg: "kube_pods_subnet cannot be the same network segment as kube_service_addresses" + run_once: yes + +- name: "Check that IP range is enough for the nodes" + assert: + that: + - 2 ** (kube_network_node_prefix - kube_pods_subnet | ipaddr('prefix')) >= groups['k8s_cluster'] | length + msg: "Not enough IPs are available for the desired node count." + when: kube_network_plugin != 'calico' + run_once: yes + +- name: Stop if unknown dns mode + assert: + that: dns_mode in ['coredns', 'coredns_dual', 'manual', 'none'] + msg: "dns_mode can only be 'coredns', 'coredns_dual', 'manual' or 'none'" + when: dns_mode is defined + run_once: true + +- name: Stop if unknown kube proxy mode + assert: + that: kube_proxy_mode in ['iptables', 'ipvs'] + msg: "kube_proxy_mode can only be 'iptables' or 'ipvs'" + when: kube_proxy_mode is defined + run_once: true + +- name: Stop if unknown cert_management + assert: + that: cert_management|d('script') in ['script', 'none'] + msg: "cert_management can only be 'script' or 'none'" + run_once: true + +- name: Stop if unknown resolvconf_mode + assert: + that: resolvconf_mode in ['docker_dns', 'host_resolvconf', 'none'] + msg: "resolvconf_mode can only be 'docker_dns', 'host_resolvconf' or 'none'" + when: resolvconf_mode is defined + run_once: true + +- name: Stop if etcd deployment type is not host, docker or kubeadm + assert: + that: etcd_deployment_type in ['host', 'docker', 'kubeadm'] + msg: "The etcd deployment type, 'etcd_deployment_type', must be host, docker or kubeadm" + when: + - inventory_hostname in groups.get('etcd',[]) + +- name: Stop if container manager is not docker, crio or containerd + assert: + that: container_manager in ['docker', 'crio', 'containerd'] + msg: "The container manager, 'container_manager', must be docker, crio or containerd" + run_once: true + +- name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker + assert: + that: etcd_deployment_type in ['host', 'kubeadm'] + msg: "The etcd deployment type, 'etcd_deployment_type', must be host or kubeadm when container_manager is not docker" + when: + - inventory_hostname in groups.get('etcd',[]) + - container_manager != 'docker' + +# TODO: Clean this task up when we drop backward compatibility support for `etcd_kubeadm_enabled` +- name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker and etcd_kubeadm_enabled is not defined + block: + - name: Warn 
the user if they are still using `etcd_kubeadm_enabled` + debug: + msg: > + "WARNING! => `etcd_kubeadm_enabled` is deprecated and will be removed in a future release. + You can set `etcd_deployment_type` to `kubeadm` instead of setting `etcd_kubeadm_enabled` to `true`." + changed_when: true + + - name: Stop if `etcd_kubeadm_enabled` is defined and `etcd_deployment_type` is not `kubeadm` or `host` + assert: + that: etcd_deployment_type == 'kubeadm' + msg: > + It is not possible to use `etcd_kubeadm_enabled` when `etcd_deployment_type` is set to {{ etcd_deployment_type }}. + Unset the `etcd_kubeadm_enabled` variable and set `etcd_deployment_type` to the desired deployment type (`host`, `kubeadm`, `docker`) instead. + when: etcd_kubeadm_enabled + run_once: yes + when: etcd_kubeadm_enabled is defined + +- name: Stop if download_localhost is enabled but download_run_once is not + assert: + that: download_run_once + msg: "download_localhost requires download_run_once to be enabled" + when: download_localhost + +- name: Stop if kata_containers_enabled is enabled when container_manager is docker + assert: + that: container_manager != 'docker' + msg: "kata_containers_enabled is supported only with containerd and CRI-O. See https://github.com/kata-containers/documentation/blob/1.11.4/how-to/run-kata-with-k8s.md#install-a-cri-implementation for details" + when: kata_containers_enabled + +- name: Stop if gvisor_enabled is enabled when container_manager is not containerd + assert: + that: container_manager == 'containerd' + msg: "gvisor_enabled is only compatible with containerd. See https://github.com/kubernetes-sigs/kubespray/issues/7650 for details" + when: gvisor_enabled + +- name: Stop if download_localhost is enabled for Flatcar Container Linux + assert: + that: ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + msg: "download_run_once not supported for Flatcar Container Linux" + when: download_run_once or download_force_cache + +- name: Ensure minimum containerd version + assert: + that: containerd_version is version(containerd_min_version_required, '>=') + msg: "containerd_version is too low. Minimum version {{ containerd_min_version_required }}" + run_once: yes + when: + - containerd_version not in ['latest', 'edge', 'stable'] + - container_manager == 'containerd' + +- name: Stop if using deprecated containerd_config variable + assert: + that: containerd_config is not defined + msg: "Variable containerd_config is now deprecated. See https://github.com/kubernetes-sigs/kubespray/blob/master/inventory/sample/group_vars/all/containerd.yml for details." + when: + - containerd_config is defined + - not ignore_assert_errors + +- name: Stop if auto_renew_certificates is enabled when certificates are managed externally (kube_external_ca_mode is true) + assert: + that: not auto_renew_certificates + msg: "Variable auto_renew_certificates must be disabled when the CA is managed externally: kube_external_ca_mode = true" + when: + - kube_external_ca_mode + - not ignore_assert_errors + +- name: Stop if using deprecated comma separated list for admission plugins + assert: + that: "',' not in kube_apiserver_enable_admission_plugins[0]" + msg: "Comma-separated list for kube_apiserver_enable_admission_plugins is now deprecated, use separate list items for each plugin."
+ when: + - kube_apiserver_enable_admission_plugins is defined + - kube_apiserver_enable_admission_plugins | length > 0 diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/kubespray/roles/kubernetes/preinstall/tasks/0040-set_facts.yml new file mode 100644 index 0000000..3ae8412 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0040-set_facts.yml @@ -0,0 +1,279 @@ +--- +- name: Force binaries directory for Flatcar Container Linux by Kinvolk + set_fact: + bin_dir: "/opt/bin" + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + tags: + - facts + +- name: Set os_family fact for Kylin Linux Advanced Server and openEuler + set_fact: + ansible_os_family: "RedHat" + ansible_distribution_major_version: "8" + when: ansible_distribution in ["Kylin Linux Advanced Server", "openEuler"] + tags: + - facts + +- name: check if booted with ostree + stat: + path: /run/ostree-booted + get_attributes: no + get_checksum: no + get_mime: no + register: ostree + +- name: set is_fedora_coreos + lineinfile: + path: /etc/os-release + line: "VARIANT_ID=coreos" + state: present + check_mode: yes + register: os_variant_coreos + changed_when: false + +- name: set is_fedora_coreos + set_fact: + is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}" + +- name: check resolvconf + command: which resolvconf + register: resolvconf + failed_when: false + changed_when: false + check_mode: no + +- name: check existence of /etc/resolvconf/resolv.conf.d + stat: + path: /etc/resolvconf/resolv.conf.d + get_attributes: no + get_checksum: no + get_mime: no + failed_when: false + register: resolvconfd_path + +- name: check status of /etc/resolv.conf + stat: + path: /etc/resolv.conf + follow: no + get_attributes: no + get_checksum: no + get_mime: no + failed_when: false + register: resolvconf_stat + +- block: + + - name: get content of /etc/resolv.conf + slurp: + src: /etc/resolv.conf + register: resolvconf_slurp + + - name: get currently configured nameservers + set_fact: + configured_nameservers: "{{ resolvconf_slurp.content | b64decode | regex_findall('^nameserver\\s*(.*)', multiline=True) | ipaddr }}" + when: resolvconf_slurp.content is defined + + when: resolvconf_stat.stat.exists is defined and resolvconf_stat.stat.exists + +- name: Stop if /etc/resolv.conf not configured nameservers + assert: + that: configured_nameservers|length>0 + fail_msg: "nameserver should not empty in /etc/resolv.conf" + when: + - not ignore_assert_errors + - configured_nameservers is defined + - not (upstream_dns_servers is defined and upstream_dns_servers|length > 0) + - not (disable_host_nameservers | default(false)) + +- name: NetworkManager | Check if host has NetworkManager + # noqa 303 Should we use service_facts for this? + command: systemctl is-active --quiet NetworkManager.service + register: networkmanager_enabled + failed_when: false + changed_when: false + check_mode: false + +- name: check systemd-resolved + # noqa 303 Should we use service_facts for this? 
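+  # The registered rc (0 = active) is what tasks/main.yml later uses to pick the resolv.conf strategy:
+  # 0061-systemd-resolved.yml when systemd-resolved is active, 0060-resolvconf.yml otherwise.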
+ command: systemctl is-active systemd-resolved + register: systemd_resolved_enabled + failed_when: false + changed_when: false + check_mode: no + +- name: set default dns if remove_default_searchdomains is false + set_fact: + default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"] + when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0) + +- name: set dns facts + set_fact: + resolvconf: >- + {%- if resolvconf.rc == 0 and resolvconfd_path.stat.isdir is defined and resolvconfd_path.stat.isdir -%}true{%- else -%}false{%- endif -%} + bogus_domains: |- + {% for d in default_searchdomains|default([]) + searchdomains|default([]) -%} + {{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./ + {%- endfor %} + cloud_resolver: "{{ ['169.254.169.254'] if cloud_provider is defined and cloud_provider == 'gce' else + ['169.254.169.253'] if cloud_provider is defined and cloud_provider == 'aws' else + [] }}" + +- name: check if kubelet is configured + stat: + path: "{{ kube_config_dir }}/kubelet.env" + get_attributes: no + get_checksum: no + get_mime: no + register: kubelet_configured + changed_when: false + +- name: check if early DNS configuration stage + set_fact: + dns_early: "{{ not kubelet_configured.stat.exists }}" + +- name: target resolv.conf files + set_fact: + resolvconffile: /etc/resolv.conf + base: >- + {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/base{%- endif -%} + head: >- + {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%} + when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos + +- name: target temporary resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS) + set_fact: + resolvconffile: /tmp/resolveconf_cloud_init_conf + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] or is_fedora_coreos + +- name: check if /etc/dhclient.conf exists + stat: + path: /etc/dhclient.conf + get_attributes: no + get_checksum: no + get_mime: no + register: dhclient_stat + +- name: target dhclient conf file for /etc/dhclient.conf + set_fact: + dhclientconffile: /etc/dhclient.conf + when: dhclient_stat.stat.exists + +- name: check if /etc/dhcp/dhclient.conf exists + stat: + path: /etc/dhcp/dhclient.conf + get_attributes: no + get_checksum: no + get_mime: no + register: dhcp_dhclient_stat + +- name: target dhclient conf file for /etc/dhcp/dhclient.conf + set_fact: + dhclientconffile: /etc/dhcp/dhclient.conf + when: dhcp_dhclient_stat.stat.exists + +- name: target dhclient hook file for Red Hat family + set_fact: + dhclienthookfile: /etc/dhcp/dhclient.d/zdnsupdate.sh + when: ansible_os_family == "RedHat" + +- name: target dhclient hook file for Debian family + set_fact: + dhclienthookfile: /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate + when: ansible_os_family == "Debian" + +- name: generate search domains to resolvconf + set_fact: + searchentries: + search {{ (default_searchdomains|default([]) + searchdomains|default([])) | join(' ') }} + domainentry: + domain {{ dns_domain }} + supersede_search: + supersede domain-search "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join('", "') }}"; + supersede_domain: + supersede domain-name "{{ dns_domain }}"; + +- name: pick coredns cluster IP or default resolver + set_fact: + coredns_server: |- + {%- if dns_mode == 'coredns' and not dns_early|bool -%} + {{ [ skydns_server ] }} + {%- elif dns_mode == 
'coredns_dual' and not dns_early|bool -%} + {{ [ skydns_server ] + [ skydns_server_secondary ] }} + {%- elif dns_mode == 'manual' and not dns_early|bool -%} + {{ ( manual_dns_server.split(',') | list) }} + {%- elif dns_mode == 'none' and not dns_early|bool -%} + [] + {%- elif dns_early|bool -%} + {{ upstream_dns_servers|default([]) }} + {%- endif -%} + +# This task should only run after cluster/nodelocal DNS is up, otherwise all DNS lookups will timeout +- name: generate nameservers for resolvconf, including cluster DNS + set_fact: + nameserverentries: |- + {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([]) + (configured_nameservers|d([]) if not disable_host_nameservers|d()|bool else [])) | unique | join(',') }} + supersede_nameserver: + supersede domain-name-servers {{ ( ( [nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }}; + when: not dns_early or dns_late + +# This task should run instead of the above task when cluster/nodelocal DNS hasn't +# been deployed yet (like scale.yml/cluster.yml) or when it's down (reset.yml) +- name: generate nameservers for resolvconf, not including cluster DNS + set_fact: + nameserverentries: |- + {{ ( nameservers|d([]) + cloud_resolver|d([]) + configured_nameservers|d([])) | unique | join(',') }} + supersede_nameserver: + supersede domain-name-servers {{ ( nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }}; + when: dns_early and not dns_late + +- name: gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + skip: true + +- name: set etcd vars if using kubeadm mode + set_fact: + etcd_cert_dir: "{{ kube_cert_dir }}" + kube_etcd_cacert_file: "etcd/ca.crt" + kube_etcd_cert_file: "apiserver-etcd-client.crt" + kube_etcd_key_file: "apiserver-etcd-client.key" + when: + - etcd_deployment_type == "kubeadm" + +- name: check /usr readonly + stat: + path: "/usr" + get_attributes: no + get_checksum: no + get_mime: no + register: usr + +- name: set alternate flexvolume path + set_fact: + kubelet_flexvolumes_plugins_dir: /var/lib/kubelet/volumeplugins + when: not usr.stat.writeable + +- block: + - name: Ensure IPv6DualStack featureGate is set when enable_dual_stack_networks is true + set_fact: + kube_feature_gates: "{{ kube_feature_gates + [ 'IPv6DualStack=true' ] }}" + when: + - not 'IPv6DualStack=true' in kube_feature_gates + + - name: Ensure IPv6DualStack kubeadm featureGate is set when enable_dual_stack_networks is true + set_fact: + kubeadm_feature_gates: "{{ kubeadm_feature_gates + [ 'IPv6DualStack=true' ] }}" + when: + - not 'IPv6DualStack=true' in kubeadm_feature_gates + when: + - enable_dual_stack_networks + - kube_version is version('v1.24.0', '<') diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/kubespray/roles/kubernetes/preinstall/tasks/0050-create_directories.yml new file mode 100644 index 0000000..35d7e04 --- /dev/null +++ 
b/kubespray/roles/kubernetes/preinstall/tasks/0050-create_directories.yml @@ -0,0 +1,105 @@ +--- +- name: Create kubernetes directories + file: + path: "{{ item }}" + state: directory + owner: "{{ kube_owner }}" + mode: 0755 + when: inventory_hostname in groups['k8s_cluster'] + become: true + tags: + - kubelet + - k8s-secrets + - kube-controller-manager + - kube-apiserver + - bootstrap-os + - apps + - network + - master + - node + with_items: + - "{{ kube_config_dir }}" + - "{{ kube_cert_dir }}" + - "{{ kube_manifest_dir }}" + - "{{ kube_script_dir }}" + - "{{ kubelet_flexvolumes_plugins_dir }}" + +- name: Create other directories + file: + path: "{{ item }}" + state: directory + owner: root + mode: 0755 + when: inventory_hostname in groups['k8s_cluster'] + become: true + tags: + - kubelet + - k8s-secrets + - kube-controller-manager + - kube-apiserver + - bootstrap-os + - apps + - network + - master + - node + with_items: + - "{{ bin_dir }}" + +- name: Check if kubernetes kubeadm compat cert dir exists + stat: + path: "{{ kube_cert_compat_dir }}" + get_attributes: no + get_checksum: no + get_mime: no + register: kube_cert_compat_dir_check + when: + - inventory_hostname in groups['k8s_cluster'] + - kube_cert_dir != kube_cert_compat_dir + +- name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498) + file: + src: "{{ kube_cert_dir }}" + dest: "{{ kube_cert_compat_dir }}" + state: link + mode: 0755 + when: + - inventory_hostname in groups['k8s_cluster'] + - kube_cert_dir != kube_cert_compat_dir + - not kube_cert_compat_dir_check.stat.exists + +- name: Create cni directories + file: + path: "{{ item }}" + state: directory + owner: "{{ kube_owner }}" + mode: 0755 + with_items: + - "/etc/cni/net.d" + - "/opt/cni/bin" + - "/var/lib/calico" + when: + - kube_network_plugin in ["calico", "weave", "canal", "flannel", "cilium", "kube-ovn", "kube-router", "macvlan"] + - inventory_hostname in groups['k8s_cluster'] + tags: + - network + - cilium + - calico + - weave + - canal + - kube-ovn + - kube-router + - bootstrap-os + +- name: Create local volume provisioner directories + file: + path: "{{ local_volume_provisioner_storage_classes[item].host_dir }}" + state: directory + owner: root + group: root + mode: "{{ local_volume_provisioner_directory_mode }}" + with_items: "{{ local_volume_provisioner_storage_classes.keys() | list }}" + when: + - inventory_hostname in groups['k8s_cluster'] + - local_volume_provisioner_enabled + tags: + - persistent_volumes diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/kubespray/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml new file mode 100644 index 0000000..4397cdd --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml @@ -0,0 +1,58 @@ +--- +- name: create temporary resolveconf cloud init file + command: cp -f /etc/resolv.conf "{{ resolvconffile }}" + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: Add domain/search/nameservers/options to resolv.conf + blockinfile: + path: "{{ resolvconffile }}" + block: |- + {% for item in [domainentry] + [searchentries] -%} + {{ item }} + {% endfor %} + {% for item in nameserverentries.split(',') %} + nameserver {{ item }} + {% endfor %} + options ndots:{{ ndots }} timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }} + state: present + insertbefore: BOF + create: yes + backup: "{{ not resolvconf_stat.stat.islnk }}" + marker: "# Ansible entries {mark}" + mode: 0644 + notify: 
Preinstall | propagate resolvconf to k8s components + +- name: Remove search/domain/nameserver options before block + replace: + path: "{{ item[0] }}" + regexp: '^{{ item[1] }}[^#]*(?=# Ansible entries BEGIN)' + backup: "{{ not resolvconf_stat.stat.islnk }}" + with_nested: + - "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}" + - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ] + notify: Preinstall | propagate resolvconf to k8s components + +- name: Remove search/domain/nameserver options after block + replace: + path: "{{ item[0] }}" + regexp: '(# Ansible entries END\n(?:(?!^{{ item[1] }}).*\n)*)(?:^{{ item[1] }}.*\n?)+' + replace: '\1' + backup: "{{ not resolvconf_stat.stat.islnk }}" + with_nested: + - "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}" + - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ] + notify: Preinstall | propagate resolvconf to k8s components + +- name: get temporary resolveconf cloud init file content + command: cat {{ resolvconffile }} + register: cloud_config + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: persist resolvconf cloud init file + template: + dest: "{{ resolveconf_cloud_init_conf }}" + src: resolvconf.j2 + owner: root + mode: 0644 + notify: Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0061-systemd-resolved.yml b/kubespray/roles/kubernetes/preinstall/tasks/0061-systemd-resolved.yml new file mode 100644 index 0000000..3811358 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0061-systemd-resolved.yml @@ -0,0 +1,9 @@ +--- +- name: Write resolved.conf + template: + src: resolved.conf.j2 + dest: /etc/systemd/resolved.conf + owner: root + group: root + mode: 0644 + notify: Preinstall | Restart systemd-resolved diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml b/kubespray/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml new file mode 100644 index 0000000..1cd56d4 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml @@ -0,0 +1,28 @@ +--- +- name: NetworkManager | Ensure NetworkManager conf.d dir + file: + path: "/etc/NetworkManager/conf.d" + state: directory + recurse: yes + +- name: NetworkManager | Prevent NetworkManager from managing Calico interfaces (cali*/tunl*/vxlan.calico) + copy: + content: | + [keyfile] + unmanaged-devices+=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico + dest: /etc/NetworkManager/conf.d/calico.conf + mode: 0644 + when: + - kube_network_plugin == "calico" + notify: Preinstall | reload NetworkManager + +# TODO: add other network_plugin interfaces + +- name: NetworkManager | Prevent NetworkManager from managing K8S interfaces (kube-ipvs0/nodelocaldns) + copy: + content: | + [keyfile] + unmanaged-devices+=interface-name:kube-ipvs0;interface-name:nodelocaldns + dest: /etc/NetworkManager/conf.d/k8s.conf + mode: 0644 + notify: Preinstall | reload NetworkManager diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml b/kubespray/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml new file mode 100644 index 0000000..f245814 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml @@ -0,0 +1,35 @@ +--- +- name: NetworkManager | Add 
nameservers to NM configuration + ini_file: + path: /etc/NetworkManager/conf.d/dns.conf + section: global-dns-domain-* + option: servers + value: "{{ nameserverentries }}" + mode: '0600' + backup: yes + notify: Preinstall | update resolvconf for networkmanager + +- name: set default dns if remove_default_searchdomains is false + set_fact: + default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"] + when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0) + +- name: NetworkManager | Add DNS search to NM configuration + ini_file: + path: /etc/NetworkManager/conf.d/dns.conf + section: global-dns + option: searches + value: "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join(',') }}" + mode: '0600' + backup: yes + notify: Preinstall | update resolvconf for networkmanager + +- name: NetworkManager | Add DNS options to NM configuration + ini_file: + path: /etc/NetworkManager/conf.d/dns.conf + section: global-dns + option: options + value: "ndots:{{ ndots }};timeout:{{ dns_timeout|default('2') }};attempts:{{ dns_attempts|default('2') }};" + mode: '0600' + backup: yes + notify: Preinstall | update resolvconf for networkmanager diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/kubespray/roles/kubernetes/preinstall/tasks/0070-system-packages.yml new file mode 100644 index 0000000..b4fccfb --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0070-system-packages.yml @@ -0,0 +1,98 @@ +--- +- name: Update package management cache (zypper) - SUSE + command: zypper -n --gpg-auto-import-keys ref + register: make_cache_output + until: make_cache_output is succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + when: + - ansible_pkg_mgr == 'zypper' + tags: bootstrap-os + +- block: + - name: Add Debian Backports apt repo + apt_repository: + repo: "deb http://deb.debian.org/debian {{ ansible_distribution_release }}-backports main" + state: present + filename: debian-backports + + - name: Set libseccomp2 pin priority to apt_preferences on Debian buster + copy: + content: | + Package: libseccomp2 + Pin: release a={{ ansible_distribution_release }}-backports + Pin-Priority: 1001 + dest: "/etc/apt/preferences.d/libseccomp2" + owner: "root" + mode: 0644 + when: + - ansible_distribution == "Debian" + - ansible_distribution_version == "10" + tags: + - bootstrap-os + +- name: Update package management cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: ansible_os_family == "Debian" + tags: + - bootstrap-os + +- name: Remove legacy docker repo file + file: + path: "{{ yum_repo_dir }}/docker.repo" + state: absent + when: + - ansible_os_family == "RedHat" + - not is_fedora_coreos + +- name: Install python3-dnf for latest RedHat versions + command: dnf install -y python3-dnf + register: dnf_task_result + until: dnf_task_result is succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + when: + - ansible_distribution == "Fedora" + - ansible_distribution_major_version|int >= 30 + - not is_fedora_coreos + changed_when: False + tags: + - bootstrap-os + +- name: Install epel-release on RHEL derivatives + package: + name: epel-release + state: present + when: + - ansible_os_family == "RedHat" + - not is_fedora_coreos + - epel_enabled|bool + tags: + - bootstrap-os + +- name: Update common_required_pkgs with ipvsadm when kube_proxy_mode is ipvs + set_fact: + common_required_pkgs: "{{ 
common_required_pkgs|default([]) + ['ipvsadm', 'ipset'] }}" + when: kube_proxy_mode == 'ipvs' + +- name: Install packages requirements + package: + name: "{{ required_pkgs | default([]) | union(common_required_pkgs|default([])) }}" + state: present + register: pkgs_task_result + until: pkgs_task_result is succeeded + retries: "{{ pkg_install_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + when: not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) + tags: + - bootstrap-os + +- name: Install ipvsadm for ClearLinux + package: + name: ipvsadm + state: present + when: + - ansible_os_family in ["ClearLinux"] + - kube_proxy_mode == 'ipvs' diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/kubespray/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml new file mode 100644 index 0000000..dafa47f --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml @@ -0,0 +1,138 @@ +--- +# Todo : selinux configuration +- name: Confirm selinux deployed + stat: + path: /etc/selinux/config + get_attributes: no + get_checksum: no + get_mime: no + when: + - ansible_os_family == "RedHat" + - "'Amazon' not in ansible_distribution" + register: slc + +- name: Set selinux policy + selinux: + policy: targeted + state: "{{ preinstall_selinux_state }}" + when: + - ansible_os_family == "RedHat" + - "'Amazon' not in ansible_distribution" + - slc.stat.exists + changed_when: False + tags: + - bootstrap-os + +- name: Disable IPv6 DNS lookup + lineinfile: + dest: /etc/gai.conf + line: "precedence ::ffff:0:0/96 100" + state: present + create: yes + backup: yes + mode: 0644 + when: + - disable_ipv6_dns + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + tags: + - bootstrap-os + +- name: Clean previously used sysctl file locations + file: + path: "/etc/sysctl.d/{{ item }}" + state: absent + with_items: + - ipv4-ip_forward.conf + - bridge-nf-call.conf + +- name: Stat sysctl file configuration + stat: + path: "{{ sysctl_file_path }}" + get_attributes: no + get_checksum: no + get_mime: no + register: sysctl_file_stat + tags: + - bootstrap-os + +- name: Change sysctl file path to link source if linked + set_fact: + sysctl_file_path: "{{ sysctl_file_stat.stat.lnk_source }}" + when: + - sysctl_file_stat.stat.islnk is defined + - sysctl_file_stat.stat.islnk + tags: + - bootstrap-os + +- name: Make sure sysctl file path folder exists + file: + name: "{{ sysctl_file_path | dirname }}" + state: directory + mode: 0755 + +- name: Enable ip forwarding + sysctl: + sysctl_file: "{{ sysctl_file_path }}" + name: net.ipv4.ip_forward + value: "1" + state: present + reload: yes + +- name: Enable ipv6 forwarding + sysctl: + sysctl_file: "{{ sysctl_file_path }}" + name: net.ipv6.conf.all.forwarding + value: 1 + state: present + reload: yes + when: enable_dual_stack_networks | bool + +- name: Check if we need to set fs.may_detach_mounts + stat: + path: /proc/sys/fs/may_detach_mounts + get_attributes: no + get_checksum: no + get_mime: no + register: fs_may_detach_mounts + ignore_errors: true # noqa ignore-errors + +- name: Set fs.may_detach_mounts if needed + sysctl: + sysctl_file: "{{ sysctl_file_path }}" + name: fs.may_detach_mounts + value: 1 + state: present + reload: yes + when: fs_may_detach_mounts.stat.exists | d(false) + +- name: Ensure kube-bench parameters are set + sysctl: + sysctl_file: "{{ sysctl_file_path }}" + name: "{{ item.name }}" + value: "{{ item.value 
}}" + state: present + reload: yes + with_items: + - { name: kernel.keys.root_maxbytes, value: 25000000 } + - { name: kernel.keys.root_maxkeys, value: 1000000 } + - { name: kernel.panic, value: 10 } + - { name: kernel.panic_on_oops, value: 1 } + - { name: vm.overcommit_memory, value: 1 } + - { name: vm.panic_on_oom, value: 0 } + when: kubelet_protect_kernel_defaults|bool + +- name: Check dummy module + modprobe: + name: dummy + state: present + params: 'numdummies=0' + when: enable_nodelocaldns + +- name: Set additional sysctl variables + sysctl: + sysctl_file: "{{ sysctl_file_path }}" + name: "{{ item.name }}" + value: "{{ item.value }}" + state: present + reload: yes + with_items: "{{ additional_sysctl }}" diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml b/kubespray/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml new file mode 100644 index 0000000..d80d14e --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml @@ -0,0 +1,79 @@ +--- +- name: Ensure NTP package + package: + name: + - "{{ ntp_package }}" + state: present + +- name: Disable systemd-timesyncd + service: + name: systemd-timesyncd.service + enabled: false + state: stopped + failed_when: false + +- name: Set fact NTP settings + set_fact: + ntp_config_file: >- + {% if ntp_package == "ntp" -%} + /etc/ntp.conf + {%- elif ansible_os_family in ['RedHat', 'Suse'] -%} + /etc/chrony.conf + {%- else -%} + /etc/chrony/chrony.conf + {%- endif -%} + ntp_service_name: >- + {% if ntp_package == "chrony" -%} + chronyd + {%- elif ansible_os_family == 'RedHat' -%} + ntpd + {%- else -%} + ntp + {%- endif %} + +- name: Generate NTP configuration file. + template: + src: "{{ ntp_config_file | basename }}.j2" + dest: "{{ ntp_config_file }}" + mode: 0644 + notify: Preinstall | restart ntp + when: + - ntp_manage_config + +- name: Stop the NTP Deamon For Sync Immediately # `ntpd -gq`,`chronyd -q` requires the ntp daemon stop + service: + name: "{{ ntp_service_name }}" + state: stopped + when: + - ntp_force_sync_immediately + +- name: Force Sync NTP Immediately + command: >- + timeout -k 60s 60s + {% if ntp_package == "ntp" -%} + ntpd -gq + {%- else -%} + chronyd -q + {%- endif -%} + when: + - ntp_force_sync_immediately + +- name: Ensure NTP service is started and enabled + service: + name: "{{ ntp_service_name }}" + state: started + enabled: true + +- name: Ensure tzdata package + package: + name: + - tzdata + state: present + when: + - ntp_timezone + +- name: Set timezone + timezone: + name: "{{ ntp_timezone }}" + when: + - ntp_timezone diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/kubespray/roles/kubernetes/preinstall/tasks/0090-etchosts.yml new file mode 100644 index 0000000..ae4ffad --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0090-etchosts.yml @@ -0,0 +1,77 @@ +--- +- name: Hosts | create list from inventory + set_fact: + etc_hosts_inventory_block: |- + {% for item in (groups['k8s_cluster'] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%} + {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%} + {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }} + {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }} {% else %} {{ item 
}}.{{ dns_domain }} {{ item }} {% endif %} + + {% endif %} + {% endfor %} + delegate_to: localhost + connection: local + delegate_facts: yes + run_once: yes + +- name: Hosts | populate inventory into hosts file + blockinfile: + path: /etc/hosts + block: "{{ hostvars.localhost.etc_hosts_inventory_block }}" + state: present + create: yes + backup: yes + unsafe_writes: yes + marker: "# Ansible inventory hosts {mark}" + mode: 0644 + when: populate_inventory_to_hosts_file + +- name: Hosts | populate kubernetes loadbalancer address into hosts file + lineinfile: + dest: /etc/hosts + regexp: ".*{{ apiserver_loadbalancer_domain_name }}$" + line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name }}" + state: present + backup: yes + unsafe_writes: yes + when: + - populate_loadbalancer_apiserver_to_hosts_file + - loadbalancer_apiserver is defined + - loadbalancer_apiserver.address is defined + +- name: Hosts | Retrieve hosts file content + slurp: + src: /etc/hosts + register: etc_hosts_content + +- name: Hosts | Extract existing entries for localhost from hosts file + set_fact: + etc_hosts_localhosts_dict: >- + {%- set splitted = (item | regex_replace('[ \t]+', ' ')|regex_replace('#.*$')|trim).split( ' ') -%} + {{ etc_hosts_localhosts_dict|default({}) | combine({splitted[0]: splitted[1::] }) }} + with_items: "{{ (etc_hosts_content['content'] | b64decode).splitlines() }}" + when: + - etc_hosts_content.content is defined + - (item is match('^::1 .*') or item is match('^127.0.0.1 .*')) + +- name: Hosts | Update target hosts file entries dict with required entries + set_fact: + etc_hosts_localhosts_dict_target: >- + {%- set target_entries = (etc_hosts_localhosts_dict|default({})).get(item.key, []) | difference(item.value.get('unexpected' ,[])) -%} + {{ etc_hosts_localhosts_dict_target|default({}) | combine({item.key: (target_entries + item.value.expected)|unique}) }} + loop: "{{ etc_hosts_localhost_entries|dict2items }}" + +- name: Hosts | Update (if necessary) hosts file + lineinfile: + dest: /etc/hosts + line: "{{ item.key }} {{ item.value|join(' ') }}" + regexp: "^{{ item.key }}.*$" + state: present + backup: yes + unsafe_writes: yes + loop: "{{ etc_hosts_localhosts_dict_target|default({})|dict2items }}" + +# gather facts to update ansible_fqdn +- name: Update facts + setup: + gather_subset: min diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml b/kubespray/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml new file mode 100644 index 0000000..50a6202 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml @@ -0,0 +1,33 @@ +--- +- name: Configure dhclient to supersede search/domain/nameservers + blockinfile: + block: |- + {% for item in [ supersede_domain, supersede_search, supersede_nameserver ] -%} + {{ item }} + {% endfor %} + path: "{{ dhclientconffile }}" + create: yes + state: present + insertbefore: BOF + backup: yes + marker: "# Ansible entries {mark}" + mode: 0644 + notify: Preinstall | propagate resolvconf to k8s components + +- name: Configure dhclient hooks for resolv.conf (non-RH) + template: + src: dhclient_dnsupdate.sh.j2 + dest: "{{ dhclienthookfile }}" + owner: root + mode: 0755 + notify: Preinstall | propagate resolvconf to k8s components + when: ansible_os_family not in [ "RedHat", "Suse" ] + +- name: Configure dhclient hooks for resolv.conf (RH-only) + template: + src: dhclient_dnsupdate_rh.sh.j2 + dest: "{{ dhclienthookfile }}" + owner: root + mode: 0755 + notify: Preinstall | propagate 
resolvconf to k8s components + when: ansible_os_family == "RedHat" diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml b/kubespray/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml new file mode 100644 index 0000000..024e39f --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml @@ -0,0 +1,18 @@ +--- + +# These tasks will undo changes done by kubespray in the past if needed (e.g. when upgrading from kubespray 2.0.x +# or when changing resolvconf_mode) + +- name: Remove kubespray specific config from dhclient config + blockinfile: + path: "{{ dhclientconffile }}" + state: absent + backup: yes + marker: "# Ansible entries {mark}" + notify: Preinstall | propagate resolvconf to k8s components + +- name: Remove kubespray specific dhclient hook + file: + path: "{{ dhclienthookfile }}" + state: absent + notify: Preinstall | propagate resolvconf to k8s components diff --git a/kubespray/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml b/kubespray/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml new file mode 100644 index 0000000..598399b --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml @@ -0,0 +1,44 @@ +--- + +# Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time + +- name: install growpart + package: + name: cloud-utils-growpart + state: present + +- name: Gather mounts facts + setup: + gather_subset: 'mounts' + +- name: Search root filesystem device + vars: + query: "[?mount=='/'].device" + _root_device: "{{ ansible_mounts|json_query(query) }}" + set_fact: + device: "{{ _root_device | first | regex_replace('([^0-9]+)[0-9]+', '\\1') }}" + partition: "{{ _root_device | first | regex_replace('[^0-9]+([0-9]+)', '\\1') }}" + root_device: "{{ _root_device }}" + +- name: check if growpart needs to be run + command: growpart -N {{ device }} {{ partition }} + failed_when: False + changed_when: "'NOCHANGE:' not in growpart_needed.stdout" + register: growpart_needed + environment: + LC_ALL: C + +- name: check fs type + command: file -Ls {{ root_device }} + changed_when: False + register: fs_type + +- name: run growpart # noqa 503 + command: growpart {{ device }} {{ partition }} + when: growpart_needed.changed + environment: + LC_ALL: C + +- name: run xfs_growfs # noqa 503 + command: xfs_growfs {{ root_device }} + when: growpart_needed.changed and 'XFS' in fs_type.stdout diff --git a/kubespray/roles/kubernetes/preinstall/tasks/main.yml b/kubespray/roles/kubernetes/preinstall/tasks/main.yml new file mode 100644 index 0000000..45fa3d1 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/tasks/main.yml @@ -0,0 +1,134 @@ +--- +# Disable swap +- import_tasks: 0010-swapoff.yml + when: + - not dns_late + - disable_swap + +- import_tasks: 0020-verify-settings.yml + when: + - not dns_late + tags: + - asserts + +- import_tasks: 0040-set_facts.yml + tags: + - resolvconf + - facts + +- import_tasks: 0050-create_directories.yml + when: + - not dns_late + +- import_tasks: 0060-resolvconf.yml + when: + - dns_mode != 'none' + - resolvconf_mode == 'host_resolvconf' + - systemd_resolved_enabled.rc != 0 + - networkmanager_enabled.rc != 0 + tags: + - bootstrap-os + - resolvconf + +- import_tasks: 0061-systemd-resolved.yml + when: + - dns_mode != 'none' + - resolvconf_mode == 'host_resolvconf' + - systemd_resolved_enabled.rc == 0 + tags: + - bootstrap-os + - resolvconf + +- import_tasks: 
0062-networkmanager-unmanaged-devices.yml + when: + - networkmanager_enabled.rc == 0 + tags: + - bootstrap-os + +- import_tasks: 0063-networkmanager-dns.yml + when: + - dns_mode != 'none' + - resolvconf_mode == 'host_resolvconf' + - networkmanager_enabled.rc == 0 + tags: + - bootstrap-os + - resolvconf + +- import_tasks: 0070-system-packages.yml + when: + - not dns_late + tags: + - bootstrap-os + +- import_tasks: 0080-system-configurations.yml + when: + - not dns_late + tags: + - bootstrap-os + +- import_tasks: 0081-ntp-configurations.yml + when: + - not dns_late + - ntp_enabled + tags: + - bootstrap-os + +- import_tasks: 0090-etchosts.yml + when: + - not dns_late + tags: + - bootstrap-os + - etchosts + +- import_tasks: 0100-dhclient-hooks.yml + when: + - dns_mode != 'none' + - resolvconf_mode == 'host_resolvconf' + - dhclientconffile is defined + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + tags: + - bootstrap-os + - resolvconf + +- import_tasks: 0110-dhclient-hooks-undo.yml + when: + - dns_mode != 'none' + - resolvconf_mode != 'host_resolvconf' + - dhclientconffile is defined + - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + tags: + - bootstrap-os + - resolvconf + +# We need to make sure the network is restarted early enough so that docker can later pick up the correct system +# nameservers and search domains +- name: Flush handlers + meta: flush_handlers + +- name: Check if we are running inside an Azure VM + stat: + path: /var/lib/waagent/ + get_attributes: no + get_checksum: no + get_mime: no + register: azure_check + when: + - not dns_late + tags: + - bootstrap-os + +- import_tasks: 0120-growpart-azure-centos-7.yml + when: + - not dns_late + - azure_check.stat.exists + - ansible_os_family == "RedHat" + tags: + - bootstrap-os + +- name: Run calico checks + include_role: + name: network_plugin/calico + tasks_from: check + when: + - kube_network_plugin == 'calico' + - not ignore_assert_errors diff --git a/kubespray/roles/kubernetes/preinstall/templates/ansible_git.j2 b/kubespray/roles/kubernetes/preinstall/templates/ansible_git.j2 new file mode 100644 index 0000000..abf92a7 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/templates/ansible_git.j2 @@ -0,0 +1,3 @@ +; This file contains the information which identifies the deployment state relative to the git repo +[default] +{{ gitinfo.stdout }} diff --git a/kubespray/roles/kubernetes/preinstall/templates/chrony.conf.j2 b/kubespray/roles/kubernetes/preinstall/templates/chrony.conf.j2 new file mode 100644 index 0000000..7931f43 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/templates/chrony.conf.j2 @@ -0,0 +1,27 @@ +# {{ ansible_managed }} + +# Specify one or more NTP servers. +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% for server in ntp_servers %} +server {{ server }} +{% endfor %} + +# Record the rate at which the system clock gains/loses time. +driftfile /var/lib/chrony/drift + +{% if ntp_tinker_panic is sameas true %} +# Force time sync if the drift exceeds the threshold specified +# Useful for VMs that can be paused and much later resumed. +makestep 1.0 -1 +{% else %} +# Allow the system clock to be stepped in the first three updates +# if its offset is larger than 1 second. +makestep 1.0 3 +{% endif %} + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Specify directory for log files.
+logdir /var/log/chrony diff --git a/kubespray/roles/kubernetes/preinstall/templates/dhclient_dnsupdate.sh.j2 b/kubespray/roles/kubernetes/preinstall/templates/dhclient_dnsupdate.sh.j2 new file mode 100644 index 0000000..8cf8b81 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/templates/dhclient_dnsupdate.sh.j2 @@ -0,0 +1,13 @@ +#!/bin/sh +# +# Prepend resolver options to /etc/resolv.conf after dhclient` +# regenerates the file. See man (5) resolver for more details. +# +if [ $reason = "BOUND" ]; then + if [ -n "$new_domain_search" -o -n "$new_domain_name_servers" ]; then + RESOLV_CONF=$(cat /etc/resolv.conf | sed -r '/^options (timeout|attempts|ndots).*$/d') + OPTIONS="options timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }} ndots:{{ ndots }}" + + printf "%b\n" "$RESOLV_CONF\n$OPTIONS" > /etc/resolv.conf + fi +fi diff --git a/kubespray/roles/kubernetes/preinstall/templates/dhclient_dnsupdate_rh.sh.j2 b/kubespray/roles/kubernetes/preinstall/templates/dhclient_dnsupdate_rh.sh.j2 new file mode 100644 index 0000000..511839f --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/templates/dhclient_dnsupdate_rh.sh.j2 @@ -0,0 +1,17 @@ +#!/bin/sh +# +# Prepend resolver options to /etc/resolv.conf after dhclient` +# regenerates the file. See man (5) resolver for more details. +# +zdnsupdate_config() { + if [ -n "$new_domain_search" -o -n "$new_domain_name_servers" ]; then + RESOLV_CONF=$(cat /etc/resolv.conf | sed -r '/^options (timeout|attempts|ndots).*$/d') + OPTIONS="options timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }} ndots:{{ ndots }}" + + echo -e "$RESOLV_CONF\n$OPTIONS" > /etc/resolv.conf + fi +} + +zdnsupdate_restore() { + : +} diff --git a/kubespray/roles/kubernetes/preinstall/templates/ntp.conf.j2 b/kubespray/roles/kubernetes/preinstall/templates/ntp.conf.j2 new file mode 100644 index 0000000..abeb899 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/templates/ntp.conf.j2 @@ -0,0 +1,45 @@ +# {{ ansible_managed }} + +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +driftfile {{ ntp_driftfile }} + +{% if ntp_tinker_panic is sameas true %} +# Always reset the clock, even if the new time is more than 1000s away +# from the current system time. Useful for VMs that can be paused +# and much later resumed. +tinker panic 0 +{% endif %} + +# Specify one or more NTP servers. +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% for item in ntp_servers %} +pool {{ item }} +{% endfor %} + +# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for +# details. The web page +# might also be helpful. +# +# Note that "restrict" applies to both servers and clients, so a configuration +# that might be intended to block requests from certain clients could also end +# up blocking replies from your own upstream servers. + +# By default, exchange time with everybody, but don't allow configuration. +restrict -4 default kod notrap nomodify nopeer noquery limited +restrict -6 default kod notrap nomodify nopeer noquery limited + +# Local users may interrogate the ntp server more closely. +{% for item in ntp_restrict %} +restrict {{ item }} +{% endfor %} + +# Needed for adding pool entries +restrict source notrap nomodify noquery + +# Disable the monitoring facility to prevent amplification attacks using ntpdc +# monlist command when default restrict does not include the noquery flag. 
See +# CVE-2013-5211 for more details. +# Note: Monitoring will not be disabled with the limited restriction flag. +disable monitor diff --git a/kubespray/roles/kubernetes/preinstall/templates/resolvconf.j2 b/kubespray/roles/kubernetes/preinstall/templates/resolvconf.j2 new file mode 100644 index 0000000..807fdd0 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/templates/resolvconf.j2 @@ -0,0 +1,10 @@ +#cloud-config +write_files: + - path: "/etc/resolv.conf" + permissions: "0644" + owner: "root" + content: | + {% for l in cloud_config.stdout_lines %} + {{ l }} + {% endfor %} + # diff --git a/kubespray/roles/kubernetes/preinstall/templates/resolved.conf.j2 b/kubespray/roles/kubernetes/preinstall/templates/resolved.conf.j2 new file mode 100644 index 0000000..901fd24 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/templates/resolved.conf.j2 @@ -0,0 +1,21 @@ +[Resolve] +{% if dns_early is sameas true and dns_late is sameas false %} +#DNS= +{% else %} +DNS={{ ([nodelocaldns_ip] if enable_nodelocaldns else coredns_server )| list | join(' ') }} +{% endif %} +FallbackDNS={{ ( upstream_dns_servers|d([]) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(' ') }} +{% if remove_default_searchdomains is sameas false or (remove_default_searchdomains is sameas true and searchdomains|default([])|length==0)%} +Domains={{ ([ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([])) | join(' ') }} +{% else %} +Domains={{ searchdomains|default([]) | join(' ') }} +{% endif %} +#LLMNR=no +#MulticastDNS=no +DNSSEC=no +Cache=no-negative +{% if ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] %} +DNSStubListener=no +{% else %} +#DNSStubListener=yes +{% endif %} diff --git a/kubespray/roles/kubernetes/preinstall/vars/amazon.yml b/kubespray/roles/kubernetes/preinstall/vars/amazon.yml new file mode 100644 index 0000000..09c645f --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/vars/amazon.yml @@ -0,0 +1,7 @@ +--- +required_pkgs: + - libselinux-python + - device-mapper-libs + - nss + - conntrack-tools + - libseccomp diff --git a/kubespray/roles/kubernetes/preinstall/vars/centos.yml b/kubespray/roles/kubernetes/preinstall/vars/centos.yml new file mode 100644 index 0000000..2a5b6c7 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/vars/centos.yml @@ -0,0 +1,8 @@ +--- +required_pkgs: + - "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + - device-mapper-libs + - nss + - conntrack + - container-selinux + - libseccomp diff --git a/kubespray/roles/kubernetes/preinstall/vars/debian-11.yml b/kubespray/roles/kubernetes/preinstall/vars/debian-11.yml new file mode 100644 index 0000000..59cbc5a --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/vars/debian-11.yml @@ -0,0 +1,10 @@ +--- +required_pkgs: + - python3-apt + - gnupg + - apt-transport-https + - software-properties-common + - conntrack + - iptables + - apparmor + - libseccomp2 diff --git a/kubespray/roles/kubernetes/preinstall/vars/debian.yml b/kubespray/roles/kubernetes/preinstall/vars/debian.yml new file mode 100644 index 0000000..51a2802 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/vars/debian.yml @@ -0,0 +1,9 @@ +--- +required_pkgs: + - python-apt + - aufs-tools + - apt-transport-https + - software-properties-common + - conntrack + - apparmor + - libseccomp2 diff --git a/kubespray/roles/kubernetes/preinstall/vars/fedora.yml b/kubespray/roles/kubernetes/preinstall/vars/fedora.yml new file mode 100644 
index 0000000..d69b111 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/vars/fedora.yml @@ -0,0 +1,8 @@ +--- +required_pkgs: + - iptables + - libselinux-python3 + - device-mapper-libs + - conntrack + - container-selinux + - libseccomp diff --git a/kubespray/roles/kubernetes/preinstall/vars/redhat.yml b/kubespray/roles/kubernetes/preinstall/vars/redhat.yml new file mode 100644 index 0000000..2a5b6c7 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/vars/redhat.yml @@ -0,0 +1,8 @@ +--- +required_pkgs: + - "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" + - device-mapper-libs + - nss + - conntrack + - container-selinux + - libseccomp diff --git a/kubespray/roles/kubernetes/preinstall/vars/suse.yml b/kubespray/roles/kubernetes/preinstall/vars/suse.yml new file mode 100644 index 0000000..d089ac1 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/vars/suse.yml @@ -0,0 +1,5 @@ +--- +required_pkgs: + - device-mapper + - conntrack-tools + - libseccomp2 diff --git a/kubespray/roles/kubernetes/preinstall/vars/ubuntu.yml b/kubespray/roles/kubernetes/preinstall/vars/ubuntu.yml new file mode 100644 index 0000000..85b3f25 --- /dev/null +++ b/kubespray/roles/kubernetes/preinstall/vars/ubuntu.yml @@ -0,0 +1,8 @@ +--- +required_pkgs: + - python3-apt + - apt-transport-https + - software-properties-common + - conntrack + - apparmor + - libseccomp2 diff --git a/kubespray/roles/kubernetes/tokens/files/kube-gen-token.sh b/kubespray/roles/kubernetes/tokens/files/kube-gen-token.sh new file mode 100644 index 0000000..121b522 --- /dev/null +++ b/kubespray/roles/kubernetes/tokens/files/kube-gen-token.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +token_dir=${TOKEN_DIR:-/var/srv/kubernetes} +token_file="${token_dir}/known_tokens.csv" + +create_accounts=($@) + +if [ ! 
-e "${token_file}" ]; then + touch "${token_file}" +fi + +for account in "${create_accounts[@]}"; do + if grep ",${account}," "${token_file}" ; then + continue + fi + token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + echo "${token},${account},${account}" >> "${token_file}" + echo "${token}" > "${token_dir}/${account}.token" + echo "Added ${account}" +done diff --git a/kubespray/roles/kubernetes/tokens/tasks/check-tokens.yml b/kubespray/roles/kubernetes/tokens/tasks/check-tokens.yml new file mode 100644 index 0000000..ae75f0d --- /dev/null +++ b/kubespray/roles/kubernetes/tokens/tasks/check-tokens.yml @@ -0,0 +1,41 @@ +--- +- name: "Check_tokens | check if the tokens have already been generated on first master" + stat: + path: "{{ kube_token_dir }}/known_tokens.csv" + get_attributes: no + get_checksum: yes + get_mime: no + delegate_to: "{{ groups['kube_control_plane'][0] }}" + register: known_tokens_master + run_once: true + +- name: "Check_tokens | Set default value for 'sync_tokens' and 'gen_tokens' to false" + set_fact: + sync_tokens: false + gen_tokens: false + +- name: "Check_tokens | Set 'sync_tokens' and 'gen_tokens' to true" + set_fact: + gen_tokens: true + when: not known_tokens_master.stat.exists and kube_token_auth|default(true) + run_once: true + +- name: "Check tokens | check if a cert already exists" + stat: + path: "{{ kube_token_dir }}/known_tokens.csv" + get_attributes: no + get_checksum: yes + get_mime: no + register: known_tokens + +- name: "Check_tokens | Set 'sync_tokens' to true" + set_fact: + sync_tokens: >- + {%- set tokens = {'sync': False} -%} + {%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch) + if (not hostvars[server].known_tokens.stat.exists) or + (hostvars[server].known_tokens.stat.checksum|default('') != known_tokens_master.stat.checksum|default('')) -%} + {%- set _ = tokens.update({'sync': True}) -%} + {%- endfor -%} + {{ tokens.sync }} + run_once: true diff --git a/kubespray/roles/kubernetes/tokens/tasks/gen_tokens.yml b/kubespray/roles/kubernetes/tokens/tasks/gen_tokens.yml new file mode 100644 index 0000000..aa1cf21 --- /dev/null +++ b/kubespray/roles/kubernetes/tokens/tasks/gen_tokens.yml @@ -0,0 +1,64 @@ +--- +- name: Gen_tokens | copy tokens generation script + copy: + src: "kube-gen-token.sh" + dest: "{{ kube_script_dir }}/kube-gen-token.sh" + mode: 0700 + run_once: yes + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: gen_tokens|default(false) + +- name: Gen_tokens | generate tokens for master components + command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" + environment: + TOKEN_DIR: "{{ kube_token_dir }}" + with_nested: + - [ "system:kubectl" ] + - "{{ groups['kube_control_plane'] }}" + register: gentoken_master + changed_when: "'Added' in gentoken_master.stdout" + run_once: yes + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: gen_tokens|default(false) + +- name: Gen_tokens | generate tokens for node components + command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" + environment: + TOKEN_DIR: "{{ kube_token_dir }}" + with_nested: + - [ 'system:kubelet' ] + - "{{ groups['kube_node'] }}" + register: gentoken_node + changed_when: "'Added' in gentoken_node.stdout" + run_once: yes + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: gen_tokens|default(false) + +- name: Gen_tokens | Get list of tokens from first master + command: "find {{ kube_token_dir }} -maxdepth 1 -type f" 
+ register: tokens_list + check_mode: no + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + when: sync_tokens|default(false) + +- name: Gen_tokens | Gather tokens + shell: "set -o pipefail && tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0" + args: + warn: false + executable: /bin/bash + register: tokens_data + check_mode: no + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + when: sync_tokens|default(false) + +- name: Gen_tokens | Copy tokens on masters + shell: "set -o pipefail && echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /" + args: + executable: /bin/bash + when: + - inventory_hostname in groups['kube_control_plane'] + - sync_tokens|default(false) + - inventory_hostname != groups['kube_control_plane'][0] + - tokens_data.stdout diff --git a/kubespray/roles/kubernetes/tokens/tasks/main.yml b/kubespray/roles/kubernetes/tokens/tasks/main.yml new file mode 100644 index 0000000..d454a80 --- /dev/null +++ b/kubespray/roles/kubernetes/tokens/tasks/main.yml @@ -0,0 +1,19 @@ +--- + +- import_tasks: check-tokens.yml + tags: + - k8s-secrets + - k8s-gen-tokens + - facts + +- name: Make sure the tokens directory exists + file: + path: "{{ kube_token_dir }}" + state: directory + mode: 0644 + group: "{{ kube_cert_group }}" + +- import_tasks: gen_tokens.yml + tags: + - k8s-secrets + - k8s-gen-tokens diff --git a/kubespray/roles/kubespray-defaults/defaults/main.yaml b/kubespray/roles/kubespray-defaults/defaults/main.yaml new file mode 100644 index 0000000..00b7388 --- /dev/null +++ b/kubespray/roles/kubespray-defaults/defaults/main.yaml @@ -0,0 +1,680 @@ +--- +# Use ProxyCommand if a bastion host is in group all +# This change obsoletes editing the ansible.cfg file depending on bastion existence +ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p -p {{ hostvars['bastion']['ansible_port'] | default(22) }} {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}" + +# SELinux state +preinstall_selinux_state: permissive + +kube_api_anonymous_auth: true + +# Default value, but will be set to true automatically if detected +is_fedora_coreos: false + +# Optionally disable swap +disable_swap: true + +## Change this to use another Kubernetes version, e.g. 
a current beta release +kube_version: v1.25.5 + +## The minimum version working +kube_version_min_required: v1.23.0 + +## Kube Proxy mode One of ['iptables','ipvs'] +kube_proxy_mode: ipvs + +## List of kubeadm init phases that should be skipped during control plane setup +## By default 'addon/coredns' is skipped +## 'addon/kube-proxy' gets skipped for some network plugins +kubeadm_init_phases_skip_default: [ "addon/coredns" ] +kubeadm_init_phases_skip: >- + {%- if kube_network_plugin == 'kube-router' and (kube_router_run_service_proxy is defined and kube_router_run_service_proxy) -%} + {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {%- elif kube_network_plugin == 'cilium' and (cilium_kube_proxy_replacement is defined and cilium_kube_proxy_replacement == 'strict') -%} + {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {%- elif kube_network_plugin == 'calico' and (calico_bpf_enabled is defined and calico_bpf_enabled) -%} + {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {%- elif kube_proxy_remove is defined and kube_proxy_remove -%} + {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ] + {%- else -%} + {{ kubeadm_init_phases_skip_default }} + {%- endif -%} + +# List of kubeadm phases that should be skipped when joining a new node +# You may need to set this to ['preflight'] for air-gaped deployments to avoid failing connectivity tests. +kubeadm_join_phases_skip_default: [] +kubeadm_join_phases_skip: >- + {{ kubeadm_join_phases_skip_default }} + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# Set to true to allow pre-checks to fail and continue deployment +ignore_assert_errors: false + +kube_vip_enabled: false + +# nginx-proxy configure +nginx_config_dir: "/etc/nginx" + +# haproxy configure +haproxy_config_dir: "/etc/haproxy" + +# Directory where the binaries will be installed +bin_dir: /usr/local/bin +docker_bin_dir: /usr/bin +containerd_bin_dir: "{{ bin_dir }}" +etcd_data_dir: /var/lib/etcd +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# Install epel repo on Centos/RHEL +epel_enabled: false + +# DNS configuration. 
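+# Illustrative sketch only (placeholder resolvers): the upstream_dns_servers list consumed by the systemd-resolved template earlier in this change is not defined here; when needed it is usually supplied from inventory group_vars, e.g. +# upstream_dns_servers: +#   - 8.8.8.8 +#   - 1.1.1.1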
+# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.local +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# Default resolv.conf options +docker_dns_options: +- ndots:{{ ndots }} +- timeout:2 +- attempts:2 +# Can be coredns, coredns_dual, manual, or none +dns_mode: coredns + +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 + +# Should be set to a cluster IP if using a custom cluster DNS +manual_dns_server: "" + +# Can be host_resolvconf, docker_dns or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes DNS service (called skydns for historical reasons) +skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" +docker_dns_search_domains: +- 'default.svc.{{ dns_domain }}' +- 'svc.{{ dns_domain }}' + +kube_dns_servers: + coredns: ["{{skydns_server}}"] + coredns_dual: "{{[skydns_server] + [ skydns_server_secondary ]}}" + manual: ["{{manual_dns_server}}"] + +dns_servers: "{{kube_dns_servers[dns_mode]}}" + +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local + +enable_coredns_k8s_endpoint_pod_names: false + +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# Kubectl command +# This is for consistency when using kubectl command in roles, and ensure +kubectl: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# compatibility directory for kubeadm +kube_cert_compat_dir: "/etc/kubernetes/pki" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Set to true when the CAs are managed externally. +# When true, disables all tasks manipulating certificates. Ensure before the kubespray run that: +# - Certificates and CAs are present in kube_cert_dir +# - Kubeconfig files are present in kube_config_dir +kube_external_ca_mode: false + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. 
Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing +kube_network_plugin: calico +kube_network_plugin_multus: false + +# Determines if calico_rr group exists +peer_with_calico_rr: "{{ 'calico_rr' in groups and groups['calico_rr']|length > 0 }}" + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +calico_datastore: "kdd" + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each node for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The virtual cluster IP, real host IPs and ports the API Server will be +# listening on. +# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint +# access IP value (automatically evaluated below) +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" + +# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost, +# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} too. 
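+# Illustrative sketch only (placeholder values): an external API load balancer is typically declared in inventory group_vars so that the loadbalancer_apiserver.address / .port references further below resolve, e.g. +# apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +#   address: 192.0.2.10 +#   port: 8443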
+kube_apiserver_bind_address: 0.0.0.0 + +# https +kube_apiserver_port: 6443 + +# If non-empty, will use this string as identification instead of the actual hostname +kube_override_hostname: >- + {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%} + {%- else -%} + {{ inventory_hostname }} + {%- endif -%} + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# Aggregator +kube_api_aggregator_routing: false + +# Profiling +kube_profiling: false + +# Graceful Node Shutdown +kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods should be less than kubelet_shutdown_grace_period +# to give normal pods time to be gracefully evacuated +kubelet_shutdown_grace_period_critical_pods: 20s + +# Whether to deploy the container engine +deploy_container_engine: "{{ inventory_hostname in groups['k8s_cluster'] or etcd_deployment_type == 'docker' }}" + +# Container for runtime +container_manager: containerd + +# Enable Kata Containers as additional container runtime +# When enabled, it requires `container_manager` different than Docker +kata_containers_enabled: false + +# Enable gVisor as an additional container runtime +# gVisor is only supported with container_manager Docker or containerd +gvisor_enabled: false + +# Enable crun as additional container runtime +# When enabled, it requires container_manager=crio +crun_enabled: false + +# Enable youki as additional container runtime +# When enabled, it requires container_manager=crio +youki_enabled: false + +# Container on localhost (download images when download_localhost is true) +container_manager_on_localhost: "{{ container_manager }}" + +# CRI socket path +cri_socket: >- + {%- if container_manager == 'crio' -%} + unix:///var/run/crio/crio.sock + {%- elif container_manager == 'containerd' -%} + unix:///var/run/containerd/containerd.sock + {%- elif container_manager == 'docker' -%} + unix:///var/run/cri-dockerd.sock + {%- endif -%} + +## Uncomment this if you want to force overlay/overlay2 as docker storage driver +## Please note that overlay2 is only supported on newer kernels +# docker_storage_options: -s overlay2 + +## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7. +docker_container_storage_setup: false + +## It must be define a disk path for docker_container_storage_setup_devs. +## Otherwise docker-storage-setup will be executed incorrectly. +# docker_container_storage_setup_devs: /dev/vdb + +## Only set this if you have more than 3 nameservers: +## If true Kubespray will only use the first 3, otherwise it will fail +docker_dns_servers_strict: false + +# Path used to store Docker data +docker_daemon_graph: "/var/lib/docker" + +## Used to set docker daemon iptables options to true +docker_iptables_enabled: "false" + +# Docker log options +# Rotate container stderr/stdout logs at 50m and keep last 5 +docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5" + +## A list of insecure docker registries (IP address or domain name), for example +## to allow insecure-registry access to self-hosted registries. Empty by default. +# docker_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11 +docker_insecure_registries: [] + +## A list of additional registry mirrors, for example China registry mirror. Empty by default. 
+# docker_registry_mirrors: +# - https://registry.docker-cn.com +# - https://mirror.aliyuncs.com +docker_registry_mirrors: [] + +## If non-empty, will override the default system MountFlags value. +## This option takes a mount propagation flag: shared, slave +## or private, which controls whether mounts in the file system +## namespace set up for docker will receive or propagate mounts +## and unmounts. Leave empty for the system default +# docker_mount_flags: + +## A string of extra options to pass to the docker daemon. +# docker_options: "" + +## A list of plugins to install using 'docker plugin install --grant-all-permissions' +## Empty by default so no plugins will be installed. +docker_plugins: [] + +# Containerd options - these are relevant when container_manager == 'containerd' +containerd_use_systemd_cgroup: true + +## An obvious use case is allowing insecure-registry access to self-hosted registries. +## Entries can be an IP address or a domain name, +## for example mirror.registry.io or 172.19.16.11:5000. +## A port number is also needed if the default HTTPS port is not used. +# containerd_insecure_registries: +# - mirror.registry.io +# - 172.19.16.11:5000 +containerd_insecure_registries: [] + +# Containerd conf default dir +containerd_storage_dir: "/var/lib/containerd" +containerd_state_dir: "/run/containerd" +containerd_systemd_dir: "/etc/systemd/system/containerd.service.d" +containerd_cfg_dir: "/etc/containerd" + +# Settings for containerized control plane (etcd/kubelet/secrets) +# deployment type for legacy etcd mode +etcd_deployment_type: host +cert_management: script + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +kubeconfig_localhost: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +kubectl_localhost: false + +# Define credentials_dir here so it can be overridden +credentials_dir: "{{ inventory_dir }}/credentials" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# Kubernetes dashboard +# RBAC required. See docs/getting-started.md for access details. 
+dashboard_enabled: false + +# Addons which can be enabled +helm_enabled: false +krew_enabled: false +registry_enabled: false +metrics_server_enabled: false +enable_network_policy: true +local_path_provisioner_enabled: false +local_volume_provisioner_enabled: false +local_volume_provisioner_directory_mode: 0700 +cinder_csi_enabled: false +aws_ebs_csi_enabled: false +azure_csi_enabled: false +gcp_pd_csi_enabled: false +vsphere_csi_enabled: false +upcloud_csi_enabled: false +csi_snapshot_controller_enabled: false +persistent_volumes_enabled: false +cephfs_provisioner_enabled: false +rbd_provisioner_enabled: false +ingress_nginx_enabled: false +ingress_alb_enabled: false +cert_manager_enabled: false +expand_persistent_volumes: false +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +argocd_enabled: false + +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +openstack_blockstorage_ignore_volume_az: "{{ volume_cross_zone_attachment | default('false') }}" +# set max volumes per node (cinder-csi), default not set +# node_volume_attach_limit: 25 +# Cinder CSI topology, when false volumes can be cross-mounted between availability zones +# cinder_topology: false +# Set Cinder topology zones (can be multiple zones, default not set) +# cinder_topology_zones: +# - nova +cinder_csi_ignore_volume_az: "{{ volume_cross_zone_attachment | default('false') }}" + +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +openstack_lbaas_enabled: false +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +openstack_lbaas_create_monitor: "yes" +openstack_lbaas_monitor_delay: "1m" +openstack_lbaas_monitor_timeout: "30s" +openstack_lbaas_monitor_max_retries: "3" +openstack_cacert: "{{ lookup('env','OS_CACERT') }}" + +# Default values for the external OpenStack Cloud Controller +external_openstack_enable_ingress_hostname: false +external_openstack_ingress_hostname_suffix: "nip.io" +external_openstack_max_shared_lb: 2 +external_openstack_lbaas_create_monitor: false +external_openstack_lbaas_monitor_delay: "1m" +external_openstack_lbaas_monitor_timeout: "30s" +external_openstack_lbaas_monitor_max_retries: "3" +external_openstack_network_ipv6_disabled: false +external_openstack_lbaas_use_octavia: false +external_openstack_network_internal_networks: [] +external_openstack_network_public_networks: [] + +# Default values for the external Hcloud Cloud Controller +external_hcloud_cloud: + hcloud_api_token: "" + token_secret_name: hcloud + + service_account_name: cloud-controller-manager + + controller_image_tag: "latest" + ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset + ## Format: + ## external_hcloud_cloud.controller_extra_args: + ## arg1: "value1" + ## arg2: "value2" + controller_extra_args: {} + +## List of authorization modes that must be configured for +## the k8s cluster. Only 'AlwaysAllow', 'AlwaysDeny', 'Node' and +## 'RBAC' modes are tested. Order is important. 
+authorization_modes: ['Node', 'RBAC'] +rbac_enabled: "{{ 'RBAC' in authorization_modes }}" + +# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet's HTTPS endpoint +kubelet_authentication_token_webhook: true + +# When enabled, access to the kubelet API requires authorization by delegation to the API server +kubelet_authorization_mode_webhook: false + +# kubelet uses certificates for authenticating to the Kubernetes API +# Automatically generate a new key and request a new certificate from the Kubernetes API as the current certificate approaches expiration +kubelet_rotate_certificates: true +# kubelet can also request a new server certificate from the Kubernetes API +kubelet_rotate_server_certificates: false + +# If set to true, kubelet errors if any of kernel tunables is different than kubelet defaults +kubelet_protect_kernel_defaults: true + +# Set additional sysctl variables to modify Linux kernel variables, for example: +# additional_sysctl: +# - { name: kernel.pid_max, value: 131072 } +# +additional_sysctl: [] + +## List of key=value pairs that describe feature gates for +## the k8s cluster. +kube_feature_gates: [] +kube_apiserver_feature_gates: [] +kube_controller_feature_gates: [] +kube_scheduler_feature_gates: [] +kube_proxy_feature_gates: [] +kubelet_feature_gates: [] +kubeadm_feature_gates: [] + +# Local volume provisioner storage classes +# Levarages Ansibles string to Python datatype casting. Otherwise the dict_key isn't substituted +# see https://github.com/ansible/ansible/issues/17324 +local_volume_provisioner_storage_classes: | + { + "{{ local_volume_provisioner_storage_class | default('local-storage') }}": { + "host_dir": "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}", + "mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}", + "volume_mode": "Filesystem", + "fs_type": "ext4" + + } + } + +# weave's network password for encryption +# if null then no network encryption +# you can use --extra-vars to pass the password in command line +weave_password: EnterPasswordHere + +ssl_ca_dirs: |- + [ + {% if ansible_os_family in ['Flatcar', 'Flatcar Container Linux by Kinvolk'] -%} + '/usr/share/ca-certificates', + {% elif ansible_os_family == 'RedHat' -%} + '/etc/pki/tls', + '/etc/pki/ca-trust', + {% elif ansible_os_family == 'Debian' -%} + '/usr/share/ca-certificates', + {% endif -%} + ] + +# Vars for pointing to kubernetes api endpoints +is_kube_master: "{{ inventory_hostname in groups['kube_control_plane'] }}" +kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}" +kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}" +kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}" +first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(fallback_ips[groups['kube_control_plane'][0]])) }}" +loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}" +loadbalancer_apiserver_type: "nginx" +# applied if only external loadbalancer_apiserver is defined, otherwise ignored +apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local" +kube_apiserver_global_endpoint: |- + {% if loadbalancer_apiserver is defined -%} + https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} + {%- elif use_localhost_as_kubeapi_loadbalancer|default(False)|bool -%} + 
https://127.0.0.1:{{ kube_apiserver_port }} + {%- else -%} + https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} + {%- endif %} +kube_apiserver_endpoint: |- + {% if loadbalancer_apiserver is defined -%} + https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} + {%- elif not is_kube_master and loadbalancer_apiserver_localhost -%} + https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} + {%- elif is_kube_master -%} + https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }} + {%- else -%} + https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }} + {%- endif %} +kube_apiserver_client_cert: "{{ kube_cert_dir }}/ca.crt" +kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key" + +# Set to true to deploy etcd-events cluster +etcd_events_cluster_enabled: false + +# etcd group can be empty when kubeadm manages etcd +etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}" + +# Vars for pointing to etcd endpoints +is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}" +etcd_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}" +etcd_access_address: "{{ access_ip | default(etcd_address) }}" +etcd_events_access_address: "{{ access_ip | default(etcd_address) }}" +etcd_peer_url: "https://{{ etcd_access_address }}:2380" +etcd_client_url: "https://{{ etcd_access_address }}:2379" +etcd_events_peer_url: "https://{{ etcd_events_access_address }}:2382" +etcd_events_client_url: "https://{{ etcd_events_access_address }}:2383" +etcd_access_addresses: |- + {% for item in etcd_hosts -%} + https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:2379{% if not loop.last %},{% endif %} + {%- endfor %} +etcd_events_access_addresses_list: |- + [ + {% for item in etcd_hosts -%} + 'https://{{ hostvars[item]['etcd_events_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:2383'{% if not loop.last %},{% endif %} + {%- endfor %} + ] +etcd_metrics_addresses: |- + {% for item in etcd_hosts -%} + https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %} + {%- endfor %} +etcd_events_access_addresses: "{{etcd_events_access_addresses_list | join(',')}}" +etcd_events_access_addresses_semicolon: "{{etcd_events_access_addresses_list | join(';')}}" +# user should set etcd_member_name in inventory/mycluster/hosts.ini +etcd_member_name: |- + {% for host in groups['etcd'] %} + {% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index|string) }}{% endif %} + {% endfor %} +etcd_peer_addresses: |- + {% for item in groups['etcd'] -%} + {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}=https://{{ hostvars[item].etcd_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2380{% if not loop.last %},{% endif %} + {%- endfor %} +etcd_events_peer_addresses: |- + {% for item in groups['etcd'] -%} + {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2382{% if not loop.last %},{% endif %} + {%- endfor %} + +podsecuritypolicy_enabled: false +etcd_heartbeat_interval: "250" +etcd_election_timeout: 
"5000" +etcd_snapshot_count: "10000" + +certificates_key_size: 2048 +certificates_duration: 36500 + +etcd_config_dir: /etc/ssl/etcd +etcd_events_data_dir: "/var/lib/etcd-events" +etcd_cert_dir: "{{ etcd_config_dir }}/ssl" + +typha_enabled: false + +calico_apiserver_enabled: false + +_host_architecture_groups: + x86_64: amd64 + aarch64: arm64 + armv7l: arm +host_architecture: >- + {%- if ansible_architecture in _host_architecture_groups -%} + {{ _host_architecture_groups[ansible_architecture] }} + {%- else -%} + {{ ansible_architecture }} + {%- endif -%} + +_host_os_groups: + Linux: linux + Darwin: darwin + Win32NT: windows +host_os: >- + {%- if ansible_system in _host_os_groups -%} + {{ _host_os_groups[ansible_system] }} + {%- else -%} + {{ ansible_system }} + {%- endif -%} + +# Sets the eventRecordQPS parameter in kubelet-config.yaml. The default value is 5 (see types.go) +# Setting it to 0 allows unlimited requests per second. +kubelet_event_record_qps: 5 + +proxy_env: + http_proxy: "{{ http_proxy | default ('') }}" + HTTP_PROXY: "{{ http_proxy | default ('') }}" + https_proxy: "{{ https_proxy | default ('') }}" + HTTPS_PROXY: "{{ https_proxy | default ('') }}" + no_proxy: "{{ no_proxy | default ('') }}" + NO_PROXY: "{{ no_proxy | default ('') }}" + +proxy_disable_env: + ALL_PROXY: '' + FTP_PROXY: '' + HTTPS_PROXY: '' + HTTP_PROXY: '' + NO_PROXY: '' + all_proxy: '' + ftp_proxy: '' + http_proxy: '' + https_proxy: '' + no_proxy: '' + +# krew root dir +krew_root_dir: "/usr/local/krew" + +# sysctl_file_path to add sysctl conf to +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" diff --git a/kubespray/roles/kubespray-defaults/meta/main.yml b/kubespray/roles/kubespray-defaults/meta/main.yml new file mode 100644 index 0000000..88d7024 --- /dev/null +++ b/kubespray/roles/kubespray-defaults/meta/main.yml @@ -0,0 +1,6 @@ +--- +dependencies: + - role: download + skip_downloads: true + tags: + - facts diff --git a/kubespray/roles/kubespray-defaults/tasks/fallback_ips.yml b/kubespray/roles/kubespray-defaults/tasks/fallback_ips.yml new file mode 100644 index 0000000..acca31c --- /dev/null +++ b/kubespray/roles/kubespray-defaults/tasks/fallback_ips.yml @@ -0,0 +1,31 @@ +--- +# Set 127.0.0.1 as fallback IP if we do not have host facts for host +# ansible_default_ipv4 isn't what you think. 
+# Thanks https://medium.com/opsops/ansible-default-ipv4-is-not-what-you-think-edb8ab154b10 + +- name: Gather ansible_default_ipv4 from all hosts + tags: always + include_tasks: fallback_ips_gather.yml + when: hostvars[delegate_host_to_gather_facts].ansible_default_ipv4 is not defined + loop: "{{ groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]) }}" + loop_control: + loop_var: delegate_host_to_gather_facts + run_once: yes + +- name: create fallback_ips_base + set_fact: + fallback_ips_base: | + --- + {% for item in (groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique %} + {% set found = hostvars[item].get('ansible_default_ipv4') %} + {{ item }}: "{{ found.get('address', '127.0.0.1') }}" + {% endfor %} + delegate_to: localhost + connection: local + delegate_facts: yes + become: no + run_once: yes + +- name: set fallback_ips + set_fact: + fallback_ips: "{{ hostvars.localhost.fallback_ips_base | from_yaml }}" diff --git a/kubespray/roles/kubespray-defaults/tasks/fallback_ips_gather.yml b/kubespray/roles/kubespray-defaults/tasks/fallback_ips_gather.yml new file mode 100644 index 0000000..2d2d000 --- /dev/null +++ b/kubespray/roles/kubespray-defaults/tasks/fallback_ips_gather.yml @@ -0,0 +1,11 @@ +--- +# include to workaround mitogen issue +# https://github.com/dw/mitogen/issues/663 + +- name: "Gather ansible_default_ipv4 from {{ delegate_host_to_gather_facts }}" + setup: + gather_subset: '!all,network' + filter: "ansible_default_ipv4" + delegate_to: "{{ delegate_host_to_gather_facts }}" + connection: "{{ (delegate_host_to_gather_facts == 'localhost') | ternary('local', omit) }}" + delegate_facts: yes diff --git a/kubespray/roles/kubespray-defaults/tasks/main.yaml b/kubespray/roles/kubespray-defaults/tasks/main.yaml new file mode 100644 index 0000000..648a4af --- /dev/null +++ b/kubespray/roles/kubespray-defaults/tasks/main.yaml @@ -0,0 +1,33 @@ +--- +- name: Configure defaults + debug: + msg: "Check roles/kubespray-defaults/defaults/main.yml" + tags: + - always + +# do not run gather facts when bootstrap-os in roles +- name: set fallback_ips + import_tasks: fallback_ips.yml + when: + - "'bootstrap-os' not in ansible_play_role_names" + - fallback_ips is not defined + tags: + - always + +- name: set no_proxy + import_tasks: no_proxy.yml + when: + - "'bootstrap-os' not in ansible_play_role_names" + - http_proxy is defined or https_proxy is defined + - no_proxy is not defined + tags: + - always + +# TODO: Clean this task up when we drop backward compatibility support for `etcd_kubeadm_enabled` +- name: Set `etcd_deployment_type` to "kubeadm" if `etcd_kubeadm_enabled` is true + set_fact: + etcd_deployment_type: kubeadm + when: + - etcd_kubeadm_enabled is defined and etcd_kubeadm_enabled + tags: + - always diff --git a/kubespray/roles/kubespray-defaults/tasks/no_proxy.yml b/kubespray/roles/kubespray-defaults/tasks/no_proxy.yml new file mode 100644 index 0000000..6e6a5c9 --- /dev/null +++ b/kubespray/roles/kubespray-defaults/tasks/no_proxy.yml @@ -0,0 +1,38 @@ +--- +- name: Set no_proxy to all assigned cluster IPs and hostnames + set_fact: + no_proxy_prepare: >- + {%- if loadbalancer_apiserver is defined -%} + {{ apiserver_loadbalancer_domain_name| default('') }}, + {{ loadbalancer_apiserver.address | default('') }}, + {%- endif -%} + {%- if no_proxy_exclude_workers | default(false) -%} + {% set cluster_or_master = 'kube_control_plane' %} + {%- else -%} + {% set cluster_or_master = 'k8s_cluster' %} + 
{%- endif -%} + {%- for item in (groups[cluster_or_master] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%} + {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}, + {%- if item != hostvars[item].get('ansible_hostname', '') -%} + {{ hostvars[item]['ansible_hostname'] }}, + {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }}, + {%- endif -%} + {{ item }},{{ item }}.{{ dns_domain }}, + {%- endfor -%} + {%- if additional_no_proxy is defined -%} + {{ additional_no_proxy }}, + {%- endif -%} + 127.0.0.1,localhost,{{ kube_service_addresses }},{{ kube_pods_subnet }},svc,svc.{{ dns_domain }} + delegate_to: localhost + connection: local + delegate_facts: yes + become: no + run_once: yes + +- name: Populates no_proxy to all hosts + set_fact: + no_proxy: "{{ hostvars.localhost.no_proxy_prepare }}" + proxy_env: "{{ proxy_env | combine({ + 'no_proxy': hostvars.localhost.no_proxy_prepare, + 'NO_PROXY': hostvars.localhost.no_proxy_prepare + }) }}" diff --git a/kubespray/roles/kubespray-defaults/vars/main.yml b/kubespray/roles/kubespray-defaults/vars/main.yml new file mode 100644 index 0000000..c79edf5 --- /dev/null +++ b/kubespray/roles/kubespray-defaults/vars/main.yml @@ -0,0 +1,9 @@ +--- +# Kubespray constants + +kube_proxy_deployed: "{{ 'addon/kube-proxy' not in kubeadm_init_phases_skip }}" + +# The lowest version allowed to upgrade from (same as calico_version in the previous branch) +calico_min_version_required: "v3.19.4" + +containerd_min_version_required: "1.3.7" diff --git a/kubespray/roles/network_plugin/calico/defaults/main.yml b/kubespray/roles/network_plugin/calico/defaults/main.yml new file mode 100644 index 0000000..0c2be2d --- /dev/null +++ b/kubespray/roles/network_plugin/calico/defaults/main.yml @@ -0,0 +1,165 @@ +--- +# the default value of name +calico_cni_name: k8s-pod-network + +# Enables Internet connectivity from containers +nat_outgoing: true + +# add default ippool name +calico_pool_name: "default-pool" +calico_ipv4pool_ipip: "Off" + +# Change encapsulation mode, by default we enable vxlan which is the most mature and well tested mode +calico_ipip_mode: Never # valid values are 'Always', 'Never' and 'CrossSubnet' +calico_vxlan_mode: Always # valid values are 'Always', 'Never' and 'CrossSubnet' + +calico_cni_pool: true +calico_cni_pool_ipv6: true + +# add default ippool blockSize (defaults kube_network_node_prefix) +calico_pool_blocksize: 26 + +# Calico doesn't support ipip tunneling for the IPv6. +calico_ipip_mode_ipv6: Never +calico_vxlan_mode_ipv6: Never + +# add default ipv6 ippool blockSize (defaults kube_network_node_prefix_ipv6) +calico_pool_blocksize_ipv6: 122 + +# Calico network backend can be 'bird', 'vxlan' and 'none' +calico_network_backend: vxlan + +calico_cert_dir: /etc/calico/certs + +# Global as_num (/calico/bgp/v1/global/as_num) +global_as_num: "64512" + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. 
+# calico_mtu: 1500 + +# Advertise Service External IPs +calico_advertise_service_external_ips: [] + +# Advertise Service LoadBalancer IPs +calico_advertise_service_loadbalancer_ips: [] + +# Calico eBPF support +calico_bpf_enabled: false +calico_bpf_log_level: "" +# Valid option for service mode: Tunnel (default), DSR=Direct Server Return +calico_bpf_service_mode: Tunnel + +# Limits for apps +calico_node_memory_limit: 500M +calico_node_cpu_limit: 300m +calico_node_memory_requests: 64M +calico_node_cpu_requests: 150m +calico_felix_chaininsertmode: Insert + +# Calico daemonset nodeselector +calico_ds_nodeselector: "kubernetes.io/os: linux" + +# Virtual network ID to use for VXLAN traffic. A value of 0 means "use the kernel default". +calico_vxlan_vni: 4096 + +# Port to use for VXLAN traffic. A value of 0 means "use the kernel default". +calico_vxlan_port: 4789 + +# Enable Prometheus Metrics endpoint for felix +calico_felix_prometheusmetricsenabled: false +calico_felix_prometheusmetricsport: 9091 +calico_felix_prometheusgometricsenabled: true +calico_felix_prometheusprocessmetricsenabled: true + +# Set the agent log level. Can be debug, warning, info or fatal +calico_loglevel: info +calico_node_startup_loglevel: error + +# Set log path for calico CNI plugin. Set to false to disable logging to disk. +calico_cni_log_file_path: /var/log/calico/cni/cni.log + +# Enable or disable usage report to 'usage.projectcalico.org' +calico_usage_reporting: false + +# Should calico ignore kernel's RPF check setting, +# see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198 +calico_node_ignorelooserpf: false + +# Define address on which Felix will respond to health requests +calico_healthhost: "localhost" + +# Configure time in seconds that calico will wait for the iptables lock +calico_iptables_lock_timeout_secs: 10 + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" (FELIX_IPTABLESBACKEND) +calico_iptables_backend: "Auto" + +# Calico Wireguard support +calico_wireguard_enabled: false +calico_wireguard_packages: [] +calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-{{ ansible_distribution_major_version }}-$basearch/ + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://projectcalico.docs.tigera.io/reference/node/configuration#ip-autodetection-methods +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host's interface for MTU auto-detection. 
+# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}" + +kube_etcd_cacert_file: ca.pem +kube_etcd_cert_file: node-{{ inventory_hostname }}.pem +kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# The default value for calico_datastore is set in role kubespray-default + +# Use typha (only with kdd) +typha_enabled: false +typha_prometheusmetricsenabled: false +typha_prometheusmetricsport: 9093 + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +typha_replicas: 1 + +# Set max typha connections +typha_max_connections_lower_limit: 300 + +# Generate certifcates for typha<->calico-node communication +typha_secure: false + +calico_feature_control: {} + +# Calico default BGP port +calico_bgp_listen_port: 179 + +# Calico FelixConfiguration options +calico_felix_reporting_interval: 0s +calico_felix_log_severity_screen: Info + +# Calico container settings +calico_allow_ip_forwarding: false + +# Calico IPAM strictAffinity +calico_ipam_strictaffinity: false + +# Calico IPAM autoAllocateBlocks +calico_ipam_autoallocateblocks: true + +# Calico IPAM maxBlocksPerHost, default 0 +calico_ipam_maxblocksperhost: 0 + +# Calico apiserver (only with kdd) +calico_apiserver_enabled: false + +# Calico feature detect override, set "ChecksumOffloadBroken=true" to +# solve the https://github.com/projectcalico/calico/issues/3145 +calico_feature_detect_override: "" diff --git a/kubespray/roles/network_plugin/calico/files/openssl.conf b/kubespray/roles/network_plugin/calico/files/openssl.conf new file mode 100644 index 0000000..f4ba47d --- /dev/null +++ b/kubespray/roles/network_plugin/calico/files/openssl.conf @@ -0,0 +1,27 @@ +req_extensions = v3_req +distinguished_name = req_distinguished_name + +[req_distinguished_name] + +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment + +[ ssl_client ] +extendedKeyUsage = clientAuth, serverAuth +basicConstraints = CA:FALSE +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer + +[ v3_ca ] +basicConstraints = CA:TRUE +keyUsage = cRLSign, digitalSignature, keyCertSign +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid:always,issuer + +[ ssl_client_apiserver ] +extendedKeyUsage = clientAuth, serverAuth +basicConstraints = CA:FALSE +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer +subjectAltName = DNS:calico-api.calico-apiserver.svc diff --git a/kubespray/roles/network_plugin/calico/handlers/main.yml b/kubespray/roles/network_plugin/calico/handlers/main.yml new file mode 100644 index 0000000..b4b7af8 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/handlers/main.yml @@ -0,0 +1,27 @@ +--- +- name: reset_calico_cni + command: /bin/true + when: calico_cni_config is defined + notify: + - delete 10-calico.conflist + - Calico | delete calico-node docker containers + - Calico | delete calico-node crio/containerd containers + +- name: delete 10-calico.conflist + file: + path: /etc/cni/net.d/10-calico.conflist + state: absent + +- name: Calico | delete calico-node docker containers + shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + register: docker_calico_node_remove + until: docker_calico_node_remove is succeeded + retries: 5 
+ when: container_manager in ["docker"] + +- name: Calico | delete calico-node crio/containerd containers + shell: '{{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' + register: crictl_calico_node_remove + until: crictl_calico_node_remove is succeeded + retries: 5 + when: container_manager in ["crio", "containerd"] diff --git a/kubespray/roles/network_plugin/calico/rr/defaults/main.yml b/kubespray/roles/network_plugin/calico/rr/defaults/main.yml new file mode 100644 index 0000000..dedda19 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/rr/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# Global as_num (/calico/bgp/v1/global/as_num) +# should be the same as in calico role +global_as_num: "64512" +calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}" diff --git a/kubespray/roles/network_plugin/calico/rr/tasks/main.yml b/kubespray/roles/network_plugin/calico/rr/tasks/main.yml new file mode 100644 index 0000000..6164552 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/rr/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: Calico-rr | Pre-upgrade tasks + include_tasks: pre.yml + +- name: Calico-rr | Configuring node tasks + include_tasks: update-node.yml + +- name: Calico-rr | Set label for route reflector # noqa 301 + command: >- + {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} + 'i-am-a-route-reflector=true' --overwrite + changed_when: false + register: calico_rr_label + until: calico_rr_label is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 diff --git a/kubespray/roles/network_plugin/calico/rr/tasks/pre.yml b/kubespray/roles/network_plugin/calico/rr/tasks/pre.yml new file mode 100644 index 0000000..d8dbd80 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/rr/tasks/pre.yml @@ -0,0 +1,15 @@ +--- +- name: Calico-rr | Disable calico-rr service if it exists + service: + name: calico-rr + state: stopped + enabled: no + failed_when: false + +- name: Calico-rr | Delete obsolete files + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/calico/calico-rr.env + - /etc/systemd/system/calico-rr.service diff --git a/kubespray/roles/network_plugin/calico/rr/tasks/update-node.yml b/kubespray/roles/network_plugin/calico/rr/tasks/update-node.yml new file mode 100644 index 0000000..7070076 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/rr/tasks/update-node.yml @@ -0,0 +1,48 @@ +--- +# Workaround to retry a block of tasks, ansible doesn't have a direct way to do it, +# you can follow the block loop request in: https://github.com/ansible/ansible/issues/46203 +- block: + - name: Set the retry count + set_fact: + retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}" + + - name: Calico | Set label for route reflector # noqa 301 305 + shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite" + changed_when: false + register: calico_rr_id_label + until: calico_rr_id_label is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 + when: calico_rr_id is defined + + - name: Calico-rr | Fetch current node object + command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson" + changed_when: false + register: calico_rr_node + until: calico_rr_node is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 + + - name: Calico-rr | Set route reflector cluster ID + set_fact: + 
calico_rr_node_patched: >- + {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp': + { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }} + + - name: Calico-rr | Configure route reflector # noqa 301 305 + shell: "{{ bin_dir }}/calicoctl.sh replace -f-" + args: + stdin: "{{ calico_rr_node_patched | to_json }}" + + rescue: + - name: Fail if retry limit is reached + fail: + msg: Ended after 10 retries + when: retry_count|int == 10 + + - name: Retrying node configuration + debug: + msg: "Failed to configure route reflector - Retrying..." + + - name: Retry node configuration + include_tasks: update-node.yml diff --git a/kubespray/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml b/kubespray/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml new file mode 100644 index 0000000..fc336e4 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml @@ -0,0 +1,60 @@ +--- +- name: Calico | Check if calico apiserver exists + command: "{{ kubectl }} -n calico-apiserver get secret calico-apiserver-certs" + register: calico_apiserver_secret + changed_when: false + failed_when: false + +- name: Calico | Create ns manifests + template: + src: "calico-apiserver-ns.yml.j2" + dest: "{{ kube_config_dir }}/calico-apiserver-ns.yml" + mode: 0644 + +- name: Calico | Apply ns manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/calico-apiserver-ns.yml" + state: "latest" + +- name: Calico | Ensure calico certs dir + file: + path: /etc/calico/certs + state: directory + mode: 0755 + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Copy ssl script for apiserver certs + template: + src: make-ssl-calico.sh.j2 + dest: "{{ bin_dir }}/make-ssl-apiserver.sh" + mode: 0755 + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Copy ssl config for apiserver certs + copy: + src: openssl.conf + dest: /etc/calico/certs/openssl.conf + mode: 0644 + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Generate apiserver certs + command: >- + {{ bin_dir }}/make-ssl-apiserver.sh + -f /etc/calico/certs/openssl.conf + -c {{ kube_cert_dir }} + -d /etc/calico/certs + -s apiserver + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Create calico apiserver generic secrets + command: >- + {{ kubectl }} -n calico-apiserver + create secret generic {{ item.name }} + --from-file={{ item.cert }} + --from-file={{ item.key }} + with_items: + - name: calico-apiserver-certs + cert: /etc/calico/certs/apiserver.crt + key: /etc/calico/certs/apiserver.key + when: calico_apiserver_secret.rc != 0 diff --git a/kubespray/roles/network_plugin/calico/tasks/check.yml b/kubespray/roles/network_plugin/calico/tasks/check.yml new file mode 100644 index 0000000..530985f --- /dev/null +++ b/kubespray/roles/network_plugin/calico/tasks/check.yml @@ -0,0 +1,194 @@ +--- +- name: Stop if legacy encapsulation variables are detected (ipip) + assert: + that: + - ipip is not defined + msg: "'ipip' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Stop if legacy encapsulation variables are detected (ipip_mode) + assert: + that: + - ipip_mode is not defined + msg: "'ipip_mode' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs" + run_once: True + 
delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Stop if legacy encapsulation variables are detected (calcio_ipam_autoallocateblocks) + assert: + that: + - calcio_ipam_autoallocateblocks is not defined + msg: "'calcio_ipam_autoallocateblocks' configuration variable is deprecated, it's a typo, please configure your inventory with 'calico_ipam_autoallocateblocks' set to 'true' or 'false' according to your specific needs" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + + +- name: Stop if incompatible network plugin and cloudprovider + assert: + that: + - calico_ipip_mode == 'Never' + - calico_vxlan_mode in ['Always', 'CrossSubnet'] + msg: "When using cloud_provider azure and network_plugin calico calico_ipip_mode must be 'Never' and calico_vxlan_mode 'Always' or 'CrossSubnet'" + when: + - cloud_provider is defined and cloud_provider == 'azure' + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Stop if supported Calico versions + assert: + that: + - "calico_version in calico_crds_archive_checksums.keys()" + msg: "Calico version not supported {{ calico_version }} not in {{ calico_crds_archive_checksums.keys() }}" + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Get current calico cluster version + shell: "set -o pipefail && {{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'" + args: + executable: /bin/bash + register: calico_version_on_server + async: 10 + poll: 3 + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + changed_when: false + failed_when: false + +- name: Check that current calico version is enough for upgrade + assert: + that: + - calico_version_on_server.stdout is version(calico_min_version_required, '>=') + msg: > + Your version of calico is not fresh enough for upgrade. + Minimum version is {{ calico_min_version_required }} supported by the previous kubespray release. 
+  when:
+    - 'calico_version_on_server.stdout is defined'
+    - calico_version_on_server.stdout
+    - inventory_hostname == groups['kube_control_plane'][0]
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Check that cluster_id is set if calico_rr enabled"
+  assert:
+    that:
+      - cluster_id is defined
+    msg: "A unique cluster_id is required if using calico_rr"
+  when:
+    - peer_with_calico_rr
+    - inventory_hostname == groups['kube_control_plane'][0]
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Check that calico_rr nodes are in k8s_cluster group"
+  assert:
+    that:
+      - '"k8s_cluster" in group_names'
+    msg: "calico_rr must be a child group of k8s_cluster group"
+  when:
+    - '"calico_rr" in group_names'
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Check vars defined correctly"
+  assert:
+    that:
+      - "calico_pool_name is defined"
+      - "calico_pool_name is match('^[a-zA-Z0-9-_\\\\.]{2,63}$')"
+    msg: "calico_pool_name contains invalid characters"
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Check calico network backend defined correctly"
+  assert:
+    that:
+      - "calico_network_backend in ['bird', 'vxlan', 'none']"
+    msg: "calico network backend is not 'bird', 'vxlan' or 'none'"
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Check ipip and vxlan mode defined correctly"
+  assert:
+    that:
+      - "calico_ipip_mode in ['Always', 'CrossSubnet', 'Never']"
+      - "calico_vxlan_mode in ['Always', 'CrossSubnet', 'Never']"
+    msg: "calico inter host encapsulation mode is not 'Always', 'CrossSubnet' or 'Never'"
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Check ipip and vxlan mode if simultaneously enabled"
+  assert:
+    that:
+      - "calico_vxlan_mode in ['Never']"
+    msg: "IP in IP and VXLAN modes are mutually exclusive"
+  when:
+    - "calico_ipip_mode in ['Always', 'CrossSubnet']"
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Check ipip and vxlan mode if simultaneously enabled"
+  assert:
+    that:
+      - "calico_ipip_mode in ['Never']"
+    msg: "IP in IP and VXLAN modes are mutually exclusive"
+  when:
+    - "calico_vxlan_mode in ['Always', 'CrossSubnet']"
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Get Calico {{ calico_pool_name }} configuration"
+  command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }} -o json"
+  failed_when: False
+  changed_when: False
+  check_mode: no
+  register: calico
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Set calico_pool_conf"
+  set_fact:
+    calico_pool_conf: '{{ calico.stdout | from_json }}'
+  when: calico.rc == 0 and calico.stdout
+  run_once: True
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+
+- name: "Check if inventory matches current cluster configuration"
+  assert:
+    that:
+      - calico_pool_conf.spec.blockSize|int == (calico_pool_blocksize | default(kube_network_node_prefix) | int)
+      - calico_pool_conf.spec.cidr == (calico_pool_cidr | default(kube_pods_subnet))
+      - not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode
+      - not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode
+    msg: "Your inventory doesn't match the current cluster configuration"
+  when:
+    - calico_pool_conf is defined
+  run_once: True
+  delegate_to: "{{
groups['kube_control_plane'][0] }}" + +- name: "Check kdd calico_datastore if calico_apiserver_enabled" + assert: + that: calico_datastore == "kdd" + msg: "When using calico apiserver you need to use the kubernetes datastore" + when: + - calico_apiserver_enabled + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check kdd calico_datastore if typha_enabled" + assert: + that: calico_datastore == "kdd" + msg: "When using typha you need to use the kubernetes datastore" + when: + - typha_enabled + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check ipip mode is Never for calico ipv6" + assert: + that: + - "calico_ipip_mode_ipv6 in ['Never']" + msg: "Calico doesn't support ipip tunneling for the IPv6" + when: + - enable_dual_stack_networks + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" diff --git a/kubespray/roles/network_plugin/calico/tasks/install.yml b/kubespray/roles/network_plugin/calico/tasks/install.yml new file mode 100644 index 0000000..d55c910 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/tasks/install.yml @@ -0,0 +1,475 @@ +--- +- name: Calico | Install Wireguard packages + package: + name: "{{ item }}" + state: present + with_items: "{{ calico_wireguard_packages }}" + register: calico_package_install + until: calico_package_install is succeeded + retries: 4 + when: calico_wireguard_enabled + +- name: Calico | Copy calicoctl binary from download dir + copy: + src: "{{ local_release_dir }}/calicoctl" + dest: "{{ bin_dir }}/calicoctl" + mode: 0755 + remote_src: yes + +- name: Calico | Write Calico cni config + template: + src: "cni-calico.conflist.j2" + dest: "/etc/cni/net.d/calico.conflist.template" + mode: 0644 + owner: root + register: calico_conflist + notify: reset_calico_cni + +- name: Calico | Create calico certs directory + file: + dest: "{{ calico_cert_dir }}" + state: directory + mode: 0750 + owner: root + group: root + when: calico_datastore == "etcd" + +- name: Calico | Link etcd certificates for calico-node + file: + src: "{{ etcd_cert_dir }}/{{ item.s }}" + dest: "{{ calico_cert_dir }}/{{ item.d }}" + state: hard + mode: 0640 + force: yes + with_items: + - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"} + - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"} + - {s: "{{ kube_etcd_key_file }}", d: "key.pem"} + when: calico_datastore == "etcd" + +- name: Calico | Generate typha certs + include_tasks: typha_certs.yml + when: + - typha_secure + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Generate apiserver certs + include_tasks: calico_apiserver_certs.yml + when: + - calico_apiserver_enabled + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Install calicoctl wrapper script + template: + src: "calicoctl.{{ calico_datastore }}.sh.j2" + dest: "{{ bin_dir }}/calicoctl.sh" + mode: 0755 + owner: root + group: root + +- name: Calico | wait for etcd + uri: + url: "{{ etcd_access_addresses.split(',') | first }}/health" + validate_certs: no + client_cert: "{{ calico_cert_dir }}/cert.crt" + client_key: "{{ calico_cert_dir }}/key.pem" + register: result + until: result.status == 200 or result.status == 401 + retries: 10 + delay: 5 + run_once: true + when: calico_datastore == "etcd" + +- name: Calico | Check if calico network pool has already been configured + # noqa 306 - grep will exit 1 if no match found + shell: > + {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc 
-l + args: + executable: /bin/bash + register: calico_conf + retries: 4 + until: calico_conf.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined + assert: + that: "[calico_pool_cidr] | ipaddr(kube_pods_subnet) | length == 1" + msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - 'calico_conf.stdout == "0"' + - calico_pool_cidr is defined + +- name: Calico | Check if calico IPv6 network pool has already been configured + # noqa 306 - grep will exit 1 if no match found + shell: > + {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l + args: + executable: /bin/bash + register: calico_conf_ipv6 + retries: 4 + until: calico_conf_ipv6.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + when: + - inventory_hostname == groups['kube_control_plane'][0] + - enable_dual_stack_networks + +- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined + assert: + that: "[calico_pool_cidr_ipv6] | ipaddr(kube_pods_subnet_ipv6) | length == 1" + msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0" + - calico_pool_cidr_ipv6 is defined + - enable_dual_stack_networks + +- block: + - name: Calico | Check if extra directory is needed + stat: + path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3','<')) else 'crd' }}" + register: kdd_path + - name: Calico | Set kdd path when calico < v3.22.3 + set_fact: + calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/kdd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}" + when: + - calico_version is version('v3.22.3', '<') + - name: Calico | Set kdd path when calico > v3.22.2 + set_fact: + calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/crd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}" + when: + - calico_version is version('v3.22.2', '>') + - name: Calico | Create calico manifests for kdd + assemble: + src: "{{ calico_kdd_path }}" + dest: "{{ kube_config_dir }}/kdd-crds.yml" + mode: 0644 + delimiter: "---\n" + regexp: ".*\\.yaml" + remote_src: true + + - name: Calico | Create Calico Kubernetes datastore resources + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/kdd-crds.yml" + state: "latest" + when: + - inventory_hostname == groups['kube_control_plane'][0] + when: + - inventory_hostname in groups['kube_control_plane'] + - calico_datastore == "kdd" + +- block: + - name: Calico | Get existing FelixConfiguration + command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json" + register: _felix_cmd + ignore_errors: True + changed_when: False + + - name: Calico | Set kubespray FelixConfiguration + set_fact: + _felix_config: > + { + "kind": "FelixConfiguration", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "default", + }, + "spec": { + "ipipEnabled": {{ calico_ipip_mode != 'Never' }}, + "reportingInterval": "{{ calico_felix_reporting_interval }}", + "bpfLogLevel": "{{ calico_bpf_log_level }}", + 
"bpfEnabled": {{ calico_bpf_enabled | bool }}, + "bpfExternalServiceMode": "{{ calico_bpf_service_mode }}", + "wireguardEnabled": {{ calico_wireguard_enabled | bool }}, + "logSeverityScreen": "{{ calico_felix_log_severity_screen }}", + "vxlanEnabled": {{ calico_vxlan_mode != 'Never' }}, + "featureDetectOverride": "{{ calico_feature_detect_override }}" + } + } + + - name: Calico | Process FelixConfiguration + set_fact: + _felix_config: "{{ _felix_cmd.stdout | from_json | combine(_felix_config, recursive=True) }}" + when: + - _felix_cmd is success + + - name: Calico | Configure calico FelixConfiguration + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config|to_json) }}" + changed_when: False + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- block: + - name: Calico | Get existing calico network pool + command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json" + register: _calico_pool_cmd + ignore_errors: True + changed_when: False + + - name: Calico | Set kubespray calico network pool + set_fact: + _calico_pool: > + { + "kind": "IPPool", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "{{ calico_pool_name }}", + }, + "spec": { + "blockSize": {{ calico_pool_blocksize | default(kube_network_node_prefix) }}, + "cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}", + "ipipMode": "{{ calico_ipip_mode }}", + "vxlanMode": "{{ calico_vxlan_mode }}", + "natOutgoing": {{ nat_outgoing|default(false) }} + } + } + + - name: Calico | Process calico network pool + set_fact: + _calico_pool: "{{ _calico_pool_cmd.stdout | from_json | combine(_calico_pool, recursive=True) }}" + when: + - _calico_pool_cmd is success + + - name: Calico | Configure calico network pool + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool|to_json) }}" + changed_when: False + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- block: + - name: Calico | Get existing calico ipv6 network pool + command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json" + register: _calico_pool_ipv6_cmd + ignore_errors: True + changed_when: False + + - name: Calico | Set kubespray calico network pool + set_fact: + _calico_pool_ipv6: > + { + "kind": "IPPool", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "{{ calico_pool_name }}-ipv6", + }, + "spec": { + "blockSize": {{ calico_pool_blocksize_ipv6 | default(kube_network_node_prefix_ipv6) }}, + "cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}", + "ipipMode": "{{ calico_ipip_mode_ipv6 }}", + "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}", + "natOutgoing": {{ nat_outgoing_ipv6|default(false) }} + } + } + + - name: Calico | Process calico ipv6 network pool + set_fact: + _calico_pool_ipv6: "{{ _calico_pool_ipv6_cmd.stdout | from_json | combine(_calico_pool_ipv6, recursive=True) }}" + when: + - _calico_pool_ipv6_cmd is success + + - name: Calico | Configure calico ipv6 network pool + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6|to_json) }}" + changed_when: False + when: + - inventory_hostname == groups['kube_control_plane'][0] + - enable_dual_stack_networks | bool + +- name: Populate Service External IPs + set_fact: + _service_external_ips: "{{ _service_external_ips|default([]) + [ {'cidr': item} ] }}" + with_items: 
"{{ calico_advertise_service_external_ips }}" + run_once: yes + +- name: Populate Service LoadBalancer IPs + set_fact: + _service_loadbalancer_ips: "{{ _service_loadbalancer_ips|default([]) + [ {'cidr': item} ] }}" + with_items: "{{ calico_advertise_service_loadbalancer_ips }}" + run_once: yes + +- name: "Determine nodeToNodeMesh needed state" + set_fact: + nodeToNodeMeshEnabled: "false" + when: + - peer_with_router|default(false) or peer_with_calico_rr|default(false) + - inventory_hostname in groups['k8s_cluster'] + run_once: yes + +- block: + - name: Calico | Get existing BGP Configuration + command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json" + register: _bgp_config_cmd + ignore_errors: True + changed_when: False + + - name: Calico | Set kubespray BGP Configuration + set_fact: + _bgp_config: > + { + "kind": "BGPConfiguration", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "default", + }, + "spec": { + "listenPort": {{ calico_bgp_listen_port }}, + "logSeverityScreen": "Info", + {% if not calico_no_global_as_num|default(false) %}"asNumber": {{ global_as_num }},{% endif %} + "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} , + {% if calico_advertise_cluster_ips|default(false) %} + "serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %} + {% if calico_advertise_service_loadbalancer_ips|length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %} + "serviceExternalIPs": {{ _service_external_ips|default([]) }} + } + } + + - name: Calico | Process BGP Configuration + set_fact: + _bgp_config: "{{ _bgp_config_cmd.stdout | from_json | combine(_bgp_config, recursive=True) }}" + when: + - _bgp_config_cmd is success + + - name: Calico | Set up BGP Configuration + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config|to_json) }}" + changed_when: False + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Create calico manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: calico-config, file: calico-config.yml, type: cm} + - {name: calico-node, file: calico-node.yml, type: ds} + - {name: calico, file: calico-node-sa.yml, type: sa} + - {name: calico, file: calico-cr.yml, type: clusterrole} + - {name: calico, file: calico-crb.yml, type: clusterrolebinding} + - {name: kubernetes-services-endpoint, file: kubernetes-services-endpoint.yml, type: cm } + register: calico_node_manifests + when: + - inventory_hostname in groups['kube_control_plane'] + - rbac_enabled or item.type not in rbac_resources + +- name: Calico | Create calico manifests for typha + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: calico, file: calico-typha.yml, type: typha} + register: calico_node_typha_manifest + when: + - inventory_hostname in groups['kube_control_plane'] + - typha_enabled + +- name: Calico | get calico apiserver caBundle + command: "{{ bin_dir }}/kubectl get secret -n calico-apiserver calico-apiserver-certs -o jsonpath='{.data.apiserver\\.crt}'" + changed_when: false + register: calico_apiserver_cabundle + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_apiserver_enabled + +- name: Calico | set calico apiserver caBundle fact + set_fact: + 
calico_apiserver_cabundle: "{{ calico_apiserver_cabundle.stdout }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_apiserver_enabled + +- name: Calico | Create calico manifests for apiserver + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: calico, file: calico-apiserver.yml, type: calico-apiserver} + register: calico_apiserver_manifest + when: + - inventory_hostname in groups['kube_control_plane'] + - calico_apiserver_enabled + +- name: Start Calico resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ calico_node_manifests.results }}" + - "{{ calico_node_typha_manifest.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + +- name: Start Calico apiserver resources + kube: + name: "{{ item.item.name }}" + namespace: "calico-apiserver" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ calico_apiserver_manifest.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + +- name: Wait for calico kubeconfig to be created + wait_for: + path: /etc/cni/net.d/calico-kubeconfig + when: + - inventory_hostname not in groups['kube_control_plane'] + - calico_datastore == "kdd" + +- name: Calico | Create Calico ipam manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: calico, file: calico-ipamconfig.yml, type: ipam} + when: + - inventory_hostname in groups['kube_control_plane'] + - calico_datastore == "kdd" + +- name: Calico | Create ipamconfig resources + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/calico-ipamconfig.yml" + state: "latest" + register: resource_result + until: resource_result is succeeded + retries: 4 + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_datastore == "kdd" + +- include_tasks: peer_with_calico_rr.yml + when: + - peer_with_calico_rr|default(false) + +- include_tasks: peer_with_router.yml + when: + - peer_with_router|default(false) diff --git a/kubespray/roles/network_plugin/calico/tasks/main.yml b/kubespray/roles/network_plugin/calico/tasks/main.yml new file mode 100644 index 0000000..81844fa --- /dev/null +++ b/kubespray/roles/network_plugin/calico/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- import_tasks: pre.yml + +- import_tasks: repos.yml + +- include_tasks: install.yml diff --git a/kubespray/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml b/kubespray/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml new file mode 100644 index 0000000..efa98c5 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml @@ -0,0 +1,86 @@ +--- +- name: Calico | Set lable for groups nodes # noqa 301 305 + shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite" + changed_when: false + register: calico_group_id_label + until: calico_group_id_label is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 + when: + - calico_group_id is defined + +- 
name: Calico | Configure peering with route reflectors at global scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + # revert when it's already a string + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "{{ calico_rr_id }}-to-node" + }, + "spec": { + "peerSelector": "calico-rr-id == '{{ calico_rr_id }}'", + "nodeSelector": "calico-group-id == '{{ calico_group_id }}'" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + when: + - calico_rr_id is defined + - calico_group_id is defined + - inventory_hostname in groups['calico_rr'] + +- name: Calico | Configure peering with route reflectors at global scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + # revert when it's already a string + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "peer-to-rrs" + }, + "spec": { + "nodeSelector": "!has(i-am-a-route-reflector)", + "peerSelector": "has(i-am-a-route-reflector)" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ groups['calico_rr'] | default([]) }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_rr_id is not defined or calico_group_id is not defined + +- name: Calico | Configure route reflectors to peer with each other + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + # revert when it's already a string + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "rr-mesh" + }, + "spec": { + "nodeSelector": "has(i-am-a-route-reflector)", + "peerSelector": "has(i-am-a-route-reflector)" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ groups['calico_rr'] | default([]) }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/network_plugin/calico/tasks/peer_with_router.yml b/kubespray/roles/network_plugin/calico/tasks/peer_with_router.yml new file mode 100644 index 0000000..a698ed1 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/tasks/peer_with_router.yml @@ -0,0 +1,77 @@ +--- +- name: Calico | Configure peering with router(s) at global scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "global-{{ item.name | default(item.router_id|replace(':','-')) }}" + }, + "spec": { + "asNumber": "{{ item.as }}", + "peerIP": "{{ item.router_id }}" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Configure node asNumber for per node peering + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "Node", + "metadata": { + "name": "{{ inventory_hostname }}" + }, + 
"spec": { + "bgp": { + "asNumber": "{{ local_as }}" + }, + "orchRefs":[{"nodeName":"{{ inventory_hostname }}","orchestrator":"k8s"}] + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + when: + - inventory_hostname in groups['k8s_cluster'] + - local_as is defined + - groups['calico_rr'] | default([]) | length == 0 + +- name: Calico | Configure peering with router(s) at node scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id|replace(':','-')) }}" + }, + "spec": { + "asNumber": "{{ item.as }}", + "node": "{{ inventory_hostname }}", + "peerIP": "{{ item.router_id }}", + "sourceAddress": "{{ item.sourceaddress|default('UseNodeIP') }}" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: + - inventory_hostname in groups['k8s_cluster'] diff --git a/kubespray/roles/network_plugin/calico/tasks/pre.yml b/kubespray/roles/network_plugin/calico/tasks/pre.yml new file mode 100644 index 0000000..162aca1 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/tasks/pre.yml @@ -0,0 +1,46 @@ +--- +- name: Slurp CNI config + slurp: + src: /etc/cni/net.d/10-calico.conflist + register: calico_cni_config_slurp + failed_when: false + +- block: + - name: Set fact calico_cni_config from slurped CNI config + set_fact: + calico_cni_config: "{{ calico_cni_config_slurp['content'] | b64decode | from_json }}" + - name: Set fact calico_datastore to etcd if needed + set_fact: + calico_datastore: etcd + when: + - "'plugins' in calico_cni_config" + - "'etcd_endpoints' in calico_cni_config.plugins.0" + when: calico_cni_config_slurp.content is defined + +- name: Calico | Get kubelet hostname + shell: >- + set -o pipefail && {{ kubectl }} get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address' + | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1 + args: + executable: /bin/bash + register: calico_kubelet_name + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: + - "cloud_provider is defined" + +- name: Calico | Gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + skip: true + tags: + - facts diff --git a/kubespray/roles/network_plugin/calico/tasks/repos.yml b/kubespray/roles/network_plugin/calico/tasks/repos.yml new file mode 100644 index 0000000..dd29f45 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/tasks/repos.yml @@ -0,0 +1,21 @@ +--- +- name: Calico | Add wireguard yum repo + when: + - calico_wireguard_enabled + block: + + - name: 
Calico | Add wireguard yum repo + yum_repository: + name: copr:copr.fedorainfracloud.org:jdoss:wireguard + file: _copr:copr.fedorainfracloud.org:jdoss:wireguard + description: Copr repo for wireguard owned by jdoss + baseurl: "{{ calico_wireguard_repo }}" + gpgcheck: yes + gpgkey: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/pubkey.gpg + skip_if_unavailable: yes + enabled: yes + repo_gpgcheck: no + when: + - ansible_os_family in ['RedHat'] + - ansible_distribution not in ['Fedora'] + - ansible_facts['distribution_major_version'] | int < 9 diff --git a/kubespray/roles/network_plugin/calico/tasks/reset.yml b/kubespray/roles/network_plugin/calico/tasks/reset.yml new file mode 100644 index 0000000..48d2e5a --- /dev/null +++ b/kubespray/roles/network_plugin/calico/tasks/reset.yml @@ -0,0 +1,30 @@ +--- +- name: reset | check vxlan.calico network device + stat: + path: /sys/class/net/vxlan.calico + get_attributes: no + get_checksum: no + get_mime: no + register: vxlan + +- name: reset | remove the network vxlan.calico device created by calico + command: ip link del vxlan.calico + when: vxlan.stat.exists + +- name: reset | check dummy0 network device + stat: + path: /sys/class/net/dummy0 + get_attributes: no + get_checksum: no + get_mime: no + register: dummy0 + +- name: reset | remove the network device created by calico + command: ip link del dummy0 + when: dummy0.stat.exists + +- name: reset | get and remove remaining routes set by bird + shell: set -o pipefail && ip route show proto bird | xargs -i bash -c "ip route del {} proto bird " + args: + executable: /bin/bash + changed_when: false diff --git a/kubespray/roles/network_plugin/calico/tasks/typha_certs.yml b/kubespray/roles/network_plugin/calico/tasks/typha_certs.yml new file mode 100644 index 0000000..5d3f279 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/tasks/typha_certs.yml @@ -0,0 +1,51 @@ +--- +- name: Calico | Check if typha-server exists + command: "{{ kubectl }} -n kube-system get secret typha-server" + register: typha_server_secret + changed_when: false + failed_when: false + +- name: Calico | Ensure calico certs dir + file: + path: /etc/calico/certs + state: directory + mode: 0755 + when: typha_server_secret.rc != 0 + +- name: Calico | Copy ssl script for typha certs + template: + src: make-ssl-calico.sh.j2 + dest: "{{ bin_dir }}/make-ssl-typha.sh" + mode: 0755 + when: typha_server_secret.rc != 0 + +- name: Calico | Copy ssl config for typha certs + copy: + src: openssl.conf + dest: /etc/calico/certs/openssl.conf + mode: 0644 + when: typha_server_secret.rc != 0 + +- name: Calico | Generate typha certs + command: >- + {{ bin_dir }}/make-ssl-typha.sh + -f /etc/calico/certs/openssl.conf + -c {{ kube_cert_dir }} + -d /etc/calico/certs + -s typha + when: typha_server_secret.rc != 0 + +- name: Calico | Create typha tls secrets + command: >- + {{ kubectl }} -n kube-system + create secret tls {{ item.name }} + --cert {{ item.cert }} + --key {{ item.key }} + with_items: + - name: typha-server + cert: /etc/calico/certs/typha-server.crt + key: /etc/calico/certs/typha-server.key + - name: typha-client + cert: /etc/calico/certs/typha-client.crt + key: /etc/calico/certs/typha-client.key + when: typha_server_secret.rc != 0 diff --git a/kubespray/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 b/kubespray/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 new file mode 100644 index 0000000..a1bdfcb --- /dev/null +++ 
b/kubespray/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 @@ -0,0 +1,10 @@ +# This is a tech-preview manifest which installs the Calico API server. Note that this manifest is liable to change +# or be removed in future releases without further warning. +# +# Namespace and namespace-scoped resources. +apiVersion: v1 +kind: Namespace +metadata: + labels: + name: calico-apiserver + name: calico-apiserver diff --git a/kubespray/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 b/kubespray/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 new file mode 100644 index 0000000..dabc7a3 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 @@ -0,0 +1,287 @@ +# Policy to ensure the API server isn't cut off. Can be modified, but ensure +# that the main API server is always able to reach the Calico API server. +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-apiserver + namespace: calico-apiserver +spec: + podSelector: + matchLabels: + apiserver: "true" + ingress: + - ports: + - protocol: TCP + port: 5443 + +--- + +apiVersion: v1 +kind: Service +metadata: + name: calico-api + namespace: calico-apiserver +spec: + ports: + - name: apiserver + port: 443 + protocol: TCP + targetPort: 5443 + selector: + apiserver: "true" + type: ClusterIP + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + apiserver: "true" + k8s-app: calico-apiserver + name: calico-apiserver + namespace: calico-apiserver +spec: + replicas: 1 + selector: + matchLabels: + apiserver: "true" + strategy: + type: Recreate + template: + metadata: + labels: + apiserver: "true" + k8s-app: calico-apiserver + name: calico-apiserver + namespace: calico-apiserver + spec: + containers: + - args: + - --secure-port=5443 + env: + - name: DATASTORE_TYPE + value: kubernetes + image: {{ calico_apiserver_image_repo }}:{{ calico_apiserver_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + livenessProbe: + httpGet: + path: /version + port: 5443 + scheme: HTTPS + initialDelaySeconds: 90 + periodSeconds: 10 + name: calico-apiserver + readinessProbe: + exec: + command: + - /code/filecheck + failureThreshold: 5 + initialDelaySeconds: 5 + periodSeconds: 10 + securityContext: + privileged: false + runAsUser: 0 + volumeMounts: + - mountPath: /code/apiserver.local.config/certificates + name: calico-apiserver-certs + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + restartPolicy: Always + serviceAccount: calico-apiserver + serviceAccountName: calico-apiserver + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + volumes: + - name: calico-apiserver-certs + secret: + secretName: calico-apiserver-certs + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-apiserver + namespace: calico-apiserver + +--- + +# Cluster-scoped resources below here. 
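+# The APIService below registers the aggregated projectcalico.org/v3 API.
+# Once it is ready, Calico resources can be managed with plain kubectl instead
+# of calicoctl, for example (illustrative commands, not executed by this manifest):
+#   kubectl get ippools.projectcalico.org
+#   kubectl get felixconfigurations.projectcalico.org default -o yaml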
+apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v3.projectcalico.org +spec: + group: projectcalico.org + groupPriorityMinimum: 1500 + caBundle: {{ calico_apiserver_cabundle }} + service: + name: calico-api + namespace: calico-apiserver + port: 443 + version: v3 + versionPriority: 200 + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: calico-crds +rules: +- apiGroups: + - extensions + - networking.k8s.io + - "" + resources: + - networkpolicies + - nodes + - namespaces + - pods + - serviceaccounts + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - globalnetworkpolicies + - networkpolicies + - clusterinformations + - hostendpoints + - globalnetworksets + - networksets + - bgpconfigurations + - bgppeers + - felixconfigurations + - kubecontrollersconfigurations + - ippools + - ipreservations + - ipamblocks + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - policy + resourceNames: + - calico-apiserver + resources: + - podsecuritypolicies + verbs: + - use + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: calico-extension-apiserver-auth-access +rules: +- apiGroups: + - "" + resourceNames: + - extension-apiserver-authentication + resources: + - configmaps + verbs: + - list + - watch + - get +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + - roles + - rolebindings + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: calico-webhook-reader +rules: +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-apiserver-access-crds +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-crds +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-apiserver-delegate-auth +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-apiserver-webhook-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-webhook-reader +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-extension-apiserver-auth-access +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-extension-apiserver-auth-access +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver diff --git a/kubespray/roles/network_plugin/calico/templates/calico-config.yml.j2 b/kubespray/roles/network_plugin/calico/templates/calico-config.yml.j2 new file mode 100644 index 0000000..568cc00 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/calico-config.yml.j2 @@ -0,0 +1,27 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: +{% if 
calico_datastore == "etcd" %} + etcd_endpoints: "{{ etcd_access_addresses }}" + etcd_ca: "/calico-secrets/ca_cert.crt" + etcd_cert: "/calico-secrets/cert.crt" + etcd_key: "/calico-secrets/key.pem" +{% elif calico_datastore == "kdd" and typha_enabled %} + # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas + # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is + # essential. + typha_service_name: "calico-typha" +{% endif %} +{% if calico_network_backend == 'bird' %} + cluster_type: "kubespray,bgp" + calico_backend: "bird" +{% else %} + cluster_type: "kubespray" + calico_backend: "{{ calico_network_backend }}" +{% endif %} +{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router|default(false) %} + as: "{{ local_as|default(global_as_num) }}" +{% endif -%} diff --git a/kubespray/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/kubespray/roles/network_plugin/calico/templates/calico-cr.yml.j2 new file mode 100644 index 0000000..b911b87 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/calico-cr.yml.j2 @@ -0,0 +1,168 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node + namespace: kube-system +rules: + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + - configmaps + verbs: + - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + - watch + - list +{% if calico_datastore == "kdd" %} + # Used to discover Typhas. + - get +{% endif %} + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch +{% if calico_datastore == "etcd" %} + - apiGroups: + - policy + resourceNames: + - privileged + resources: + - podsecuritypolicies + verbs: + - use +{% elif calico_datastore == "kdd" %} + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico must update some CRDs. + - apiGroups: [ "crd.projectcalico.org" ] + resources: + - caliconodestatuses + verbs: + - update + # Calico stores some configuration information on the node. 
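+  # (for example tunnel address annotations such as projectcalico.org/IPv4IPIPTunnelAddr);
+  # an illustrative way to inspect what calico-node has recorded on a node object:
+  #   kubectl get node <node-name> -o jsonpath='{.metadata.annotations}'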
+ - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get +{% endif %} + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-node + verbs: + - create diff --git a/kubespray/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/kubespray/roles/network_plugin/calico/templates/calico-crb.yml.j2 new file mode 100644 index 0000000..f747bfd --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/calico-crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system diff --git a/kubespray/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 b/kubespray/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 new file mode 100644 index 0000000..af7e211 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 @@ -0,0 +1,8 @@ +apiVersion: crd.projectcalico.org/v1 +kind: IPAMConfig +metadata: + name: default +spec: + autoAllocateBlocks: {{ calico_ipam_autoallocateblocks }} + strictAffinity: {{ calico_ipam_strictaffinity }} + maxBlocksPerHost: {{ calico_ipam_maxblocksperhost }} diff --git a/kubespray/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/kubespray/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 new file mode 100644 index 0000000..ea721b3 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system diff --git a/kubespray/roles/network_plugin/calico/templates/calico-node.yml.j2 b/kubespray/roles/network_plugin/calico/templates/calico-node.yml.j2 new file mode 100644 index 0000000..3af01c8 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -0,0 +1,464 @@ +--- +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. 
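+# A minimal way to verify the rollout after this manifest is applied
+# (illustrative commands, not part of the template itself):
+#   kubectl -n kube-system rollout status daemonset/calico-node
+#   kubectl -n kube-system get pods -l k8s-app=calico-node -o wide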
+kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + labels: + k8s-app: calico-node + annotations: +{% if calico_datastore == "etcd" %} + kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}" +{% endif %} +{% if calico_felix_prometheusmetricsenabled %} + prometheus.io/scrape: 'true' + prometheus.io/port: "{{ calico_felix_prometheusmetricsport }}" +{% endif %} + spec: + nodeSelector: + {{ calico_ds_nodeselector }} + priorityClassName: system-node-critical + hostNetwork: true + serviceAccountName: calico-node + tolerations: + - operator: Exists + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + initContainers: +{% if calico_datastore == "kdd" %} + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true +{% endif %} + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # Install CNI binaries + - name: UPDATE_CNI_BINARIES + value: "true" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG_FILE + value: "/host/etc/cni/net.d/calico.conflist.template" + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" +{% if calico_datastore == "kdd" %} + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{% endif %} + volumeMounts: + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. 
+ - name: flexvol-driver + image: {{ calico_flexvol_image_repo }}:{{ calico_flexvol_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # The location of the Calico etcd cluster. +{% if calico_datastore == "etcd" %} + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert +{% elif calico_datastore == "kdd" %} + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" +{% if typha_enabled %} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: calico-config + key: typha_service_name +{% if typha_secure %} + - name: FELIX_TYPHACN + value: typha-server + - name: FELIX_TYPHACAFILE + value: /etc/typha-ca/ca.crt + - name: FELIX_TYPHACERTFILE + value: /etc/typha-client/typha-client.crt + - name: FELIX_TYPHAKEYFILE + value: /etc/typha-client/typha-client.key +{% endif %} +{% endif %} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" +{% endif %} +{% if calico_network_backend == 'vxlan' %} + - name: FELIX_VXLANVNI + value: "{{ calico_vxlan_vni }}" + - name: FELIX_VXLANPORT + value: "{{ calico_vxlan_port }}" +{% endif %} + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + valueFrom: + configMapKeyRef: + name: calico-config + key: cluster_type + # Set noderef for node controller. + - name: CALICO_K8S_NODE_REF + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{ calico_endpoint_to_host_action|default('RETURN') }}" + - name: FELIX_HEALTHHOST + value: "{{ calico_healthhost }}" +{% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %} + - name: FELIX_KUBENODEPORTRANGES + value: "{{ kube_apiserver_node_port_range.split('-')[0] }}:{{ kube_apiserver_node_port_range.split('-')[1] }}" +{% endif %} + - name: FELIX_IPTABLESBACKEND + value: "{{ calico_iptables_backend }}" + - name: FELIX_IPTABLESLOCKTIMEOUTSECS + value: "{{ calico_iptables_lock_timeout_secs }}" +# should be set in etcd before deployment +# # Configure the IP Pool from which Pod IPs will be chosen. 
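+# In this role the pool is created up front by tasks/install.yml via calicoctl
+# rather than through this environment variable. A rough, illustrative sketch of
+# the equivalent manual step (example names and values only; the real ones come
+# from calico_pool_name, calico_pool_cidr / kube_pods_subnet and the ipip/vxlan vars):
+#   calicoctl apply -f - <<EOF
+#   apiVersion: projectcalico.org/v3
+#   kind: IPPool
+#   metadata:
+#     name: default-pool
+#   spec:
+#     cidr: 10.233.64.0/18
+#     ipipMode: Always
+#     vxlanMode: Never
+#     natOutgoing: true
+#   EOF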
+# - name: CALICO_IPV4POOL_CIDR +# value: "{{ calico_pool_cidr | default(kube_pods_subnet) }}" + - name: CALICO_IPV4POOL_IPIP + value: "{{ calico_ipv4pool_ipip }}" + - name: FELIX_IPV6SUPPORT + value: "{{ enable_dual_stack_networks | default(false) }}" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "{{ calico_loglevel }}" + # Set Calico startup logging to "error" + - name: CALICO_STARTUP_LOGLEVEL + value: "{{ calico_node_startup_loglevel }}" + # Enable or disable usage report + - name: FELIX_USAGEREPORTINGENABLED + value: "{{ calico_usage_reporting }}" + # Set MTU for tunnel device used if ipip is enabled +{% if calico_mtu is defined %} + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + value: "{{ calico_veth_mtu | default(calico_mtu) }}" + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + value: "{{ calico_veth_mtu | default(calico_mtu) }}" + # Set MTU for the Wireguard tunnel device. + - name: FELIX_WIREGUARDMTU + value: "{{ calico_veth_mtu | default(calico_mtu) }}" +{% endif %} + - name: FELIX_CHAININSERTMODE + value: "{{ calico_felix_chaininsertmode }}" + - name: FELIX_PROMETHEUSMETRICSENABLED + value: "{{ calico_felix_prometheusmetricsenabled }}" + - name: FELIX_PROMETHEUSMETRICSPORT + value: "{{ calico_felix_prometheusmetricsport }}" + - name: FELIX_PROMETHEUSGOMETRICSENABLED + value: "{{ calico_felix_prometheusgometricsenabled }}" + - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED + value: "{{ calico_felix_prometheusprocessmetricsenabled }}" +{% if calico_ip_auto_method is defined %} + - name: IP_AUTODETECTION_METHOD + value: "{{ calico_ip_auto_method }}" +{% else %} + - name: NODEIP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: IP_AUTODETECTION_METHOD + value: "can-reach=$(NODEIP)" +{% endif %} + - name: IP + value: "autodetect" +{% if calico_ip6_auto_method is defined and enable_dual_stack_networks %} + - name: IP6_AUTODETECTION_METHOD + value: "{{ calico_ip6_auto_method }}" +{% endif %} +{% if calico_felix_mtu_iface_pattern is defined %} + - name: FELIX_MTUIFACEPATTERN + value: "{{ calico_felix_mtu_iface_pattern }}" +{% endif %} +{% if enable_dual_stack_networks %} + - name: IP6 + value: autodetect +{% endif %} +{% if calico_use_default_route_src_ipaddr|default(false) %} + - name: FELIX_DEVICEROUTESOURCEADDRESS + valueFrom: + fieldRef: + fieldPath: status.hostIP +{% endif %} + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: FELIX_HEALTHENABLED + value: "true" + - name: FELIX_IGNORELOOSERPF + value: "{{ calico_node_ignorelooserpf }}" + - name: CALICO_MANAGE_CNI + value: "true" +{% if calico_node_extra_envs is defined %} +{% for key in calico_node_extra_envs %} + - name: {{ key }} + value: "{{ calico_node_extra_envs[key] }}" +{% endfor %} +{% endif %} + securityContext: + privileged: true + resources: + limits: + cpu: {{ calico_node_cpu_limit }} + memory: {{ calico_node_memory_limit }} + requests: + cpu: {{ calico_node_cpu_requests }} + memory: {{ calico_node_memory_requests }} + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live +{% if calico_network_backend == "bird" %} + - -bird-live +{% endif %} + periodSeconds: 10 + initialDelaySeconds: 10 + timeoutSeconds: {{ calico_node_livenessprobe_timeout | default(10) }} + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node +{% if calico_network_backend == "bird" %} + - -bird-ready +{% endif %} + - 
-felix-ready + periodSeconds: 10 + timeoutSeconds: {{ calico_node_readinessprobe_timeout | default(10) }} + failureThreshold: 6 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false +{% if calico_datastore == "etcd" %} + - mountPath: /calico-secrets + name: etcd-certs + readOnly: true +{% endif %} + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + # For maintaining CNI plugin API credentials. + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false +{% if typha_secure %} + - name: typha-client + mountPath: /etc/typha-client + readOnly: true + - name: typha-cacert + subPath: ca.crt + mountPath: /etc/typha-ca/ca.crt + readOnly: true +{% endif %} + - name: policysync + mountPath: /var/run/nodeagent +{% if calico_bpf_enabled %} + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. + - name: sysfs + mountPath: /sys/fs/ + # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host. + # If the host is known to mount that filesystem already then Bidirectional can be omitted. + mountPropagation: Bidirectional +{% endif %} + - name: cni-log-dir + mountPath: /var/log/calico/cni + readOnly: true + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + # Used to install CNI. + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin +{% if calico_datastore == "etcd" %} + # Mount in the etcd TLS secrets. + - name: etcd-certs + hostPath: + path: "{{ calico_cert_dir }}" +{% endif %} + # Mount the global iptables lock file, used by calico/node + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +{% if calico_datastore == "kdd" %} + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks +{% endif %} +{% if typha_enabled and typha_secure %} + - name: typha-client + secret: + secretName: typha-client + items: + - key: tls.crt + path: typha-client.crt + - key: tls.key + path: typha-client.key + - name: typha-cacert + hostPath: + path: "/etc/kubernetes/ssl/" +{% endif %} +{% if calico_bpf_enabled %} + - name: sysfs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate +{% endif %} + # Used to access CNI logs. 
+ - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: "{{ kubelet_flexvolumes_plugins_dir | default('/usr/libexec/kubernetes/kubelet-plugins/volume/exec') }}/nodeagent~uds" + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate diff --git a/kubespray/roles/network_plugin/calico/templates/calico-typha.yml.j2 b/kubespray/roles/network_plugin/calico/templates/calico-typha.yml.j2 new file mode 100644 index 0000000..22d2f2c --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/calico-typha.yml.j2 @@ -0,0 +1,190 @@ +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. + +apiVersion: v1 +kind: Service +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha +{% if typha_prometheusmetricsenabled %} + - port: {{ typha_prometheusmetricsport }} + protocol: TCP + targetPort: http-metrics + name: metrics +{% endif %} + selector: + k8s-app: calico-typha + +--- + +# This manifest creates a Deployment of Typha to back the above service. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the calico-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ typha_replicas }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + template: + metadata: + labels: + k8s-app: calico-typha + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' +{% if typha_prometheusmetricsenabled %} + prometheus.io/scrape: 'true' + prometheus.io/port: "{{ typha_prometheusmetricsport }}" +{% endif %} + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. + serviceAccountName: calico-node + priorityClassName: system-cluster-critical + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: {{ calico_typha_image_repo }}:{{ calico_typha_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP +{% if typha_prometheusmetricsenabled %} + - containerPort: {{ typha_prometheusmetricsport }} + name: http-metrics + protocol: TCP +{% endif %} + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. 
+ name: kubernetes-services-endpoint + optional: true + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + - name: TYPHA_MAXCONNECTIONSLOWERLIMIT + value: "{{ typha_max_connections_lower_limit }}" +{% if typha_secure %} + - name: TYPHA_CAFILE + value: /etc/ca/ca.crt + - name: TYPHA_CLIENTCN + value: typha-client + - name: TYPHA_SERVERCERTFILE + value: /etc/typha/server_certificate.pem + - name: TYPHA_SERVERKEYFILE + value: /etc/typha/server_key.pem +{% endif %} +{% if typha_prometheusmetricsenabled %} + # Since Typha is host-networked, + # this opens a port on the host, which may need to be secured. + - name: TYPHA_PROMETHEUSMETRICSENABLED + value: "true" + - name: TYPHA_PROMETHEUSMETRICSPORT + value: "{{ typha_prometheusmetricsport }}" +{% endif %} +{% if typha_secure %} + volumeMounts: + - mountPath: /etc/typha + name: typha-server + readOnly: true + - mountPath: /etc/ca/ca.crt + subPath: ca.crt + name: cacert + readOnly: true +{% endif %} + # Needed for version >=3.7 when the 'host-local' ipam is used + # Should never happen given templates/cni-calico.conflist.j2 + # Configure route aggregation based on pod CIDR. + # - name: USE_POD_CIDR + # value: "true" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 +{% if typha_secure %} + volumes: + - name: typha-server + secret: + secretName: typha-server + items: + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + - name: cacert + hostPath: + path: "{{ kube_cert_dir }}" +{% endif %} + +--- + +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha diff --git a/kubespray/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 b/kubespray/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 new file mode 100644 index 0000000..fcde4a5 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 @@ -0,0 +1,6 @@ +#!/bin/bash +ETCD_ENDPOINTS={{ etcd_access_addresses }} \ +ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \ +ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \ +ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \ +{{ bin_dir }}/calicoctl --allow-version-mismatch "$@" diff --git a/kubespray/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 b/kubespray/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 new file mode 100644 index 0000000..ef89f39 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 @@ -0,0 +1,8 @@ +#!/bin/bash +DATASTORE_TYPE=kubernetes \ +{% if inventory_hostname in groups['kube_control_plane'] %} +KUBECONFIG=/etc/kubernetes/admin.conf \ +{% else %} 
+KUBECONFIG=/etc/cni/net.d/calico-kubeconfig \ +{% endif %} +{{ bin_dir }}/calicoctl --allow-version-mismatch "$@" diff --git a/kubespray/roles/network_plugin/calico/templates/cni-calico.conflist.j2 b/kubespray/roles/network_plugin/calico/templates/cni-calico.conflist.j2 new file mode 100644 index 0000000..5cdf1ac --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/cni-calico.conflist.j2 @@ -0,0 +1,86 @@ +{ + "name": "{{ calico_cni_name }}", + "cniVersion":"0.3.1", + "plugins":[ + { +{% if calico_datastore == "kdd" %} + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", +{% else %} +{% if cloud_provider is defined %} + "nodename": "{{ calico_kubelet_name.stdout }}", +{% else %} + "nodename": "{{ calico_baremetal_nodename }}", +{% endif %} +{% endif %} + "type": "calico", + "log_level": "info", +{% if calico_cni_log_file_path %} + "log_file_path": "{{ calico_cni_log_file_path }}", +{% endif %} +{% if calico_datastore == "etcd" %} + "etcd_endpoints": "{{ etcd_access_addresses }}", + "etcd_cert_file": "{{ calico_cert_dir }}/cert.crt", + "etcd_key_file": "{{ calico_cert_dir }}/key.pem", + "etcd_ca_cert_file": "{{ calico_cert_dir }}/ca_cert.crt", +{% endif %} +{% if calico_ipam_host_local is defined %} + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, +{% else %} + "ipam": { + "type": "calico-ipam", +{% if enable_dual_stack_networks %} + "assign_ipv6": "true", +{% if calico_cni_pool_ipv6 %} + "ipv6_pools": ["{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}"], +{% endif %} +{% endif %} +{% if calico_cni_pool %} + "ipv4_pools": ["{{ calico_pool_cidr | default(kube_pods_subnet) }}"], +{% endif %} + "assign_ipv4": "true" + }, +{% endif %} +{% if calico_allow_ip_forwarding %} + "container_settings": { + "allow_ip_forwarding": true + }, +{% endif %} +{% if (calico_feature_control is defined) and (calico_feature_control|length > 0) %} + "feature_control": { + {% for fc in calico_feature_control -%} + {% set fcval = calico_feature_control[fc] -%} + "{{ fc }}": {{ (fcval | string | lower) if (fcval == true or fcval == false) else "\"" + fcval + "\"" }}{{ "," if not loop.last else "" }} + {% endfor -%} + {{- "" }} + }, +{% endif %} +{% if enable_network_policy %} + "policy": { + "type": "k8s" + }, +{% endif %} +{% if calico_mtu is defined and calico_mtu is number %} + "mtu": {{ calico_mtu }}, +{% endif %} + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type":"portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type":"bandwidth", + "capabilities": { + "bandwidth": true + } + } + ] +} diff --git a/kubespray/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 b/kubespray/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 new file mode 100644 index 0000000..7ececd4 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: kube-system + name: kubernetes-services-endpoint +data: +{% if calico_bpf_enabled %} +{% if loadbalancer_apiserver is defined %} + KUBERNETES_SERVICE_HOST: "{{ apiserver_loadbalancer_domain_name }}" + KUBERNETES_SERVICE_PORT: "{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}" +{%- elif use_localhost_as_kubeapi_loadbalancer|default(False)|bool %} + KUBERNETES_SERVICE_HOST: "127.0.0.1" + KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_port }}" +{%- else %} + KUBERNETES_SERVICE_HOST: "{{ 
first_kube_control_plane_address }}" + KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_port }}" +{%- endif %} +{% endif %} diff --git a/kubespray/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 b/kubespray/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 new file mode 100644 index 0000000..94b2022 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 @@ -0,0 +1,102 @@ +#!/bin/bash + +# Author: Smana smainklh@gmail.com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o pipefail +usage() +{ + cat << EOF +Create self signed certificates + +Usage : $(basename $0) -f <config> [-d <ssldir>] + -h | --help : Show this message + -f | --config : Openssl configuration file + -d | --ssldir : Directory where the certificates will be installed + -c | --cadir : Directory where the existing CA is located + -s | --service : Service for the ca + + ex : + $(basename $0) -f openssl.conf -d /srv/ssl +EOF +} + +# Options parsing +while (($#)); do + case "$1" in + -h | --help) usage; exit 0;; + -f | --config) CONFIG=${2}; shift 2;; + -d | --ssldir) SSLDIR="${2}"; shift 2;; + -c | --cadir) CADIR="${2}"; shift 2;; + -s | --service) SERVICE="${2}"; shift 2;; + *) + usage + echo "ERROR : Unknown option" + exit 3 + ;; + esac +done + +if [ -z ${CONFIG} ]; then + echo "ERROR: the openssl configuration file is missing. option -f" + exit 1 +fi +if [ -z ${SSLDIR} ]; then + SSLDIR="/etc/calico/certs" +fi + +tmpdir=$(mktemp -d /tmp/calico_${SERVICE}_certs.XXXXXX) +trap 'rm -rf "${tmpdir}"' EXIT +cd "${tmpdir}" + +mkdir -p ${SSLDIR} ${CADIR} + +# Root CA +if [ -e "$CADIR/ca.key" ]; then + # Reuse existing CA + cp $CADIR/{ca.crt,ca.key} .
+else + openssl genrsa -out ca.key {{certificates_key_size}} > /dev/null 2>&1 + openssl req -x509 -new -nodes -key ca.key -days {{certificates_duration}} -out ca.crt -subj "/CN=calico-${SERVICE}-ca" > /dev/null 2>&1 +fi + +if [ $SERVICE == "typha" ]; then + # Typha server + openssl genrsa -out typha-server.key {{certificates_key_size}} > /dev/null 2>&1 + openssl req -new -key typha-server.key -out typha-server.csr -subj "/CN=typha-server" -config ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in typha-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-server.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + + # Typha client + openssl genrsa -out typha-client.key {{certificates_key_size}} > /dev/null 2>&1 + openssl req -new -key typha-client.key -out typha-client.csr -subj "/CN=typha-client" -config ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in typha-client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-client.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + +elif [ $SERVICE == "apiserver" ]; then + # calico-apiserver + openssl genrsa -out apiserver.key {{certificates_key_size}} > /dev/null 2>&1 + openssl req -new -key apiserver.key -out apiserver.csr -subj "/CN=calico-apiserver" -config ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out apiserver.crt -days {{certificates_duration}} -extensions ssl_client_apiserver -extfile ${CONFIG} > /dev/null 2>&1 +else + echo "ERROR: the service name is missing or unknown. option -s" + exit 1 +fi + +# Install certs +if [ -e "$CADIR/ca.key" ]; then + # No pass existing CA + rm -f ca.crt ca.key +fi + +mv {*.crt,*.key} ${SSLDIR}/ diff --git a/kubespray/roles/network_plugin/calico/vars/amazon.yml b/kubespray/roles/network_plugin/calico/vars/amazon.yml new file mode 100644 index 0000000..83efdcd --- /dev/null +++ b/kubespray/roles/network_plugin/calico/vars/amazon.yml @@ -0,0 +1,5 @@ +--- +calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-7-$basearch/ +calico_wireguard_packages: + - wireguard-dkms + - wireguard-tools diff --git a/kubespray/roles/network_plugin/calico/vars/centos-9.yml b/kubespray/roles/network_plugin/calico/vars/centos-9.yml new file mode 100644 index 0000000..43df545 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/vars/centos-9.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/kubespray/roles/network_plugin/calico/vars/debian.yml b/kubespray/roles/network_plugin/calico/vars/debian.yml new file mode 100644 index 0000000..baf603c --- /dev/null +++ b/kubespray/roles/network_plugin/calico/vars/debian.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard diff --git a/kubespray/roles/network_plugin/calico/vars/fedora.yml b/kubespray/roles/network_plugin/calico/vars/fedora.yml new file mode 100644 index 0000000..43df545 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/vars/fedora.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/kubespray/roles/network_plugin/calico/vars/opensuse.yml b/kubespray/roles/network_plugin/calico/vars/opensuse.yml new file mode 100644 index 0000000..43df545 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/vars/opensuse.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/kubespray/roles/network_plugin/calico/vars/redhat-9.yml
b/kubespray/roles/network_plugin/calico/vars/redhat-9.yml new file mode 100644 index 0000000..43df545 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/vars/redhat-9.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/kubespray/roles/network_plugin/calico/vars/redhat.yml b/kubespray/roles/network_plugin/calico/vars/redhat.yml new file mode 100644 index 0000000..a83a8a5 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/vars/redhat.yml @@ -0,0 +1,4 @@ +--- +calico_wireguard_packages: + - wireguard-dkms + - wireguard-tools diff --git a/kubespray/roles/network_plugin/calico/vars/rocky-9.yml b/kubespray/roles/network_plugin/calico/vars/rocky-9.yml new file mode 100644 index 0000000..43df545 --- /dev/null +++ b/kubespray/roles/network_plugin/calico/vars/rocky-9.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/kubespray/roles/network_plugin/canal/defaults/main.yml b/kubespray/roles/network_plugin/canal/defaults/main.yml new file mode 100644 index 0000000..419cc36 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/defaults/main.yml @@ -0,0 +1,33 @@ +--- +# The interface used by canal for host <-> host communication. +# If left blank, then the interface is chosen using the node's +# default route. +canal_iface: "" + +# Whether or not to masquerade traffic to destinations not within +# the pod network. +canal_masquerade: "true" + +# Etcd SSL dirs +canal_cert_dir: /etc/canal/certs + +# Canal Network Policy directory +canal_policy_dir: /etc/kubernetes/policy + +# Limits for apps +calico_node_memory_limit: 500M +calico_node_cpu_limit: 200m +calico_node_memory_requests: 64M +calico_node_cpu_requests: 50m +flannel_memory_limit: 500M +flannel_cpu_limit: 200m +flannel_memory_requests: 64M +flannel_cpu_requests: 50m + +# etcd cert filenames
+kube_etcd_cacert_file: ca.pem +kube_etcd_cert_file: node-{{ inventory_hostname }}.pem +kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem + +# Set log path for calico CNI plugin. Set to false to disable logging to disk.
+calico_cni_log_file_path: /var/log/calico/cni/cni.log diff --git a/kubespray/roles/network_plugin/canal/handlers/main.yml b/kubespray/roles/network_plugin/canal/handlers/main.yml new file mode 100644 index 0000000..7769b99 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/handlers/main.yml @@ -0,0 +1,14 @@ +--- +- name: reset_canal_cni + command: /bin/true + notify: + - delete 10-canal.conflist + - delete canal-node containers + +- name: delete 10-canal.conflist + file: + path: /etc/canal/10-canal.conflist + state: absent + +- name: delete canal-node containers + shell: "docker ps -af name=k8s_POD_canal-node* -q | xargs --no-run-if-empty docker rm -f" diff --git a/kubespray/roles/network_plugin/canal/tasks/main.yml b/kubespray/roles/network_plugin/canal/tasks/main.yml new file mode 100644 index 0000000..4117d1c --- /dev/null +++ b/kubespray/roles/network_plugin/canal/tasks/main.yml @@ -0,0 +1,103 @@ +--- +- name: Canal | Write Canal cni config + template: + src: "cni-canal.conflist.j2" + dest: "/etc/cni/net.d/canal.conflist.template" + mode: 0644 + owner: "{{ kube_owner }}" + register: canal_conflist + notify: reset_canal_cni + +- name: Canal | Create canal certs directory + file: + dest: "{{ canal_cert_dir }}" + state: directory + mode: 0750 + owner: root + group: root + +- name: Canal | Link etcd certificates for canal-node + file: + src: "{{ etcd_cert_dir }}/{{ item.s }}" + dest: "{{ canal_cert_dir }}/{{ item.d }}" + state: hard + mode: 0640 + force: yes + with_items: + - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"} + - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"} + - {s: "{{ kube_etcd_key_file }}", d: "key.pem"} + +- name: Slurp etcd cacert file + slurp: + src: "{{ canal_cert_dir }}/ca_cert.crt" + register: etcd_ca_cert_file + failed_when: false + +- name: Slurp etcd cert file + slurp: + src: "{{ canal_cert_dir }}/cert.crt" + register: etcd_cert_file + failed_when: false + +- name: Slurp etcd key file + slurp: + src: "{{ canal_cert_dir }}/key.pem" + register: etcd_key_file + failed_when: false + +# Flannel need etcd v2 API +- name: Canal | Set Flannel etcd configuration + command: |- + {{ bin_dir }}/etcdctl set /coreos.com/network/config \ + '{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }' + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + delegate_to: "{{ groups['etcd'][0] }}" + changed_when: false + run_once: true + environment: + ETCDCTL_API: 2 + ETCDCTL_CA_FILE: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}" + ETCDCTL_CERT_FILE: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'][0] + '.pem' }}" + ETCDCTL_KEY_FILE: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'][0] + '-key.pem' }}" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + +- name: Canal | Create canal node manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: canal-calico-etcd-secret, file: canal-secret-calico-etcd.yml, type: secret} + - {name: canal-config, file: canal-config.yaml, type: cm} + - {name: canal-node, file: canal-node.yaml, type: ds} + - {name: canal-kube-controllers, file: canal-calico-kube-controllers.yml, type: deployment} + - {name: canal-cr, file: 
canal-cr.yml, type: clusterrole} + - {name: canal, file: canal-node-sa.yml, type: sa} + - {name: calico-cr, file: canal-cr-calico-node.yml, type: clusterrole} + - {name: calico-kube-cr, file: canal-cr-calico-kube-controllers.yml, type: clusterrole} + - {name: calico-crd, file: canal-crd-calico.yml, type: crd} + - {name: flannel, file: canal-cr-flannel.yml, type: clusterrole} + - {name: canal, file: canal-crb-canal.yml, type: clusterrolebinding} + - {name: canal-calico, file: canal-crb-calico.yml, type: clusterrolebinding} + - {name: canal-flannel, file: canal-crb-flannel.yml, type: clusterrolebinding} + register: canal_manifests + when: + - inventory_hostname in groups['kube_control_plane'] + +- name: Canal | Install calicoctl wrapper script + template: + src: calicoctl.sh.j2 + dest: "{{ bin_dir }}/calicoctl.sh" + mode: 0755 + owner: root + group: root + +- name: Canal | Create network policy directory + file: + path: "{{ canal_policy_dir }}" + state: directory + mode: 0755 diff --git a/kubespray/roles/network_plugin/canal/templates/calicoctl.sh.j2 b/kubespray/roles/network_plugin/canal/templates/calicoctl.sh.j2 new file mode 100644 index 0000000..8343ef8 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/calicoctl.sh.j2 @@ -0,0 +1,6 @@ +#!/bin/bash +ETCD_ENDPOINTS={{ etcd_access_addresses }} \ +ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \ +ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \ +ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \ +{{ bin_dir }}/calicoctl "$@" diff --git a/kubespray/roles/network_plugin/canal/templates/canal-calico-kube-controllers.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-calico-kube-controllers.yml.j2 new file mode 100644 index 0000000..1417022 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-calico-kube-controllers.yml.j2 @@ -0,0 +1,96 @@ +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + nodeSelector: + kubernetes.io/os: linux + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + # The controllers must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + containers: + - name: calico-kube-controllers + image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + # The location of the etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. 
+ - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: policy,namespace,serviceaccount,workloadendpoint,node + volumeMounts: + # Mount in the etcd TLS secrets. + - mountPath: /calico-secrets + name: etcd-certs + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 + volumes: + # Mount in the etcd TLS secrets with mode 400. + # See https://kubernetes.io/docs/concepts/configuration/secret/ + - name: etcd-certs + secret: + secretName: calico-etcd-secrets + defaultMode: 0440 diff --git a/kubespray/roles/network_plugin/canal/templates/canal-config.yaml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-config.yaml.j2 new file mode 100644 index 0000000..8aab6fb --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-config.yaml.j2 @@ -0,0 +1,80 @@ +# This ConfigMap can be used to configure a self-hosted Canal installation. +# See `canal.yaml` for an example of a Canal deployment which uses +# the config in this ConfigMap. +kind: ConfigMap +apiVersion: v1 +metadata: + name: canal-config + namespace: kube-system +data: + # Configure this with the location of your etcd cluster. + etcd_endpoints: "{{ etcd_access_addresses }}" + # If you're using TLS enabled etcd uncomment the following. + # You must also populate the Secret below with these files. + etcd_ca: "/calico-secrets/etcd-ca" + etcd_cert: "/calico-secrets/etcd-cert" + etcd_key: "/calico-secrets/etcd-key" + + # Typha is disabled. + typha_service_name: "none" + + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + canal_iface: "{{ canal_iface }}" + + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: "{{ canal_masquerade }}" + + # Configure the MTU to use for workload interfaces and tunnels. + # By default, MTU is auto-detected, and explicitly setting this field should not be required. + # You can override auto-detection by providing a non-zero value. + veth_mtu: "0" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "canal", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "type": "calico", + "include_default_routes": true, + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "etcd_key_file": "__ETCD_KEY_FILE__", + "etcd_cert_file": "__ETCD_CERT_FILE__", + "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", + "log_level": "info", +{% if calico_cni_log_file_path %} + "log_file_path": "{{ calico_cni_log_file_path }}", +{% endif %} + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + } + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + } + ] + } + # Flannel network configuration. Mounted into the flannel container. 
+ net-conf.json: | + { + "Network": "{{ kube_pods_subnet }}", + "Backend": { + "Type": "vxlan" + } + } + diff --git a/kubespray/roles/network_plugin/canal/templates/canal-cr-calico-kube-controllers.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-cr-calico-kube-controllers.yml.j2 new file mode 100644 index 0000000..e3c03c4 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-cr-calico-kube-controllers.yml.j2 @@ -0,0 +1,83 @@ +# Source: calico/templates/calico-kube-controllers-rbac.yaml + +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are watched to check for existence as part of IPAM controller. + - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipreservations + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch + # Pools are watched to maintain a mapping of blocks to IP pools. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - watch + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch diff --git a/kubespray/roles/network_plugin/canal/templates/canal-cr-calico-node.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-cr-calico-node.yml.j2 new file mode 100644 index 0000000..d80a7e0 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-cr-calico-node.yml.j2 @@ -0,0 +1,133 @@ +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - canal + verbs: + - create + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. 
+ - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico must update some CRDs. + - apiGroups: [ "crd.projectcalico.org" ] + resources: + - caliconodestatuses + verbs: + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update diff --git a/kubespray/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 new file mode 100644 index 0000000..b2236d1 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 @@ -0,0 +1,23 @@ +# Flannel ClusterRole +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: + - apiGroups: [""] + resources: + - pods + verbs: + - get + - apiGroups: [""] + resources: + - nodes + verbs: + - list + - watch + - apiGroups: [""] + resources: + - nodes/status + verbs: + - patch diff --git a/kubespray/roles/network_plugin/canal/templates/canal-cr.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-cr.yml.j2 new file mode 100644 index 0000000..1209c7b --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-cr.yml.j2 @@ -0,0 +1,30 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: canal +rules: + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + verbs: + - create + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. 
+ - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list diff --git a/kubespray/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 new file mode 100644 index 0000000..415a2a2 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 @@ -0,0 +1,27 @@ +--- +# Bind the calico ClusterRole to the canal ServiceAccount. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: canal-calico +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system diff --git a/kubespray/roles/network_plugin/canal/templates/canal-crb-canal.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-crb-canal.yml.j2 new file mode 100644 index 0000000..9fcb0fc --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-crb-canal.yml.j2 @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: canal +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: canal +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system diff --git a/kubespray/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 new file mode 100644 index 0000000..5960139 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 @@ -0,0 +1,14 @@ +--- +# Bind the flannel ClusterRole to the canal ServiceAccount. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: canal-flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system diff --git a/kubespray/roles/network_plugin/canal/templates/canal-crd-calico.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-crd-calico.yml.j2 new file mode 100644 index 0000000..4f0653a --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-crd-calico.yml.j2 @@ -0,0 +1,3929 @@ +--- +# Source: calico/templates/kdd-crds.yaml + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. [Default: + 64512]' + format: int32 + type: integer + bindMode: + description: BindMode indicates whether to listen for BGP connections + on all addresses (None) or only on the node's canonical IP address + Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen + for BGP connections on all addresses. + type: string + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeMeshMaxRestartTime: + description: Time to allow for software restart for node-to-mesh peerings. When + specified, this is configured as the graceful restart timeout. When + not specified, the BIRD default of 120s is used. This field can + only be set on the default BGPConfiguration instance and requires + that NodeMesh is enabled + type: string + nodeMeshPassword: + description: Optional BGP password for full node-to-mesh peerings. + This field can only be set on the default BGPConfiguration instance + and requires that NodeMesh is enabled + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. 
For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + maxRestartTime: + description: Time to allow for software restart. When specified, + this is configured as the graceful restart timeout. When not specified, + the BIRD default of 120s is used. + type: string + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. 
If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + numAllowedLocalASNumbers: + description: Maximum number of local AS numbers that are allowed in + the AS path for received routes. This removes BGP loop prevention + and should only be used if absolutely necesssary. + format: int32 + type: integer + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + sourceAddress: + description: Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. + type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: caliconodestatuses.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: CalicoNodeStatus + listKind: CalicoNodeStatusList + plural: caliconodestatuses + singular: caliconodestatus + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus + resource. + properties: + classes: + description: Classes declares the types of information to monitor + for this calico/node, and allows for selective status reporting + about certain subsets of information. + items: + type: string + type: array + node: + description: The node name identifies the Calico node instance for + node status. + type: string + updatePeriodSeconds: + description: UpdatePeriodSeconds is the period at which CalicoNodeStatus + should be updated. Set to 0 to disable CalicoNodeStatus refresh. + Maximum update period is one day. + format: int32 + type: integer + type: object + status: + description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + No validation needed for status since it is updated by Calico. + properties: + agent: + description: Agent holds agent status on the node. + properties: + birdV4: + description: BIRDV4 represents the latest observed status of bird4. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + birdV6: + description: BIRDV6 represents the latest observed status of bird6. 
+ properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + type: object + bgp: + description: BGP holds node BGP status. + properties: + numberEstablishedV4: + description: The total number of IPv4 established bgp sessions. + type: integer + numberEstablishedV6: + description: The total number of IPv6 established bgp sessions. + type: integer + numberNotEstablishedV4: + description: The total number of IPv4 non-established bgp sessions. + type: integer + numberNotEstablishedV6: + description: The total number of IPv6 non-established bgp sessions. + type: integer + peersV4: + description: PeersV4 represents IPv4 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + peersV6: + description: PeersV6 represents IPv6 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + required: + - numberEstablishedV4 + - numberEstablishedV6 + - numberNotEstablishedV4 + - numberNotEstablishedV6 + type: object + lastUpdated: + description: LastUpdated is a timestamp representing the server time + when CalicoNodeStatus object last updated. It is represented in + RFC3339 form and is in UTC. + format: date-time + nullable: true + type: string + routes: + description: Routes reports routes known to the Calico BGP daemon + on the node. + properties: + routesV4: + description: RoutesV4 represents IPv4 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. 
+ type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + routesV6: + description: RoutesV6 represents IPv6 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. + properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. 
+ type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enable" or "Disable". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). + type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. [Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. 
+ [Default: false]' + type: boolean + bpfEnforceRPF: + description: 'BPFEnforceRPF enforce strict RPF on all interfaces with + BPF programs regardless of what is the per-interfaces or global + setting. Possible values are Disabled or Strict. [Default: Strict]' + type: string + bpfExtToServiceConnmark: + description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit + mark that is set on connections from an external client to a local + service. This mark allows us to control how packets of that connection + are routed within the host and how is routing intepreted by RPF + check. [Default: 0]' + type: integer + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + bpfMapSizeConntrack: + description: 'BPFMapSizeConntrack sets the size for the conntrack + map. This map must be large enough to hold an entry for each active + connection. Warning: changing the size of the conntrack map can + cause disruption.' + type: integer + bpfMapSizeIPSets: + description: BPFMapSizeIPSets sets the size for ipsets map. The IP + sets map must be large enough to hold an entry for each endpoint + matched by every selector in the source/destination matches in network + policy. Selectors such as "all()" can result in large numbers of + entries (one entry per endpoint in that case). + type: integer + bpfMapSizeNATAffinity: + type: integer + bpfMapSizeNATBackend: + description: BPFMapSizeNATBackend sets the size for nat back end map. + This is the total number of endpoints. This is mostly more than + the size of the number of services. + type: integer + bpfMapSizeNATFrontend: + description: BPFMapSizeNATFrontend sets the size for nat front end + map. FrontendMap should be large enough to hold an entry for each + nodeport, external IP and each port in each service. + type: integer + bpfMapSizeRoute: + description: BPFMapSizeRoute sets the size for the routes map. 
The + routes map should be large enough to hold one entry per workload + and a handful of entries per host (enough to cover its own IPs and + tunnel IPs). + type: integer + bpfPSNATPorts: + anyOf: + - type: integer + - type: string + description: 'BPFPSNATPorts sets the range from which we randomly + pick a port if there is a source port collision. This should be + within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating + systems. Linux uses 32768–60999, while others mostly use the IANA + defined range 49152–65535. It is not necessarily a problem if this + range overlaps with the operating systems. Both ends of the range + are inclusive. [Default: 20000:29999]' + pattern: ^.* + x-kubernetes-int-or-string: true + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernel''s + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calico''s rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + description: DataplaneDriver filename of the external dataplane driver + to use. Only used if UseInternalDataplaneDriver is set to false. + type: string + dataplaneWatchdogTimeout: + description: 'DataplaneWatchdogTimeout is the readiness/liveness timeout + used for Felix''s (internal) dataplane driver. Increase this value + if you experience spurious non-ready or non-live events when Felix + is under heavy load. Decrease the value to get felix to report non-live + or non-ready more quickly. [Default: 90s]' + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the IPv4 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + deviceRouteSourceAddressIPv6: + description: This is the IPv6 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. 
+ type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all inbound host ports, use the value + none. The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow outgoing traffic from host endpoints + to irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all outbound host ports, use the value + none. The default value opens etcd''s standard ports to ensure that + Felix does not get cut off from etcd as well as allowing DHCP and + DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, + tcp:6667, udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override the feature + detection. Values are specified in a comma separated list with no + spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". + "true" or "false" will force the feature, empty or omitted values + are auto-detected. + type: string + floatingIPs: + default: Disabled + description: FloatingIPs configures whether or not Felix will program + floating IP addresses. + enum: + - Enabled + - Disabled + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. 
If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the ''cali'' value, and our OpenStack integration + sets the ''tap'' value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + description: 'IPIPEnabled overrides whether Felix should configure + an IPIP interface on the host. Optional as Felix determines this + based on the existing IP pools. [Default: nil (unset)]' + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is legacy. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felix''s + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. [Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. [Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. 
+ This should only occur if another application on the system doesn''t + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calico''s rules. Set to 0 to disable IP + sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that + was fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. [Default: 10s]' + type: string + ipv6Support: + description: IPv6Support controls whether Felix enables support for + IPv6 (if supported by the in-use dataplane). + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logDebugFilenameRegex: + description: LogDebugFilenameRegex controls which source code files + have their Debug log output included in the logs. Only logs from + files with names that match the given regular expression are included. The + filter only applies to Debug level logs. + type: string + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not ''None''), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' + type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). + type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. 
By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. + [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusWireGuardMetricsEnabled: + description: 'PrometheusWireGuardMetricsEnabled disables wireguard + metrics collection, which the Prometheus client does by default, + when set to false. This reduces the number of metrics reported, + reducing Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. [Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calico''s rules. Set to 0 to disable route refresh. + [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. 
- CalicoIPAM: the default - use IPAM data to construct routes.' + type: string + routeTableRange: + description: Deprecated in favor of RouteTableRanges. Calico programs + additional Linux route tables for various purposes. RouteTableRange + specifies the indices of the route tables that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + routeTableRanges: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRanges specifies a set of table index ranges + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. + items: + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + type: array + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", + in which case such routing loops continue to be allowed. [Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + description: UseInternalDataplaneDriver, if true, Felix will use its + internal dataplane programming logic. If false, it will launch + an external dataplane driver and communicate with it over protobuf. + type: boolean + vxlanEnabled: + description: 'VXLANEnabled overrides whether Felix should create the + VXLAN tunnel device for VXLAN networking. Optional as Felix determines + this based on the existing IP pools. [Default: nil (unset)]' + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel + device. See Configuring MTU [Default: 1410]' + type: integer + vxlanMTUV6: + description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel + device. See Configuring MTU [Default: 1390]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled. + [Default: false]' + type: boolean + wireguardHostEncryptionEnabled: + description: 'WireguardHostEncryptionEnabled controls whether Wireguard + host-to-host encryption is enabled. [Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the Wireguard interface. [Default: wg.calico]' + type: string + wireguardKeepAlive: + description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive + option. Set 0 to disable. [Default: 0]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by Wireguard. 
[Default: 51820]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the Wireguard interface. + See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + workloadSourceSpoofing: + description: WorkloadSourceSpoofing controls whether pods can use + the allowedSourcePrefixes annotation to send traffic with a source + IP address that is not theirs. This is disabled by default. When + set to "Any", pods can request any prefix. + type: string + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. [Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." 
+ properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." 
+ type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. 
+ type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. 
\n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. 
A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." 
+ type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. 
+ type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. 
\n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + preDNAT: + description: PreDNAT indicates to apply the rules in this policy before + any DNAT. 
+ type: boolean + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. (If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. 
+ type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + description: Affinity of the block, if this block has one. If set, + it will be of the form "host:". If not set, this block + is not affine to a host. + type: string + allocations: + description: Array of allocations in-use within this block. nil entries + mean the allocation is free. For non-nil entries at index i, the + index is the ordinal of the allocation within this block and the + value is the index of the associated attributes in the Attributes + array. + items: + type: integer + # TODO: This nullable is manually added in. We should update controller-gen + # to handle []*int properly itself. + nullable: true + type: array + attributes: + description: Attributes is an array of arbitrary metadata associated + with allocations in the block. To find attributes for a given allocation, + use the value of the allocation's entry in the Allocations array + as the index of the element in this array. + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + description: The block's CIDR. + type: string + deleted: + description: Deleted is an internal boolean used to workaround a limitation + in the Kubernetes API whereby deletion will not return a conflict + error if the block has been updated. It should not be set manually. + type: boolean + sequenceNumber: + default: 0 + description: We store a sequence number that is updated each time + the block is written. 
Each allocation will also store the sequence + number of the block at the time of its creation. When releasing + an IP, passing the sequence number associated with the allocation + allows us to protect against a race condition and ensure the IP + hasn't been released and re-allocated since the release request. + format: int64 + type: integer + sequenceNumberForAllocation: + additionalProperties: + format: int64 + type: integer + description: Map of allocated ordinal within the block to sequence + number of the block at the time of allocation. Kubernetes does not + allow numerical keys for maps, so the key is cast to a string. + type: object + strictAffinity: + description: StrictAffinity on the IPAMBlock is deprecated and no + longer used by the code. Use IPAMConfig StrictAffinity instead. + type: boolean + unallocated: + description: Unallocated is an ordered list of allocations which are + free in the block. + items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. + properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + allowedUses: + description: AllowedUse controls what the IP pool will be used for. If + not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + items: + type: string + type: array + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 122 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disableBGPExport: + description: 'Disable exporting routes from this IP Pool''s CIDR over + BGP. [Default: false]' + type: boolean + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. 
+ If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + type: boolean + natOutgoing: + description: When nat-outgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). + type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ipreservations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPReservation + listKind: IPReservationList + plural: ipreservations + singular: ipreservation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPReservationSpec contains the specification for an IPReservation + resource. + properties: + reservedCIDRs: + description: ReservedCIDRs is a list of CIDRs and/or IP addresses + that Calico IPAM will exclude from new allocations. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the controller + to determine if an IP address has been leaked. Set to 0 + to disable IP garbage collection. [Default: 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. 
[Default: 9094]' + type: integer + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. + type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the + controller to determine if an IP address has been leaked. + Set to 0 to disable IP garbage collection. [Default: + 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. 
[Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: + 9094]' + type: integer + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). 
\n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." 
+ type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). 
\n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." 
+ type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. 
When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/kubespray/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 new file mode 100644 index 0000000..954f6d7 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: canal + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system diff --git a/kubespray/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-node.yaml.j2 new file mode 100644 index 0000000..529d4b9 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-node.yaml.j2 @@ -0,0 +1,418 @@ +# Source: calico/templates/calico-node.yaml +# This manifest installs the canal container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. 
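As an illustration of the NetworkSet schema and the selector syntax documented in the CRDs above, a minimal resource might look like the following sketch (the name, namespace, label and CIDR are made up for illustration and are not part of this change):

apiVersion: crd.projectcalico.org/v1
kind: NetworkSet
metadata:
  name: trusted-external        # hypothetical name
  namespace: demo               # hypothetical namespace
  labels:
    role: trusted-external
spec:
  nets:
    - 198.51.100.0/24           # documentation range used as a placeholder
# A policy rule could then reference it with a selector such as:
#   source:
#     selector: role == "trusted-external"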
+kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: canal + namespace: kube-system + labels: + k8s-app: canal +spec: + selector: + matchLabels: + k8s-app: canal + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + template: + metadata: + labels: + k8s-app: canal + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure canal gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: canal + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Set the serviceaccount name to use for the Calico CNI plugin. + # We use canal-node instead of calico-node when using flannel networking. + - name: CALICO_CNI_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-canal.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: canal-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # The location of the etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: canal-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - mountPath: /calico-secrets + name: etcd-certs + securityContext: + privileged: true + # This init container mounts the necessary filesystems needed by the BPF data plane + # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed + # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. 
+ - name: "mount-bpffs" + image: "{{ calico_node_image_repo }}:{{ calico_node_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["calico-node", "-init", "-best-effort"] + volumeMounts: + - mountPath: /sys/fs + name: sys-fs + # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + - mountPath: /var/run/calico + name: var-run-calico + # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, + # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. + - mountPath: /nodeproc + name: nodeproc + readOnly: true + securityContext: + privileged: true + containers: + # Runs canal container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: "{{ calico_node_image_repo }}:{{ calico_node_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # The location of the etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # Set noderef for node controller. + - name: CALICO_K8S_NODE_REF + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Set the serviceaccount name to use for the Calico CNI plugin. + # We use canal-node instead of calico-node when using flannel networking. + - name: CALICO_CNI_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + # Don't enable BGP. + - name: CALICO_NETWORKING_BACKEND + value: "none" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,canal" + # Period, in seconds, at which felix re-applies all iptables state + - name: FELIX_IPTABLESREFRESHINTERVAL + value: "60" + # No IP address needed. + - name: IP + value: "" + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. 
+ - name: FELIX_IPV6SUPPORT + value: "false" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + limits: + cpu: {{ calico_node_cpu_limit }} + memory: {{ calico_node_memory_limit }} + requests: + cpu: {{ calico_node_cpu_requests }} + memory: {{ calico_node_memory_requests }} + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + host: localhost + periodSeconds: 10 + timeoutSeconds: 10 + volumeMounts: + # For maintaining CNI plugin API credentials. + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - mountPath: /calico-secrets + name: etcd-certs + - name: policysync + mountPath: /var/run/nodeagent + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. + - name: bpffs + mountPath: /sys/fs/bpf + - name: cni-log-dir + mountPath: /var/log/calico/cni + readOnly: true + # Runs the flannel daemon to enable vxlan networking between + # container hosts. + - name: flannel + image: "{{ flannel_image_repo }}:{{ flannel_image_tag }}" + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"] + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + # The location of the etcd cluster. + - name: FLANNELD_ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # Location of the CA certificate for etcd. + - name: FLANNELD_ETCD_CAFILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: FLANNELD_ETCD_KEYFILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. + - name: FLANNELD_ETCD_CERTFILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # The interface flannel should run on. + - name: FLANNELD_IFACE + valueFrom: + configMapKeyRef: + name: canal-config + key: canal_iface + # Perform masquerade on traffic leaving the pod cidr. + - name: FLANNELD_IP_MASQ + valueFrom: + configMapKeyRef: + name: canal-config + key: masquerade + # Write the subnet.env file to the mounted directory. 
+ - name: FLANNELD_SUBNET_FILE + value: "/run/flannel/subnet.env" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/resolv.conf + name: resolv + - mountPath: /run/flannel + name: run-flannel + - mountPath: /calico-secrets + name: etcd-certs + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: flannel-cfg + configMap: + name: canal-config + # Used by canal-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: sys-fs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + - name: bpffs + hostPath: + path: /sys/fs/bpf + type: Directory + # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. + - name: nodeproc + hostPath: + path: /proc + # Used by flannel. + - name: run-flannel + hostPath: + path: /run/flannel + - name: resolv + hostPath: + path: /etc/resolv.conf + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to access CNI logs. + - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Mount in the etcd TLS secrets with mode 400. + # See https://kubernetes.io/docs/concepts/configuration/secret/ + - name: etcd-certs + secret: + secretName: calico-etcd-secrets + defaultMode: 0400 + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent diff --git a/kubespray/roles/network_plugin/canal/templates/canal-secret-calico-etcd.yml.j2 b/kubespray/roles/network_plugin/canal/templates/canal-secret-calico-etcd.yml.j2 new file mode 100644 index 0000000..bed51c7 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/canal-secret-calico-etcd.yml.j2 @@ -0,0 +1,18 @@ +# Source: calico/templates/calico-etcd-secrets.yaml +# The following contains k8s Secrets for use with a TLS enabled etcd cluster. +# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: calico-etcd-secrets + namespace: kube-system +data: + # Populate the following with etcd TLS configuration if desired, but leave blank if + # not using TLS for etcd. + # The keys below should be uncommented and the values populated with the base64 + # encoded contents of each file that would be associated with the TLS data. 
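The etcd-key, etcd-cert and etcd-ca values below are filled from variables that already hold base64-encoded data, for example registered with Ansible's slurp module (slurp returns file contents base64-encoded); a sketch of how such a variable could be produced, with an assumed key path:

- name: Read etcd client key for the canal secret   # illustrative task, path assumed
  ansible.builtin.slurp:
    src: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
  register: etcd_key_file   # exposes etcd_key_file.content, already base64-encoded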
+ # Example command for encoding a file contents: cat | base64 -w 0 + etcd-key: {{ etcd_key_file.content }} + etcd-cert: {{ etcd_cert_file.content }} + etcd-ca: {{ etcd_ca_cert_file.content }} diff --git a/kubespray/roles/network_plugin/canal/templates/cni-canal.conflist.j2 b/kubespray/roles/network_plugin/canal/templates/cni-canal.conflist.j2 new file mode 100644 index 0000000..3902a81 --- /dev/null +++ b/kubespray/roles/network_plugin/canal/templates/cni-canal.conflist.j2 @@ -0,0 +1,34 @@ + { + "name": "canal", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "type": "calico", + "include_default_routes": true, + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "etcd_key_file": "__ETCD_KEY_FILE__", + "etcd_cert_file": "__ETCD_CERT_FILE__", + "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", + "log_level": "info", +{% if calico_cni_log_file_path %} + "log_file_path": "{{ calico_cni_log_file_path }}", +{% endif %} + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + } + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + } + ] + } diff --git a/kubespray/roles/network_plugin/cilium/defaults/main.yml b/kubespray/roles/network_plugin/cilium/defaults/main.yml new file mode 100644 index 0000000..b58b39e --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/defaults/main.yml @@ -0,0 +1,256 @@ +--- +cilium_min_version_required: "1.10" +# Log-level +cilium_debug: false + +cilium_mtu: "" +cilium_enable_ipv4: true +cilium_enable_ipv6: false + +# Cilium agent health port +cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879 {%- else -%} 9876 {%- endif -%}" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. 
+# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +cilium_cert_dir: /etc/cilium/certs +kube_etcd_cacert_file: ca.pem +kube_etcd_cert_file: node-{{ inventory_hostname }}.pem +kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem + +# Limits for apps +cilium_memory_limit: 500M +cilium_cpu_limit: 500m +cilium_memory_requests: 64M +cilium_cpu_requests: 100m + +# Overlay Network Mode +cilium_tunnel_mode: vxlan +# Optional features +cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +cilium_monitor_aggregation: medium +# Kube Proxy Replacement mode (strict/probe/partial) +cilium_kube_proxy_replacement: probe + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +cilium_enable_legacy_services: false + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). +# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +cilium_encryption_type: "ipsec" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. 
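Tying the encryption switches in this defaults file together: encryption is only active when cilium_encryption_enabled is true and cilium_encryption_type selects the mechanism; for ipsec, a cilium_ipsec_key must also be supplied (asserted in tasks/check.yml further down). A minimal inventory override sketch for WireGuard, using only variables defined in this file:

cilium_encryption_enabled: true
cilium_encryption_type: wireguard
# On kernels without native WireGuard support, the userspace fallback can be
# enabled via cilium_wireguard_userspace_fallback (defined below).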
+cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +cilium_wireguard_userspace_fallback: false + +# Enable Bandwidth Manager +# Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation. +# Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies. +# In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. +# Bandwidth Manager requires a v5.1.x or more recent Linux kernel. +cilium_enable_bandwidth_manager: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +cilium_non_masquerade_cidrs: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + - 100.64.0.0/10 + - 192.0.0.0/24 + - 192.0.2.0/24 + - 192.88.99.0/24 + - 198.18.0.0/15 + - 198.51.100.0/24 + - 203.0.113.0/24 + - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. +cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +cilium_ip_masq_resync_interval: 60s + +# Hubble +### Enable Hubble without install +cilium_enable_hubble: false +### Enable Hubble Metrics +cilium_enable_hubble_metrics: false +### if cilium_enable_hubble_metrics: true +cilium_hubble_metrics: {} +# - dns +# - drop +# - tcp +# - flow +# - icmp +# - http +### Enable Hubble install +cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +cilium_hubble_tls_generate: false + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +cilium_agent_custom_args: [] + +# For adding and mounting extra volumes to the cilium agent +cilium_agent_extra_volumes: [] +cilium_agent_extra_volume_mounts: [] + +cilium_agent_extra_env_vars: [] + +cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +cilium_operator_api_serve_addr: "127.0.0.1:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +cilium_operator_extra_volumes: [] +cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. +cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. 
+# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. +# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +cilium_bpf_map_dynamic_size_ratio: "0.0025" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +cilium_enable_bpf_masquerade: false + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. The latter has the implication that it will also +# bypass netfilter in the host namespace. +cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +cilium_enable_well_known_identities: false + +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +cilium_monitor_aggregation_flags: "all" + +cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. 
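The cilium_operator_extra_volumes and cilium_operator_extra_volume_mounts lists above are rendered verbatim into the operator Deployment (see cilium-operator/deploy.yml.j2 later in this change), so each entry is a plain Kubernetes volume or volumeMount object; a sketch with assumed names and paths:

cilium_operator_extra_volumes:
  - name: extra-ca-certs                        # hypothetical volume
    hostPath:
      path: /usr/local/share/ca-certificates    # assumed host path
      type: Directory
cilium_operator_extra_volume_mounts:
  - name: extra-ca-certs
    mountPath: /extra-ca-certificates           # assumed mount path
    readOnly: true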
+cilium_disable_cnp_status_updates: true + +# Configure how long to wait for the Cilium DaemonSet to be ready again +cilium_rolling_restart_wait_retries_count: 30 +cilium_rolling_restart_wait_retries_delay_seconds: 10 + +# Cilium changed the default metrics exporter ports in 1.12 +cilium_agent_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9962', '9090') }}" +cilium_operator_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9963', '6942') }}" +cilium_hubble_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9965', '9091') }}" diff --git a/kubespray/roles/network_plugin/cilium/meta/main.yml b/kubespray/roles/network_plugin/cilium/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/roles/network_plugin/cilium/tasks/apply.yml b/kubespray/roles/network_plugin/cilium/tasks/apply.yml new file mode 100644 index 0000000..b977c21 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/tasks/apply.yml @@ -0,0 +1,33 @@ +--- +- name: Cilium | Start Resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.name }}-{{ item.item.file }}" + state: "latest" + loop: "{{ cilium_node_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped + +- name: Cilium | Wait for pods to run + command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 + register: pods_not_ready + until: pods_not_ready.stdout.find("cilium")==-1 + retries: "{{ cilium_rolling_restart_wait_retries_count | int }}" + delay: "{{ cilium_rolling_restart_wait_retries_delay_seconds | int }}" + failed_when: false + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Cilium | Hubble install + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/hubble/{{ item.item.name }}-{{ item.item.file }}" + state: "latest" + loop: "{{ cilium_hubble_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] and not item is skipped + - cilium_enable_hubble and cilium_hubble_install diff --git a/kubespray/roles/network_plugin/cilium/tasks/check.yml b/kubespray/roles/network_plugin/cilium/tasks/check.yml new file mode 100644 index 0000000..c65591f --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/tasks/check.yml @@ -0,0 +1,63 @@ +--- +- name: Cilium | Check Cilium encryption `cilium_ipsec_key` for ipsec + assert: + that: + - "cilium_ipsec_key is defined" + msg: "cilium_ipsec_key should be defined to enable encryption using ipsec" + when: + - cilium_encryption_enabled + - cilium_encryption_type == "ipsec" + - cilium_tunnel_mode in ['vxlan'] + +# TODO: Clean this task up when we drop backward compatibility support for `cilium_ipsec_enabled` +- name: Stop if `cilium_ipsec_enabled` is defined and `cilium_encryption_type` is not `ipsec` + assert: + that: cilium_encryption_type == 'ipsec' + msg: > + It is not possible to use `cilium_ipsec_enabled` when `cilium_encryption_type` is set to {{ cilium_encryption_type }}. 
+ when: + - cilium_ipsec_enabled is defined + - cilium_ipsec_enabled + - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool + +- name: Stop if kernel version is too low for Cilium Wireguard encryption + assert: + that: ansible_kernel.split('-')[0] is version('5.6.0', '>=') + when: + - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool + - cilium_encryption_enabled + - cilium_encryption_type == "wireguard" + - not ignore_assert_errors + +- name: Stop if bad Cilium identity allocation mode + assert: + that: cilium_identity_allocation_mode in ['crd', 'kvstore'] + msg: "cilium_identity_allocation_mode must be either 'crd' or 'kvstore'" + +- name: Stop if bad Cilium Cluster ID + assert: + that: + - cilium_cluster_id <= 255 + - cilium_cluster_id >= 0 + msg: "'cilium_cluster_id' must be between 1 and 255" + when: cilium_cluster_id is defined + +- name: Stop if bad encryption type + assert: + that: cilium_encryption_type in ['ipsec', 'wireguard'] + msg: "cilium_encryption_type must be either 'ipsec' or 'wireguard'" + when: cilium_encryption_enabled + +- name: Stop if cilium_version is < v1.10.0 + assert: + that: cilium_version | regex_replace('v') is version(cilium_min_version_required, '>=') + msg: "cilium_version is too low. Minimum version {{ cilium_min_version_required }}" + +# TODO: Clean this task up when we drop backward compatibility support for `cilium_ipsec_enabled` +- name: Set `cilium_encryption_type` to "ipsec" and if `cilium_ipsec_enabled` is true + set_fact: + cilium_encryption_type: ipsec + cilium_encryption_enabled: true + when: + - cilium_ipsec_enabled is defined + - cilium_ipsec_enabled diff --git a/kubespray/roles/network_plugin/cilium/tasks/install.yml b/kubespray/roles/network_plugin/cilium/tasks/install.yml new file mode 100644 index 0000000..9e89b7b --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/tasks/install.yml @@ -0,0 +1,97 @@ +--- +- name: Cilium | Ensure BPFFS mounted + mount: + fstype: bpf + path: /sys/fs/bpf + src: bpffs + state: mounted + +- name: Cilium | Create Cilium certs directory + file: + dest: "{{ cilium_cert_dir }}" + state: directory + mode: 0750 + owner: root + group: root + when: + - cilium_identity_allocation_mode == "kvstore" + +- name: Cilium | Link etcd certificates for cilium + file: + src: "{{ etcd_cert_dir }}/{{ item.s }}" + dest: "{{ cilium_cert_dir }}/{{ item.d }}" + mode: 0644 + state: hard + force: yes + loop: + - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"} + - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"} + - {s: "{{ kube_etcd_key_file }}", d: "key.pem"} + when: + - cilium_identity_allocation_mode == "kvstore" + +- name: Cilium | Create hubble dir + file: + path: "{{ kube_config_dir }}/addons/hubble" + state: directory + owner: root + group: root + mode: 0755 + when: + - inventory_hostname == groups['kube_control_plane'][0] + - cilium_hubble_install + +- name: Cilium | Create Cilium node manifests + template: + src: "{{ item.name }}/{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" + mode: 0644 + loop: + - {name: cilium, file: config.yml, type: cm} + - {name: cilium-operator, file: crb.yml, type: clusterrolebinding} + - {name: cilium-operator, file: cr.yml, type: clusterrole} + - {name: cilium, file: crb.yml, type: clusterrolebinding} + - {name: cilium, file: cr.yml, type: clusterrole} + - {name: cilium, file: secret.yml, type: secret, when: "{{ cilium_encryption_enabled and cilium_encryption_type == 'ipsec' }}"} + - 
{name: cilium, file: ds.yml, type: ds} + - {name: cilium-operator, file: deploy.yml, type: deploy} + - {name: cilium-operator, file: sa.yml, type: sa} + - {name: cilium, file: sa.yml, type: sa} + register: cilium_node_manifests + when: + - inventory_hostname in groups['kube_control_plane'] + - item.when | default(True) | bool + +- name: Cilium | Create Cilium Hubble manifests + template: + src: "{{ item.name }}/{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/hubble/{{ item.name }}-{{ item.file }}" + mode: 0644 + loop: + - {name: hubble, file: config.yml, type: cm} + - {name: hubble, file: crb.yml, type: clusterrolebinding} + - {name: hubble, file: cr.yml, type: clusterrole} + - {name: hubble, file: cronjob.yml, type: cronjob, when: "{{ cilium_hubble_tls_generate }}"} + - {name: hubble, file: deploy.yml, type: deploy} + - {name: hubble, file: job.yml, type: job, when: "{{ cilium_hubble_tls_generate }}"} + - {name: hubble, file: sa.yml, type: sa} + - {name: hubble, file: service.yml, type: service} + register: cilium_hubble_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] + - cilium_enable_hubble and cilium_hubble_install + - item.when | default(True) | bool + +- name: Cilium | Enable portmap addon + template: + src: 000-cilium-portmap.conflist.j2 + dest: /etc/cni/net.d/000-cilium-portmap.conflist + mode: 0644 + when: cilium_enable_portmap + +- name: Cilium | Copy Ciliumcli binary from download dir + copy: + src: "{{ local_release_dir }}/cilium" + dest: "{{ bin_dir }}/cilium" + mode: 0755 + remote_src: yes diff --git a/kubespray/roles/network_plugin/cilium/tasks/main.yml b/kubespray/roles/network_plugin/cilium/tasks/main.yml new file mode 100644 index 0000000..63c99dc --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- import_tasks: check.yml + +- include_tasks: install.yml + +- include_tasks: apply.yml diff --git a/kubespray/roles/network_plugin/cilium/tasks/reset.yml b/kubespray/roles/network_plugin/cilium/tasks/reset.yml new file mode 100644 index 0000000..432df8a --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/tasks/reset.yml @@ -0,0 +1,9 @@ +--- +- name: reset | check and remove devices if still present + include_tasks: reset_iface.yml + vars: + iface: "{{ item }}" + loop: + - cilium_host + - cilium_net + - cilium_vxlan diff --git a/kubespray/roles/network_plugin/cilium/tasks/reset_iface.yml b/kubespray/roles/network_plugin/cilium/tasks/reset_iface.yml new file mode 100644 index 0000000..d84a065 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/tasks/reset_iface.yml @@ -0,0 +1,12 @@ +--- +- name: "reset | check if network device {{ iface }} is present" + stat: + path: "/sys/class/net/{{ iface }}" + get_attributes: no + get_checksum: no + get_mime: no + register: device_remains + +- name: "reset | remove network device {{ iface }}" + command: "ip link del {{ iface }}" + when: device_remains.stat.exists diff --git a/kubespray/roles/network_plugin/cilium/templates/000-cilium-portmap.conflist.j2 b/kubespray/roles/network_plugin/cilium/templates/000-cilium-portmap.conflist.j2 new file mode 100644 index 0000000..982a7c9 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/000-cilium-portmap.conflist.j2 @@ -0,0 +1,13 @@ +{ + "cniVersion": "0.3.1", + "name": "cilium-portmap", + "plugins": [ + { + "type": "cilium-cni" + }, + { + "type": "portmap", + "capabilities": { "portMappings": true } + } + ] +} diff --git 
a/kubespray/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 new file mode 100644 index 0000000..8a40a66 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 @@ -0,0 +1,146 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator +rules: +- apiGroups: + - "" + resources: + # to automatically delete [core|kube]dns pods so that are starting to being + # managed by Cilium + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + # To remove node taints + - nodes + # To set NetworkUnavailable false on startup + - nodes/status + verbs: + - patch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform LB IP allocation for BGP + - services/status + verbs: + - update +- apiGroups: + - "" + resources: + # to perform the translation of a CNP that contains `ToGroup` to its endpoints + - services + - endpoints + # to check apiserver connectivity + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/status + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers +{% if cilium_version | regex_replace('v') is version('1.11', '>=') %} + - ciliumendpointslices +{% endif %} +{% if cilium_version | regex_replace('v') is version('1.12', '>=') %} + - ciliumbgploadbalancerippools + - ciliumbgppeeringpolicies + - ciliumenvoyconfigs +{% endif %} + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - update + - watch +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between multiple running instances. +# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". 
+- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +{% if cilium_version | regex_replace('v') is version('1.12', '>=') %} +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - update + resourceNames: + - ciliumbgploadbalancerippools.cilium.io + - ciliumbgppeeringpolicies.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumegressnatpolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumexternalworkloads.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io +{% endif %} diff --git a/kubespray/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 new file mode 100644 index 0000000..00f0835 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: cilium-operator + namespace: kube-system diff --git a/kubespray/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 new file mode 100644 index 0000000..5a5bd4a --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 @@ -0,0 +1,166 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cilium-operator + namespace: kube-system + labels: + io.cilium/app: operator + name: cilium-operator +spec: + replicas: {{ cilium_operator_replicas }} + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: +{% if cilium_enable_prometheus %} + annotations: + prometheus.io/port: "{{ cilium_operator_scrape_port }}" + prometheus.io/scrape: "true" +{% endif %} + labels: + io.cilium/app: operator + name: cilium-operator + spec: + containers: + - name: cilium-operator + image: "{{ cilium_operator_image_repo }}:{{ cilium_operator_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - cilium-operator + args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) +{% if cilium_operator_custom_args is string %} + - {{ cilium_operator_custom_args }} +{% else %} +{% for flag in cilium_operator_custom_args %} + - {{ flag }} +{% endfor %} +{% endif %} + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_ACCESS_KEY_ID + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_SECRET_ACCESS_KEY + optional: true + - name: AWS_DEFAULT_REGION + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_DEFAULT_REGION + optional: true +{% if cilium_kube_proxy_replacement 
== 'strict' %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} +{% if cilium_enable_prometheus %} + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: prometheus + containerPort: {{ cilium_operator_scrape_port }} + hostPort: {{ cilium_operator_scrape_port }} + protocol: TCP +{% endif %} + livenessProbe: + httpGet: +{% if cilium_enable_ipv4 %} + host: 127.0.0.1 +{% else %} + host: '::1' +{% endif %} + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true +{% if cilium_identity_allocation_mode == "kvstore" %} + - name: etcd-config-path + mountPath: /var/lib/etcd-config + readOnly: true + - name: etcd-secrets + mountPath: "{{cilium_cert_dir}}" + readOnly: true +{% endif %} +{% for volume_mount in cilium_operator_extra_volume_mounts %} + - {{ volume_mount | to_nice_yaml(indent=2) | indent(14) }} +{% endfor %} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + # In HA mode, cilium-operator pods must not be scheduled on the same + # node as they will clash with each other. + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator + tolerations: + - operator: Exists + volumes: + - name: cilium-config-path + configMap: + name: cilium-config +{% if cilium_identity_allocation_mode == "kvstore" %} + # To read the etcd config stored in config maps + - name: etcd-config-path + configMap: + name: cilium-config + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: "{{cilium_cert_dir}}" +{% endif %} +{% for volume in cilium_operator_extra_volumes %} + - {{ volume | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} diff --git a/kubespray/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 new file mode 100644 index 0000000..c5d1893 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-operator + namespace: kube-system diff --git a/kubespray/roles/network_plugin/cilium/templates/cilium/config.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/cilium/config.yml.j2 new file mode 100644 index 0000000..7a524c6 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/cilium/config.yml.j2 @@ -0,0 +1,248 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cilium-config + namespace: kube-system +data: + identity-allocation-mode: {{ cilium_identity_allocation_mode }} + +{% if cilium_identity_allocation_mode == "kvstore" %} + # This etcd-config contains the etcd endpoints of your cluster. 
If you use + # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config + etcd-config: |- + --- + endpoints: +{% for ip_addr in etcd_access_addresses.split(',') %} + - {{ ip_addr }} +{% endfor %} + + # In case you want to use TLS in etcd, uncomment the 'ca-file' line + # and create a kubernetes secret by following the tutorial in + # https://cilium.link/etcd-config + ca-file: "{{ cilium_cert_dir }}/ca_cert.crt" + + # In case you want client to server authentication, uncomment the following + # lines and create a kubernetes secret by following the tutorial in + # https://cilium.link/etcd-config + key-file: "{{ cilium_cert_dir }}/key.pem" + cert-file: "{{ cilium_cert_dir }}/cert.crt" + + # kvstore + # https://docs.cilium.io/en/latest/cmdref/kvstore/ + kvstore: etcd + kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' +{% endif %} + + # If you want metrics enabled in all of your Cilium agents, set the port for + # which the Cilium agents will have their metrics exposed. + # This option deprecates the "prometheus-serve-addr" in the + # "cilium-metrics-config" ConfigMap + # NOTE that this will open the port on ALL nodes where Cilium pods are + # scheduled. +{% if cilium_enable_prometheus %} + prometheus-serve-addr: ":{{ cilium_agent_scrape_port }}" + operator-prometheus-serve-addr: ":{{ cilium_operator_scrape_port }}" + enable-metrics: "true" +{% endif %} + + # If you want to run cilium in debug mode change this value to true + debug: "{{ cilium_debug }}" + enable-ipv4: "{{ cilium_enable_ipv4 }}" + enable-ipv6: "{{ cilium_enable_ipv6 }}" + # If a serious issue occurs during Cilium startup, this + # invasive option may be set to true to remove all persistent + # state. Endpoints will not be restored using knowledge from a + # prior Cilium run, so they may receive new IP addresses upon + # restart. This also triggers clean-cilium-bpf-state. + clean-cilium-state: "false" + # If you want to clean cilium BPF state, set this to true; + # Removes all BPF maps from the filesystem. Upon restart, + # endpoints are restored with the same IP addresses, however + # any ongoing connections may be disrupted briefly. + # Loadbalancing decisions will be reset, so any ongoing + # connections via a service may be loadbalanced to a different + # backend after restart. + clean-cilium-bpf-state: "false" + + # Users who wish to specify their own custom CNI configuration file must set + # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. + custom-cni-conf: "false" + + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". The higher the level, the less packets + # that will be seen in monitor output. + monitor-aggregation: "{{ cilium_monitor_aggregation }}" + + # ct-global-max-entries-* specifies the maximum number of connections + # supported across all endpoints, split by protocol: tcp or other. One pair + # of maps uses these values for IPv4 connections, and another pair of maps + # use these values for IPv6 connections. + # + # If these values are modified, then during the next Cilium startup the + # tracking of ongoing connections may be disrupted. This may lead to brief + # policy drops or a change in loadbalancing decisions for a connection. + # + # For users upgrading from Cilium 1.2 or earlier, to minimize disruption + # during the upgrade process, comment out these options. 
+ bpf-ct-global-tcp-max: "524288" + bpf-ct-global-any-max: "262144" + + # Pre-allocation of map entries allows per-packet latency to be reduced, at + # the expense of up-front memory allocation for the entries in the maps. The + # default value below will minimize memory usage in the default installation; + # users who are sensitive to latency may consider setting this to "true". + # + # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore + # this option and behave as though it is set to "true". + # + # If this value is modified, then during the next Cilium startup the restore + # of existing endpoints and tracking of ongoing connections may be disrupted. + # This may lead to policy drops or a change in loadbalancing decisions for a + # connection for some time. Endpoints may need to be recreated to restore + # connectivity. + # + # If this option is set to "false" during an upgrade from 1.3 or earlier to + # 1.4 or later, then it may cause one-time disruptions during the upgrade. + preallocate-bpf-maps: "{{cilium_preallocate_bpf_maps}}" + + # Regular expression matching compatible Istio sidecar istio-proxy + # container image names + sidecar-istio-proxy-image: "cilium/istio_proxy" + + # Encapsulation mode for communication between nodes + # Possible values: + # - disabled + # - vxlan (default) + # - geneve + tunnel: "{{ cilium_tunnel_mode }}" + + # Enable Bandwidth Manager + # Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation. + # Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies. + # In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. + # Bandwidth Manager requires a v5.1.x or more recent Linux kernel. +{% if cilium_enable_bandwidth_manager %} + enable-bandwidth-manager: "true" +{% endif %} + + # Name of the cluster. Only relevant when building a mesh of clusters. + cluster-name: "{{ cilium_cluster_name }}" + + # Unique ID of the cluster. Must be unique across all conneted clusters and + # in the range of 1 and 255. Only relevant when building a mesh of clusters. + #cluster-id: 1 +{% if cilium_cluster_id is defined %} + cluster-id: "{{ cilium_cluster_id }}" +{% endif %} + +# `wait-bpf-mount` is removed after v1.10.4 +# https://github.com/cilium/cilium/commit/d2217045cb3726a7f823174e086913b69b8090da +{% if cilium_version | regex_replace('v') is version('1.10.4', '<') %} + # wait-bpf-mount makes init container wait until bpf filesystem is mounted + wait-bpf-mount: "false" +{% endif %} + + kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}" + +# `native-routing-cidr` is deprecated in 1.10, removed in 1.12. 
+# Replaced by `ipv4-native-routing-cidr` +# https://github.com/cilium/cilium/pull/16695 +{% if cilium_version | regex_replace('v') is version('1.12', '<') %} + native-routing-cidr: "{{ cilium_native_routing_cidr }}" +{% else %} +{% if cilium_native_routing_cidr | length %} + ipv4-native-routing-cidr: "{{ cilium_native_routing_cidr }}" +{% endif %} +{% if cilium_native_routing_cidr_ipv6 | length %} + ipv6-native-routing-cidr: "{{ cilium_native_routing_cidr_ipv6 }}" +{% endif %} +{% endif %} + + auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}" + + operator-api-serve-addr: "{{ cilium_operator_api_serve_addr }}" + + # Hubble settings +{% if cilium_enable_hubble %} + enable-hubble: "true" +{% if cilium_enable_hubble_metrics %} + hubble-metrics-server: ":{{ cilium_hubble_scrape_port }}" + hubble-metrics: +{% for hubble_metrics_cycle in cilium_hubble_metrics %} + {{ hubble_metrics_cycle }} +{% endfor %} +{% endif %} + hubble-listen-address: ":4244" +{% if cilium_enable_hubble and cilium_hubble_install %} + hubble-disable-tls: "{% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}" + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt +{% endif %} +{% endif %} + + # IP Masquerade Agent + enable-ip-masq-agent: "{{ cilium_ip_masq_agent_enable }}" + +{% for key, value in cilium_config_extra_vars.items() %} + {{ key }}: "{{ value }}" +{% endfor %} + + # Enable transparent network encryption +{% if cilium_encryption_enabled %} +{% if cilium_encryption_type == "ipsec" %} + enable-ipsec: "true" + ipsec-key-file: /etc/ipsec/keys + encrypt-node: "{{ cilium_ipsec_node_encryption }}" +{% endif %} + +{% if cilium_encryption_type == "wireguard" %} + enable-wireguard: "true" + enable-wireguard-userspace-fallback: "{{ cilium_wireguard_userspace_fallback }}" +{% endif %} +{% endif %} + + # IPAM settings + ipam: "{{ cilium_ipam_mode }}" + + agent-health-port: "{{ cilium_agent_health_port }}" + +{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_host_root != '' %} + cgroup-root: "{{ cilium_cgroup_host_root }}" +{% endif %} + + bpf-map-dynamic-size-ratio: "{{ cilium_bpf_map_dynamic_size_ratio }}" + + enable-ipv4-masquerade: "{{ cilium_enable_ipv4_masquerade }}" + enable-ipv6-masquerade: "{{ cilium_enable_ipv6_masquerade }}" + + enable-bpf-masquerade: "{{ cilium_enable_bpf_masquerade }}" + + enable-host-legacy-routing: "{{ cilium_enable_host_legacy_routing }}" + + enable-remote-node-identity: "{{ cilium_enable_remote_node_identity }}" + + enable-well-known-identities: "{{ cilium_enable_well_known_identities }}" + + monitor-aggregation-flags: "{{ cilium_monitor_aggregation_flags }}" + + enable-bpf-clock-probe: "{{ cilium_enable_bpf_clock_probe }}" + + disable-cnp-status-updates: "{{ cilium_disable_cnp_status_updates }}" +{% if cilium_ip_masq_agent_enable %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ip-masq-agent + namespace: kube-system +data: + config: | + nonMasqueradeCIDRs: +{% for cidr in cilium_non_masquerade_cidrs %} + - {{ cidr }} +{% endfor %} + masqLinkLocal: {{ cilium_masq_link_local|bool }} + resyncInterval: "{{ cilium_ip_masq_resync_interval }}" +{% endif %} diff --git a/kubespray/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 new file mode 100644 index 0000000..a16211c --- /dev/null +++ 
b/kubespray/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 @@ -0,0 +1,122 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +{% if cilium_version | regex_replace('v') is version('1.12', '<') %} +- apiGroups: + - "" + resources: + - pods + - pods/finalizers + verbs: + - get + - list + - watch + - update + - delete +- apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - update +{% endif %} +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + # Deprecated for removal in v1.10 + - create + - list + - watch + - update + + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. + - get +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints + - ciliumendpoints/status + - ciliumnodes + - ciliumnodes/status + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumegressnatpolicies +{% if cilium_version | regex_replace('v') is version('1.11', '>=') %} + - ciliumendpointslices +{% endif %} +{% if cilium_version | regex_replace('v') is version('1.12', '>=') %} + - ciliumbgploadbalancerippools + - ciliumbgppeeringpolicies +{% endif %} +{% if cilium_version | regex_replace('v') is version('1.11.5', '<') %} + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints/finalizers + - ciliumnodes/finalizers + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies/finalizers +{% endif %} + verbs: + - '*' +{% if cilium_version | regex_replace('v') is version('1.12', '>=') %} +- apiGroups: + - cilium.io + resources: + - ciliumclusterwideenvoyconfigs + - ciliumenvoyconfigs + - ciliumegressgatewaypolicies + verbs: + - list + - watch +{% endif %} diff --git a/kubespray/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 new file mode 100644 index 0000000..d23897f --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system diff --git a/kubespray/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 new file mode 100644 index 0000000..08385b4 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 @@ -0,0 +1,424 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cilium + namespace: kube-system + labels: + k8s-app: cilium +spec: + selector: + matchLabels: + k8s-app: cilium + updateStrategy: + rollingUpdate: + # Specifies the maximum 
number of Pods that can be unavailable during the update process. + maxUnavailable: 2 + type: RollingUpdate + template: + metadata: + annotations: +{% if cilium_enable_prometheus %} + prometheus.io/port: "{{ cilium_agent_scrape_port }}" + prometheus.io/scrape: "true" +{% endif %} + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]' + labels: + k8s-app: cilium + spec: + containers: + - name: cilium-agent + image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - cilium-agent + args: + - --config-dir=/tmp/cilium/config-map +{% if cilium_mtu != "" %} + - --mtu={{ cilium_mtu }} +{% endif %} +{% if cilium_agent_custom_args is string %} + - {{ cilium_agent_custom_args }} +{% else %} +{% for flag in cilium_agent_custom_args %} + - {{ flag }} +{% endfor %} +{% endif %} + startupProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 105 + periodSeconds: 2 + successThreshold: 1 + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ +{% if cilium_kube_proxy_replacement == 'strict' %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} +{% for env_var in cilium_agent_extra_env_vars %} + - {{ env_var | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} + lifecycle: + postStart: + exec: + command: + - "/cni-install.sh" + - "--cni-exclusive={{ cilium_cni_exclusive | string | lower }}" +{% if cilium_version | regex_replace('v') is version('1.12', '>=') %} + - "--enable-debug={{ cilium_debug | string | lower }}" + - "--log-file={{ cilium_cni_log_file }}" +{% endif %} + preStop: + exec: + command: + - /cni-uninstall.sh + resources: + limits: + cpu: {{ cilium_cpu_limit }} + memory: {{ cilium_memory_limit }} + requests: + cpu: {{ cilium_cpu_requests }} + memory: {{ cilium_memory_requests }} +{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %} + ports: +{% endif %} +{% if cilium_enable_prometheus %} + - name: prometheus + containerPort: {{ cilium_agent_scrape_port }} + hostPort: {{ cilium_agent_scrape_port }} + protocol: TCP +{% endif %} +{% if cilium_enable_hubble_metrics %} + - name: hubble-metrics + containerPort: {{ cilium_hubble_scrape_port }} + hostPort: {{ cilium_hubble_scrape_port }} + protocol: TCP +{% endif %} + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + - name: cilium-run + mountPath: /var/run/cilium + - name: cni-path + mountPath: 
/host/opt/cni/bin + - name: etc-cni-netd + mountPath: /host/etc/cni/net.d +{% if cilium_identity_allocation_mode == "kvstore" %} + - name: etcd-config-path + mountPath: /var/lib/etcd-config + readOnly: true + - name: etcd-secrets + mountPath: "{{cilium_cert_dir}}" + readOnly: true +{% endif %} + - name: clustermesh-secrets + mountPath: /var/lib/cilium/clustermesh + readOnly: true + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true +{% if cilium_ip_masq_agent_enable %} + - name: ip-masq-agent + mountPath: /etc/config + readOnly: true +{% endif %} + # Needed to be able to load kernel modules + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock +{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %} + - name: cilium-ipsec-secrets + mountPath: /etc/ipsec + readOnly: true +{% endif %} +{% if cilium_hubble_install %} + - name: hubble-tls + mountPath: /var/lib/cilium/tls/hubble + readOnly: true +{% endif %} +{% for volume_mount in cilium_agent_extra_volume_mounts %} + - {{ volume_mount | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} +# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service +{% if cilium_identity_allocation_mode == "kvstore" %} + dnsPolicy: ClusterFirstWithHostNet +{% endif %} + hostNetwork: true + initContainers: +{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_auto_mount %} + - name: mount-cgroup + image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: CGROUP_ROOT + value: {{ cilium_cgroup_host_root }} + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh and mount that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + securityContext: + privileged: true +{% endif %} +{% if cilium_version | regex_replace('v') is version('1.11.7', '>=') %} + - name: apply-sysctl-overwrites + image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. 
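+        # cilium-sysctlfix is expected to write a sysctl override file on the
+        # host so that kernel settings Cilium relies on (for example rp_filter)
+        # are not reverted by distribution defaults; the exact keys it sets
+        # depend on the Cilium version in use.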
+ - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + securityContext: + privileged: true +{% endif %} + - name: clean-cilium-state + image: "{{cilium_image_repo}}:{{cilium_image_tag}}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-state + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-bpf-state + optional: true +# Removed in 1.11 and up. +# https://github.com/cilium/cilium/commit/f7a3f59fd74983c600bfce9cac364b76d20849d9 +{% if cilium_version | regex_replace('v') is version('1.11', '<') %} + - name: CILIUM_WAIT_BPF_MOUNT + valueFrom: + configMapKeyRef: + key: wait-bpf-mount + name: cilium-config + optional: true +{% endif %} +{% if cilium_kube_proxy_replacement == 'strict' %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf +{% if cilium_version | regex_replace('v') is version('1.11', '>=') %} + # Required to mount cgroup filesystem from the host to cilium agent pod + - name: cilium-cgroup + mountPath: {{ cilium_cgroup_host_root }} + mountPropagation: HostToContainer +{% endif %} + - name: cilium-run + mountPath: /var/run/cilium + resources: + requests: + cpu: 100m + memory: 100Mi + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + hostNetwork: true +# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service +{% if cilium_identity_allocation_mode == "kvstore" %} + dnsPolicy: ClusterFirstWithHostNet +{% endif %} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + tolerations: + - operator: Exists + volumes: + # To keep state between restarts / upgrades + - name: cilium-run + hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + # To keep state between restarts / upgrades for bpf maps + - name: bpf-maps + hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate +{% if cilium_version | regex_replace('v') is version('1.11', '>=') %} + # To mount cgroup2 filesystem on the host + - name: hostproc + hostPath: + path: /proc + type: Directory + # To keep state between restarts / upgrades for cgroup2 filesystem + - name: cilium-cgroup + hostPath: + path: {{ cilium_cgroup_host_root }} + type: DirectoryOrCreate +{% endif %} + # To install cilium cni plugin in the host + - name: cni-path + hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + # To install cilium cni configuration in the host + - name: etc-cni-netd + hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + # To be able to load kernel modules + - name: lib-modules + hostPath: + path: /lib/modules + # To access iptables concurrently with other processes (e.g. 
kube-proxy) + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +{% if cilium_identity_allocation_mode == "kvstore" %} + # To read the etcd config stored in config maps + - name: etcd-config-path + configMap: + name: cilium-config + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + items: + - key: etcd-config + path: etcd.config + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: "{{cilium_cert_dir}}" +{% endif %} + # To read the clustermesh configuration + - name: clustermesh-secrets + secret: + secretName: cilium-clustermesh + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + optional: true + # To read the configuration from the config map + - name: cilium-config-path + configMap: + name: cilium-config +{% if cilium_ip_masq_agent_enable %} + - name: ip-masq-agent + configMap: + name: ip-masq-agent + optional: true + items: + - key: config + path: ip-masq-agent +{% endif %} +{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %} + - name: cilium-ipsec-secrets + secret: + secretName: cilium-ipsec-keys +{% endif %} +{% if cilium_hubble_install %} + - name: hubble-tls + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + sources: + - secret: + name: hubble-server-certs + optional: true + items: + - key: ca.crt + path: client-ca.crt + - key: tls.crt + path: server.crt + - key: tls.key + path: server.key +{% endif %} diff --git a/kubespray/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 new file mode 100644 index 0000000..c03ac59 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium + namespace: kube-system diff --git a/kubespray/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 new file mode 100644 index 0000000..a5fcc56 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +data: + keys: {{ cilium_ipsec_key }} +kind: Secret +metadata: + name: cilium-ipsec-keys + namespace: kube-system +type: Opaque \ No newline at end of file diff --git a/kubespray/roles/network_plugin/cilium/templates/hubble/config.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/hubble/config.yml.j2 new file mode 100644 index 0000000..4f42abe --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/hubble/config.yml.j2 @@ -0,0 +1,87 @@ +--- +# Source: cilium/templates/hubble-relay-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-relay-config + namespace: kube-system +data: + config.yaml: | + peer-service: unix:///var/run/cilium/hubble.sock + listen-address: :4245 + dial-timeout: + retry-timeout: + sort-buffer-len-max: + sort-buffer-drain-timeout: + tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt + tls-client-key-file: /var/lib/hubble-relay/tls/client.key + tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt + disable-server-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %} + disable-client-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %} +--- +# Source: 
cilium/templates/hubble-ui-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-ui-envoy + namespace: kube-system +data: + envoy.yaml: | + static_resources: + listeners: + - name: listener_hubble_ui + address: + socket_address: + address: 0.0.0.0 + port_value: 8081 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + config: + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ['*'] + routes: + - match: + prefix: '/api/' + route: + cluster: backend + max_grpc_timeout: 0s + prefix_rewrite: '/' + - match: + prefix: '/' + route: + cluster: frontend + cors: + allow_origin_string_match: + - prefix: '*' + allow_methods: GET, PUT, DELETE, POST, OPTIONS + allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout + max_age: '1728000' + expose_headers: grpc-status,grpc-message + http_filters: + - name: envoy.filters.http.grpc_web + - name: envoy.filters.http.cors + - name: envoy.filters.http.router + clusters: + - name: frontend + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + hosts: + - socket_address: + address: 127.0.0.1 + port_value: 8080 + - name: backend + connect_timeout: 0.25s + type: logical_dns + lb_policy: round_robin + http2_protocol_options: {} + hosts: + - socket_address: + address: 127.0.0.1 + port_value: 8090 diff --git a/kubespray/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 new file mode 100644 index 0000000..4a95565 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 @@ -0,0 +1,106 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: hubble-generate-certs +rules: + - apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - hubble-server-certs + - hubble-relay-client-certs + - hubble-relay-server-certs + verbs: + - update + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - hubble-ca-cert + verbs: + - update + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - hubble-ca-secret + verbs: + - get +{% endif %} +--- +# Source: cilium/templates/hubble-relay-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +rules: + - apiGroups: + - "" + resources: + - componentstatuses + - endpoints + - namespaces + - nodes + - pods + - services + verbs: + - get + - list + - watch +--- +# Source: cilium/templates/hubble-ui-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-ui +rules: + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - componentstatuses + - endpoints + - namespaces + - nodes + - pods + - services + verbs: + - get + - list + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - apiGroups: + - cilium.io + resources: + - "*" + verbs: + - get + - list + - watch diff --git a/kubespray/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 
b/kubespray/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 new file mode 100644 index 0000000..f033429 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 @@ -0,0 +1,44 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: hubble-generate-certs +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-generate-certs +subjects: +- kind: ServiceAccount + name: hubble-generate-certs + namespace: kube-system +{% endif %} +--- +# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-relay +subjects: +- kind: ServiceAccount + namespace: kube-system + name: hubble-relay +--- +# Source: cilium/templates/hubble-ui-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-ui +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-ui +subjects: +- kind: ServiceAccount + namespace: kube-system + name: hubble-ui diff --git a/kubespray/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 new file mode 100644 index 0000000..dd97bbf --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 @@ -0,0 +1,49 @@ +--- +# Source: cilium/templates/hubble-generate-certs-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: hubble-generate-certs + namespace: kube-system + labels: + k8s-app: hubble-generate-certs +spec: + schedule: "0 0 1 */4 *" + concurrencyPolicy: Forbid + jobTemplate: + spec: + template: + metadata: + labels: + k8s-app: hubble-generate-certs + spec: + serviceAccount: hubble-generate-certs + serviceAccountName: hubble-generate-certs + containers: + - name: certgen + image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - "/usr/bin/cilium-certgen" + # Because this is executed as a job, we pass the values as command + # line args instead of via config map. This allows users to inspect + # the values used in past runs by inspecting the completed pod. 
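+            # The validity durations passed below (94608000s, roughly three
+            # years) comfortably exceed the "0 0 1 */4 *" schedule above, which
+            # re-runs certgen about every four months while reusing the
+            # existing CA secret.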
+ args: + - "--cilium-namespace=kube-system" + - "--hubble-ca-reuse-secret=true" + - "--hubble-ca-secret-name=hubble-ca-secret" + - "--hubble-ca-generate=true" + - "--hubble-ca-validity-duration=94608000s" + - "--hubble-ca-config-map-create=true" + - "--hubble-ca-config-map-name=hubble-ca-cert" + - "--hubble-server-cert-generate=true" + - "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io" + - "--hubble-server-cert-validity-duration=94608000s" + - "--hubble-server-cert-secret-name=hubble-server-certs" + - "--hubble-relay-client-cert-generate=true" + - "--hubble-relay-client-cert-validity-duration=94608000s" + - "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs" + - "--hubble-relay-server-cert-generate=false" + hostNetwork: true + restartPolicy: OnFailure + ttlSecondsAfterFinished: 1800 diff --git a/kubespray/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 new file mode 100644 index 0000000..43dd02b --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 @@ -0,0 +1,161 @@ +--- +# Source: cilium/templates/hubble-relay-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hubble-relay + labels: + k8s-app: hubble-relay + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-relay + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + labels: + k8s-app: hubble-relay + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "k8s-app" + operator: In + values: + - cilium + topologyKey: "kubernetes.io/hostname" + containers: + - name: hubble-relay + image: "{{ cilium_hubble_relay_image_repo }}:{{ cilium_hubble_relay_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - hubble-relay + args: + - serve + ports: + - name: grpc + containerPort: 4245 + readinessProbe: + tcpSocket: + port: grpc + livenessProbe: + tcpSocket: + port: grpc + volumeMounts: + - mountPath: /var/run/cilium + name: hubble-sock-dir + readOnly: true + - mountPath: /etc/hubble-relay + name: config + readOnly: true + - mountPath: /var/lib/hubble-relay/tls + name: tls + readOnly: true + restartPolicy: Always + serviceAccount: hubble-relay + serviceAccountName: hubble-relay + terminationGracePeriodSeconds: 0 + volumes: + - configMap: + name: hubble-relay-config + items: + - key: config.yaml + path: config.yaml + name: config + - hostPath: + path: /var/run/cilium + type: Directory + name: hubble-sock-dir + - projected: + sources: + - secret: + name: hubble-relay-client-certs + items: + - key: tls.crt + path: client.crt + - key: tls.key + path: client.key + - configMap: + name: hubble-ca-cert + items: + - key: ca.crt + path: hubble-server-ca.crt + name: tls +--- +# Source: cilium/templates/hubble-ui-deployment.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + namespace: kube-system + labels: + k8s-app: hubble-ui + name: hubble-ui +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-ui + template: + metadata: + annotations: + labels: + k8s-app: hubble-ui + spec: + securityContext: + runAsUser: 1001 + serviceAccount: hubble-ui + serviceAccountName: hubble-ui + containers: + - name: frontend + image: "{{ cilium_hubble_ui_image_repo }}:{{ cilium_hubble_ui_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + ports: + - containerPort: 8080 + name: 
http + resources: + {} + - name: backend + image: "{{ cilium_hubble_ui_backend_image_repo }}:{{ cilium_hubble_ui_backend_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: EVENTS_SERVER_PORT + value: "8090" + - name: FLOWS_API_ADDR + value: "hubble-relay:80" + ports: + - containerPort: 8090 + name: grpc + resources: + {} + - name: proxy + image: "{{ cilium_hubble_envoy_image_repo }}:{{ cilium_hubble_envoy_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + ports: + - containerPort: 8081 + name: http + resources: + {} + command: ["envoy"] + args: + [ + "-c", + "/etc/envoy.yaml", + "-l", + "info" + ] + volumeMounts: + - name: hubble-ui-envoy-yaml + mountPath: /etc/envoy.yaml + subPath: envoy.yaml + volumes: + - name: hubble-ui-envoy-yaml + configMap: + name: hubble-ui-envoy diff --git a/kubespray/roles/network_plugin/cilium/templates/hubble/job.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/hubble/job.yml.j2 new file mode 100644 index 0000000..38a42bf --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/hubble/job.yml.j2 @@ -0,0 +1,45 @@ +--- +# Source: cilium/templates/hubble-generate-certs-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: hubble-generate-certs + namespace: kube-system + labels: + k8s-app: hubble-generate-certs +spec: + template: + metadata: + labels: + k8s-app: hubble-generate-certs + spec: + serviceAccount: hubble-generate-certs + serviceAccountName: hubble-generate-certs + containers: + - name: certgen + image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - "/usr/bin/cilium-certgen" + # Because this is executed as a job, we pass the values as command + # line args instead of via config map. This allows users to inspect + # the values used in past runs by inspecting the completed pod. 
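+        # This one-shot Job issues the initial certificates at deploy time; the
+        # hubble-generate-certs CronJob defined alongside it re-runs the same
+        # certgen arguments periodically for rotation.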
+ args: + - "--cilium-namespace=kube-system" + - "--hubble-ca-reuse-secret=true" + - "--hubble-ca-secret-name=hubble-ca-secret" + - "--hubble-ca-generate=true" + - "--hubble-ca-validity-duration=94608000s" + - "--hubble-ca-config-map-create=true" + - "--hubble-ca-config-map-name=hubble-ca-cert" + - "--hubble-server-cert-generate=true" + - "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io" + - "--hubble-server-cert-validity-duration=94608000s" + - "--hubble-server-cert-secret-name=hubble-server-certs" + - "--hubble-relay-client-cert-generate=true" + - "--hubble-relay-client-cert-validity-duration=94608000s" + - "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs" + - "--hubble-relay-server-cert-generate=false" + hostNetwork: true + restartPolicy: OnFailure + ttlSecondsAfterFinished: 1800 diff --git a/kubespray/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 new file mode 100644 index 0000000..9b3203d --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 @@ -0,0 +1,23 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-generate-certs + namespace: kube-system +{% endif %} +--- +# Source: cilium/templates/hubble-relay-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-relay + namespace: kube-system +--- +# Source: cilium/templates/hubble-ui-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-ui + namespace: kube-system diff --git a/kubespray/roles/network_plugin/cilium/templates/hubble/service.yml.j2 b/kubespray/roles/network_plugin/cilium/templates/hubble/service.yml.j2 new file mode 100644 index 0000000..56dba76 --- /dev/null +++ b/kubespray/roles/network_plugin/cilium/templates/hubble/service.yml.j2 @@ -0,0 +1,58 @@ +{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %} +--- +# Source: cilium/templates/cilium-agent-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-metrics + namespace: kube-system + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: "9091" + labels: + k8s-app: hubble +spec: + clusterIP: None + type: ClusterIP + ports: + - name: hubble-metrics + port: 9091 + protocol: TCP + targetPort: hubble-metrics + selector: + k8s-app: cilium +{% endif %} +--- +# Source: cilium/templates/hubble-relay-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-relay + namespace: kube-system + labels: + k8s-app: hubble-relay +spec: + type: ClusterIP + selector: + k8s-app: hubble-relay + ports: + - protocol: TCP + port: 80 + targetPort: 4245 +--- +# Source: cilium/templates/hubble-ui-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-ui + labels: + k8s-app: hubble-ui + namespace: kube-system +spec: + selector: + k8s-app: hubble-ui + ports: + - name: http + port: 80 + targetPort: 8081 + type: ClusterIP diff --git a/kubespray/roles/network_plugin/cni/tasks/main.yml b/kubespray/roles/network_plugin/cni/tasks/main.yml new file mode 100644 index 0000000..b8bcec3 --- /dev/null +++ b/kubespray/roles/network_plugin/cni/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: CNI | make sure /opt/cni/bin exists + file: + path: /opt/cni/bin + state: directory + mode: 0755 + owner: "{{ kube_owner }}" + recurse: true + +- name: CNI | Copy cni plugins + unarchive: + src: "{{ local_release_dir 
}}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" + dest: "/opt/cni/bin" + mode: 0755 + remote_src: yes diff --git a/kubespray/roles/network_plugin/flannel/defaults/main.yml b/kubespray/roles/network_plugin/flannel/defaults/main.yml new file mode 100644 index 0000000..cd1dcf1 --- /dev/null +++ b/kubespray/roles/network_plugin/flannel/defaults/main.yml @@ -0,0 +1,28 @@ +--- +# Flannel public IP +# The address that flannel should advertise as how to access the system +# Disabled until https://github.com/coreos/flannel/issues/712 is fixed +# flannel_public_ip: "{{ access_ip|default(ip|default(fallback_ips[inventory_hostname])) }}" + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +flannel_backend_type: "vxlan" +flannel_vxlan_vni: 1 +flannel_vxlan_port: 8472 +flannel_vxlan_direct_routing: false + +# Limits for apps +flannel_memory_limit: 500M +flannel_cpu_limit: 300m +flannel_memory_requests: 64M +flannel_cpu_requests: 150m diff --git a/kubespray/roles/network_plugin/flannel/meta/main.yml b/kubespray/roles/network_plugin/flannel/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/roles/network_plugin/flannel/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/roles/network_plugin/flannel/tasks/main.yml b/kubespray/roles/network_plugin/flannel/tasks/main.yml new file mode 100644 index 0000000..2fd82e9 --- /dev/null +++ b/kubespray/roles/network_plugin/flannel/tasks/main.yml @@ -0,0 +1,21 @@ +--- + +- name: Flannel | Stop if kernel version is too low for Flannel Wireguard encryption + assert: + that: ansible_kernel.split('-')[0] is version('5.6.0', '>=') + when: + - kube_network_plugin == 'flannel' + - flannel_backend_type == 'wireguard' + - not ignore_assert_errors + +- name: Flannel | Create Flannel manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: flannel, file: cni-flannel-rbac.yml, type: sa} + - {name: kube-flannel, file: cni-flannel.yml, type: ds} + register: flannel_node_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/kubespray/roles/network_plugin/flannel/tasks/reset.yml b/kubespray/roles/network_plugin/flannel/tasks/reset.yml new file mode 100644 index 0000000..2fd86e2 --- /dev/null +++ b/kubespray/roles/network_plugin/flannel/tasks/reset.yml @@ -0,0 +1,24 @@ +--- +- name: reset | check cni network device + stat: + path: /sys/class/net/cni0 + get_attributes: no + get_checksum: no + get_mime: no + register: cni + +- name: reset | remove the network device created by the flannel + command: ip link del cni0 + when: cni.stat.exists + +- name: reset | check flannel network device + stat: + path: /sys/class/net/flannel.1 + get_attributes: no + get_checksum: no + get_mime: no + register: flannel + +- name: reset | remove the network device created by the flannel + command: ip link del flannel.1 + when: flannel.stat.exists diff --git 
a/kubespray/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/kubespray/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 new file mode 100644 index 0000000..7c73b09 --- /dev/null +++ b/kubespray/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system diff --git a/kubespray/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/kubespray/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 new file mode 100644 index 0000000..607d225 --- /dev/null +++ b/kubespray/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -0,0 +1,170 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "{{ kube_pods_subnet }}", + "EnableIPv4": true, +{% if enable_dual_stack_networks %} + "EnableIPv6": true, + "IPv6Network": "{{ kube_pods_subnet_ipv6 }}", +{% endif %} + "Backend": { + "Type": "{{ flannel_backend_type }}"{% if flannel_backend_type == "vxlan" %}, + "VNI": {{ flannel_vxlan_vni }}, + "Port": {{ flannel_vxlan_port }}, + "DirectRouting": {{ flannel_vxlan_direct_routing | to_json }} +{% endif %} + } + } +{% for arch in ['amd64', 'arm64', 'arm', 'ppc64le', 's390x'] %} +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: +{% if arch == 'amd64' %} + name: kube-flannel +{% else %} + name: kube-flannel-ds-{{ arch }} +{% endif %} + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + priorityClassName: system-node-critical + serviceAccountName: flannel + containers: + - name: kube-flannel + image: {{ flannel_image_repo }}:{{ flannel_image_tag | regex_replace(image_arch,'') }}{{ arch }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ flannel_cpu_limit }} + memory: {{ flannel_memory_limit }} + requests: + cpu: {{ flannel_cpu_requests }} + memory: {{ flannel_memory_requests }} + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ] + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: EVENT_QUEUE_DEPTH + value: "5000" + volumeMounts: + - name: run + 
mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: xtables-lock + mountPath: /run/xtables.lock + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: kubernetes.io/arch + operator: In + values: + - {{ arch }} + initContainers: + - name: install-cni-plugin + image: {{ flannel_init_image_repo }}:{{ flannel_init_image_tag | regex_replace(image_arch,'') }}{{ arch }} + command: + - cp + args: + - -f + - /flannel + - /opt/cni/bin/flannel + volumeMounts: + - name: cni-plugin + mountPath: /opt/cni/bin + - name: install-cni + image: {{ flannel_image_repo }}:{{ flannel_image_tag | regex_replace(image_arch,'') }}{{ arch }} + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - operator: Exists + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: cni-plugin + hostPath: + path: /opt/cni/bin + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate +{% endfor %} diff --git a/kubespray/roles/network_plugin/kube-ovn/OWNERS b/kubespray/roles/network_plugin/kube-ovn/OWNERS new file mode 100644 index 0000000..84256aa --- /dev/null +++ b/kubespray/roles/network_plugin/kube-ovn/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +emeritus_approvers: +- oilbeater diff --git a/kubespray/roles/network_plugin/kube-ovn/defaults/main.yml b/kubespray/roles/network_plugin/kube-ovn/defaults/main.yml new file mode 100644 index 0000000..430f698 --- /dev/null +++ b/kubespray/roles/network_plugin/kube-ovn/defaults/main.yml @@ -0,0 +1,98 @@ +--- +kube_ovn_db_cpu_request: 500m +kube_ovn_db_memory_request: 200Mi +kube_ovn_db_cpu_limit: 3000m +kube_ovn_db_memory_limit: 3000Mi +kube_ovn_node_cpu_request: 200m +kube_ovn_node_memory_request: 200Mi +kube_ovn_node_cpu_limit: 1000m +kube_ovn_node_memory_limit: 800Mi +kube_ovn_cni_server_cpu_request: 200m +kube_ovn_cni_server_memory_request: 200Mi +kube_ovn_cni_server_cpu_limit: 1000m +kube_ovn_cni_server_memory_limit: 1Gi +kube_ovn_controller_cpu_request: 200m +kube_ovn_controller_memory_request: 200Mi +kube_ovn_controller_cpu_limit: 1000m +kube_ovn_controller_memory_limit: 1Gi +kube_ovn_pinger_cpu_request: 100m +kube_ovn_pinger_memory_request: 200Mi +kube_ovn_pinger_cpu_limit: 200m +kube_ovn_pinger_memory_limit: 400Mi +kube_ovn_monitor_memory_request: 200Mi +kube_ovn_monitor_cpu_request: 200m +kube_ovn_monitor_memory_limit: 200Mi +kube_ovn_monitor_cpu_limit: 200m +kube_ovn_dpdk_node_cpu_request: 1000m +kube_ovn_dpdk_node_memory_request: 2Gi +kube_ovn_dpdk_node_cpu_limit: 1000m +kube_ovn_dpdk_node_memory_limit: 2Gi + +kube_ovn_central_replics: 1 +kube_ovn_controller_replics: 1 + +# geneve or vlan +kube_ovn_network_type: geneve + +# geneve, vxlan or stt. 
ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module +kube_ovn_tunnel_type: geneve + +## The nic to support container network can be a nic name or a group of regex separated by comma e.g: 'enp6s0f0,eth.*', if empty will use the nic that the default route use. +# kube_ovn_iface: eth1 +## The MTU used by pod iface in overlay networks (default iface MTU - 100) +# kube_ovn_mtu: 1333 + +## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bind to the physical port. +kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false +kube_ovn_dpdk_tunnel_iface: br-phy + +## eip snat +kube_ovn_eip_snat_enabled: true + +## keep vm ip +kube_ovn_keep_vm_ip: true + +## cni config priority, default: 01 +kube_ovn_cni_config_priority: 01 diff --git a/kubespray/roles/network_plugin/kube-ovn/tasks/main.yml b/kubespray/roles/network_plugin/kube-ovn/tasks/main.yml new file mode 100644 index 0000000..f720c51 --- /dev/null +++ b/kubespray/roles/network_plugin/kube-ovn/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Kube-OVN | Label ovn-db node + command: >- + {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kube-OVN | Create Kube-OVN manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: kube-ovn-crd, file: cni-kube-ovn-crd.yml} + - {name: ovn, file: cni-ovn.yml} + - {name: kube-ovn, file: cni-kube-ovn.yml} + register: kube_ovn_node_manifests diff --git a/kubespray/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 b/kubespray/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 new file mode 100644 index 0000000..5878d2c --- /dev/null +++ b/kubespray/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 @@ -0,0 +1,1160 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vpc-nat-gateways.kubeovn.io +spec: + group: kubeovn.io + names: + plural: vpc-nat-gateways + singular: vpc-nat-gateway + shortNames: + - vpc-nat-gw + kind: VpcNatGateway + listKind: VpcNatGatewayList + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.vpc + name: Vpc + type: string + - jsonPath: .spec.subnet + name: Subnet + type: string + - 
jsonPath: .spec.lanIp + name: LanIP + type: string + name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + lanIp: + type: string + subnet: + type: string + vpc: + type: string + selector: + type: array + items: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-eips.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-eips + singular: iptables-eip + shortNames: + - eip + kind: IptablesEIP + listKind: IptablesEIPList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.ip + name: IP + type: string + - jsonPath: .spec.macAddress + name: Mac + type: string + - jsonPath: .status.nat + name: Nat + type: string + - jsonPath: .spec.natGwDp + name: NatGwDp + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + ip: + type: string + nat: + type: string + redo: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + v4ip: + type: string + v6ip: + type: string + macAddress: + type: string + natGwDp: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-fip-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-fip-rules + singular: iptables-fip-rule + shortNames: + - fip + kind: IptablesFIPRule + listKind: IptablesFIPRuleList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.eip + name: Eip + type: string + - jsonPath: .status.v4ip + name: V4ip + type: string + - jsonPath: .spec.internalIp + name: InternalIp + type: string + - jsonPath: .status.v6ip + name: V6ip + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + - jsonPath: .status.natGwDp + name: NatGwDp + type: string + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + natGwDp: + type: string + redo: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + eip: + type: string + internalIp: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-dnat-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-dnat-rules + singular: iptables-dnat-rule + shortNames: + - dnat + kind: IptablesDnatRule + listKind: IptablesDnatRuleList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.eip + name: Eip + type: string + - jsonPath: .spec.protocol + name: Protocol + type: string + - jsonPath: .status.v4ip + name: V4ip + type: string + - jsonPath: .status.v6ip + name: V6ip + type: string + - jsonPath: .spec.internalIp + 
name: InternalIp + type: string + - jsonPath: .spec.externalPort + name: ExternalPort + type: string + - jsonPath: .spec.internalPort + name: InternalPort + type: string + - jsonPath: .status.natGwDp + name: NatGwDp + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + natGwDp: + type: string + redo: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + eip: + type: string + externalPort: + type: string + protocol: + type: string + internalIp: + type: string + internalPort: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-snat-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-snat-rules + singular: iptables-snat-rule + shortNames: + - snat + kind: IptablesSnatRule + listKind: IptablesSnatRuleList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.eip + name: EIP + type: string + - jsonPath: .status.v4ip + name: V4ip + type: string + - jsonPath: .status.v6ip + name: V6ip + type: string + - jsonPath: .spec.internalCIDR + name: InternalCIDR + type: string + - jsonPath: .status.natGwDp + name: NatGwDp + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + natGwDp: + type: string + redo: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + eip: + type: string + internalCIDR: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vpcs.kubeovn.io +spec: + group: kubeovn.io + versions: + - additionalPrinterColumns: + - jsonPath: .status.standby + name: Standby + type: boolean + - jsonPath: .status.subnets + name: Subnets + type: string + - jsonPath: .spec.namespaces + name: Namespaces + type: string + name: v1 + schema: + openAPIV3Schema: + properties: + spec: + properties: + namespaces: + items: + type: string + type: array + staticRoutes: + items: + properties: + policy: + type: string + cidr: + type: string + nextHopIP: + type: string + type: object + type: array + policyRoutes: + items: + properties: + priority: + type: integer + action: + type: string + match: + type: string + nextHopIP: + type: string + type: object + type: array + vpcPeerings: + items: + properties: + remoteVpc: + type: string + localConnectIP: + type: string + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + type: string + lastUpdateTime: + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + default: + type: boolean + defaultLogicalSwitch: + type: string + router: + type: string + 
standby: + type: boolean + subnets: + items: + type: string + type: array + vpcPeerings: + items: + type: string + type: array + tcpLoadBalancer: + type: string + tcpSessionLoadBalancer: + type: string + udpLoadBalancer: + type: string + udpSessionLoadBalancer: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + names: + kind: Vpc + listKind: VpcList + plural: vpcs + shortNames: + - vpc + singular: vpc + scope: Cluster +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ips.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: V4IP + type: string + jsonPath: .spec.v4IpAddress + - name: V6IP + type: string + jsonPath: .spec.v6IpAddress + - name: Mac + type: string + jsonPath: .spec.macAddress + - name: Node + type: string + jsonPath: .spec.nodeName + - name: Subnet + type: string + jsonPath: .spec.subnet + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + podName: + type: string + namespace: + type: string + subnet: + type: string + attachSubnets: + type: array + items: + type: string + nodeName: + type: string + ipAddress: + type: string + v4IpAddress: + type: string + v6IpAddress: + type: string + attachIps: + type: array + items: + type: string + macAddress: + type: string + attachMacs: + type: array + items: + type: string + containerID: + type: string + podType: + type: string + scope: Cluster + names: + plural: ips + singular: ip + kind: IP + shortNames: + - ip +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vips.kubeovn.io +spec: + group: kubeovn.io + names: + plural: vips + singular: vip + shortNames: + - vip + kind: Vip + listKind: VipList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: V4IP + type: string + jsonPath: .spec.v4ip + - name: PV4IP + type: string + jsonPath: .spec.parentV4ip + - name: Mac + type: string + jsonPath: .spec.macAddress + - name: PMac + type: string + jsonPath: .spec.ParentMac + - name: V6IP + type: string + jsonPath: .spec.v6ip + - name: PV6IP + type: string + jsonPath: .spec.parentV6ip + - name: Subnet + type: string + jsonPath: .spec.subnet + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + mac: + type: string + pv4ip: + type: string + pv6ip: + type: string + pmac: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + namespace: + type: string + subnet: + type: string + attachSubnets: + type: array + items: + type: string + v4ip: + type: string + macAddress: + type: string + v6ip: + type: string + parentV4ip: + type: string + parentMac: + type: string + parentV6ip: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: subnets.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Provider + type: string + jsonPath: .spec.provider + - name: Vpc + type: string + jsonPath: 
.spec.vpc + - name: Protocol + type: string + jsonPath: .spec.protocol + - name: CIDR + type: string + jsonPath: .spec.cidrBlock + - name: Private + type: boolean + jsonPath: .spec.private + - name: NAT + type: boolean + jsonPath: .spec.natOutgoing + - name: Default + type: boolean + jsonPath: .spec.default + - name: GatewayType + type: string + jsonPath: .spec.gatewayType + - name: V4Used + type: number + jsonPath: .status.v4usingIPs + - name: V4Available + type: number + jsonPath: .status.v4availableIPs + - name: V6Used + type: number + jsonPath: .status.v6usingIPs + - name: V6Available + type: number + jsonPath: .status.v6availableIPs + - name: ExcludeIPs + type: string + jsonPath: .spec.excludeIps + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + v4availableIPs: + type: number + v4usingIPs: + type: number + v6availableIPs: + type: number + v6usingIPs: + type: number + activateGateway: + type: string + dhcpV4OptionsUUID: + type: string + dhcpV6OptionsUUID: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + vpc: + type: string + default: + type: boolean + protocol: + type: string + enum: + - IPv4 + - IPv6 + - Dual + cidrBlock: + type: string + namespaces: + type: array + items: + type: string + gateway: + type: string + provider: + type: string + excludeIps: + type: array + items: + type: string + vips: + type: array + items: + type: string + gatewayType: + type: string + allowSubnets: + type: array + items: + type: string + gatewayNode: + type: string + natOutgoing: + type: boolean + externalEgressGateway: + type: string + policyRoutingPriority: + type: integer + minimum: 1 + maximum: 32765 + policyRoutingTableID: + type: integer + minimum: 1 + maximum: 2147483647 + not: + enum: + - 252 # compat + - 253 # default + - 254 # main + - 255 # local + private: + type: boolean + vlan: + type: string + logicalGateway: + type: boolean + disableGatewayCheck: + type: boolean + disableInterConnection: + type: boolean + htbqos: + type: string + enableDHCP: + type: boolean + dhcpV4Options: + type: string + dhcpV6Options: + type: string + enableIPv6RA: + type: boolean + ipv6RAConfigs: + type: string + acls: + type: array + items: + type: object + properties: + direction: + type: string + enum: + - from-lport + - to-lport + priority: + type: integer + minimum: 0 + maximum: 32767 + match: + type: string + action: + type: string + enum: + - allow-related + - allow-stateless + - allow + - drop + - reject + scope: Cluster + names: + plural: subnets + singular: subnet + kind: Subnet + shortNames: + - subnet +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vlans.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + id: + type: integer + minimum: 0 + maximum: 4095 + provider: + type: string + vlanId: + type: integer + description: Deprecated in favor of id + providerInterfaceName: + type: string + description: Deprecated in favor of provider + required: + - provider + status: + type: object + properties: + subnets: + type: array + items: + type: string + additionalPrinterColumns: + - name: ID + type: string + jsonPath: 
.spec.id + - name: Provider + type: string + jsonPath: .spec.provider + scope: Cluster + names: + plural: vlans + singular: vlan + kind: Vlan + shortNames: + - vlan +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: provider-networks.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 12 + not: + enum: + - int + - external + spec: + type: object + properties: + defaultInterface: + type: string + maxLength: 15 + pattern: '^[^/\s]+$' + customInterfaces: + type: array + items: + type: object + properties: + interface: + type: string + maxLength: 15 + pattern: '^[^/\s]+$' + nodes: + type: array + items: + type: string + exchangeLinkName: + type: boolean + excludeNodes: + type: array + items: + type: string + required: + - defaultInterface + status: + type: object + properties: + ready: + type: boolean + readyNodes: + type: array + items: + type: string + vlans: + type: array + items: + type: string + conditions: + type: array + items: + type: object + properties: + node: + type: string + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + additionalPrinterColumns: + - name: DefaultInterface + type: string + jsonPath: .spec.defaultInterface + - name: Ready + type: boolean + jsonPath: .status.ready + scope: Cluster + names: + plural: provider-networks + singular: provider-network + kind: ProviderNetwork + listKind: ProviderNetworkList +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: security-groups.kubeovn.io +spec: + group: kubeovn.io + names: + plural: security-groups + singular: security-group + shortNames: + - sg + kind: SecurityGroup + listKind: SecurityGroupList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + ingressRules: + type: array + items: + type: object + properties: + ipVersion: + type: string + protocol: + type: string + priority: + type: integer + remoteType: + type: string + remoteAddress: + type: string + remoteSecurityGroup: + type: string + portRangeMin: + type: integer + portRangeMax: + type: integer + policy: + type: string + egressRules: + type: array + items: + type: object + properties: + ipVersion: + type: string + protocol: + type: string + priority: + type: integer + remoteType: + type: string + remoteAddress: + type: string + remoteSecurityGroup: + type: string + portRangeMin: + type: integer + portRangeMax: + type: integer + policy: + type: string + allowSameGroupTraffic: + type: boolean + status: + type: object + properties: + portGroup: + type: string + allowSameGroupTraffic: + type: boolean + ingressMd5: + type: string + egressMd5: + type: string + ingressLastSyncSuccess: + type: boolean + egressLastSyncSuccess: + type: boolean + subresources: + status: {} + conversion: + strategy: None +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: htbqoses.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: PRIORITY + type: string + jsonPath: .spec.priority + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: 
object + properties: + priority: + type: string # Value in range 0 to 4,294,967,295. + scope: Cluster + names: + plural: htbqoses + singular: htbqos + kind: HtbQos + shortNames: + - htbqos diff --git a/kubespray/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 b/kubespray/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 new file mode 100644 index 0000000..c6eacc1 --- /dev/null +++ b/kubespray/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 @@ -0,0 +1,610 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: kube-ovn-controller + namespace: kube-system + annotations: + kubernetes.io/description: | + kube-ovn controller +spec: + replicas: {{ kube_ovn_controller_replics }} + selector: + matchLabels: + app: kube-ovn-controller + strategy: + rollingUpdate: + maxSurge: 0% + maxUnavailable: 100% + type: RollingUpdate + template: + metadata: + labels: + app: kube-ovn-controller + component: network + type: infra + spec: + tolerations: + - operator: Exists + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: kube-ovn-controller + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + containers: + - name: kube-ovn-controller + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /kube-ovn/start-controller.sh + args: + - --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{''}} + - --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{''}} + - --default-gateway-check={{ kube_ovn_default_gateway_check|string }} + - --default-logical-gateway={{ kube_ovn_default_logical_gateway|string }} + - --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{''}} + - --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{''}} + - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}} + - --network-type={{ kube_ovn_network_type }} + - --default-interface-name={{ kube_ovn_default_interface_name|default('') }} + - --default-vlan-id={{ kube_ovn_default_vlan_id }} + - --pod-nic-type={{ kube_ovn_pod_nic_type }} + - --enable-lb={{ kube_ovn_enable_lb|string }} + - --enable-np={{ kube_ovn_enable_np|string }} + - --enable-eip-snat={{ kube_ovn_eip_snat_enabled }} + - --enable-external-vpc={{ kube_ovn_enable_external_vpc|string }} + - --logtostderr=false + - --alsologtostderr=true + - --log_file=/var/log/kube-ovn/kube-ovn-controller.log + - --log_file_max_size=0 + - --keep-vm-ip={{ kube_ovn_keep_vm_ip }} + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KUBE_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/log/kube-ovn + name: kube-ovn-log + - mountPath: /var/run/tls + name: kube-ovn-tls + readinessProbe: + exec: + command: + - /kube-ovn/kube-ovn-controller-healthcheck + periodSeconds: 3 + timeoutSeconds: 45 + 
livenessProbe: + exec: + command: + - /kube-ovn/kube-ovn-controller-healthcheck + initialDelaySeconds: 300 + periodSeconds: 7 + failureThreshold: 5 + timeoutSeconds: 45 + resources: + requests: + cpu: {{ kube_ovn_controller_cpu_request }} + memory: {{ kube_ovn_controller_memory_request }} + limits: + cpu: {{ kube_ovn_controller_cpu_limit }} + memory: {{ kube_ovn_controller_memory_limit }} + nodeSelector: + kubernetes.io/os: "linux" + volumes: + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-log + hostPath: + path: /var/log/kube-ovn + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls + +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: kube-ovn-cni + namespace: kube-system + annotations: + kubernetes.io/description: | + This daemon set launches the kube-ovn cni daemon. +spec: + selector: + matchLabels: + app: kube-ovn-cni + template: + metadata: + labels: + app: kube-ovn-cni + component: network + type: infra + spec: + tolerations: + - operator: Exists + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + hostPID: true + initContainers: + - name: install-cni + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/kube-ovn/install-cni.sh"] + securityContext: + runAsUser: 0 + privileged: true + volumeMounts: + - mountPath: /opt/cni/bin + name: cni-bin + containers: + - name: cni-server + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - bash + - /kube-ovn/start-cniserver.sh + args: + - --enable-mirror={{ kube_ovn_traffic_mirror | lower }} + - --encap-checksum={{ kube_ovn_encap_checksum | lower }} + - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}} + - --iface={{ kube_ovn_iface|default('') }} + - --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }} + - --network-type={{ kube_ovn_network_type }} + - --default-interface-name={{ kube_ovn_default_interface_name|default('') }} +{% if kube_ovn_mtu is defined %} + - --mtu={{ kube_ovn_mtu }} +{% endif %} + - --cni-conf-name={{ kube_ovn_cni_config_priority }}-kube-ovn.conflist + - --logtostderr=false + - --alsologtostderr=true + - --log_file=/var/log/kube-ovn/kube-ovn-cni.log + - --log_file_max_size=0 + securityContext: + runAsUser: 0 + privileged: true + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MODULES + value: kube_ovn_fastpath.ko + - name: RPMS + value: openvswitch-kmod + volumeMounts: + - name: host-modules + mountPath: /lib/modules + readOnly: true + - name: shared-dir + mountPath: /var/lib/kubelet/pods + - mountPath: /etc/openvswitch + name: systemid + - mountPath: /etc/cni/net.d + name: cni-conf + - mountPath: /run/openvswitch + name: host-run-ovs + mountPropagation: Bidirectional + - mountPath: /run/ovn + name: host-run-ovn + - mountPath: /var/run/netns + name: host-ns + mountPropagation: HostToContainer + - mountPath: /var/log/kube-ovn + name: kube-ovn-log + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + - mountPath: /etc/localtime + name: localtime + - mountPath: /tmp + name: tmp + livenessProbe: + failureThreshold: 3 + 
initialDelaySeconds: 30 + periodSeconds: 7 + successThreshold: 1 + tcpSocket: + port: 10665 + timeoutSeconds: 3 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 7 + successThreshold: 1 + tcpSocket: + port: 10665 + timeoutSeconds: 3 + resources: + requests: + cpu: {{ kube_ovn_cni_server_cpu_request }} + memory: {{ kube_ovn_cni_server_memory_request }} + limits: + cpu: {{ kube_ovn_cni_server_cpu_limit }} + memory: {{ kube_ovn_cni_server_memory_limit }} + nodeSelector: + kubernetes.io/os: "linux" + volumes: + - name: host-modules + hostPath: + path: /lib/modules + - name: shared-dir + hostPath: + path: /var/lib/kubelet/pods + - name: systemid + hostPath: + path: /etc/origin/openvswitch + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: cni-conf + hostPath: + path: /etc/cni/net.d + - name: cni-bin + hostPath: + path: /opt/cni/bin + - name: host-ns + hostPath: + path: /var/run/netns + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: kube-ovn-log + hostPath: + path: /var/log/kube-ovn + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: tmp + hostPath: + path: /tmp +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: kube-ovn-pinger + namespace: kube-system + annotations: + kubernetes.io/description: | + This daemon set launches the openvswitch daemon. +spec: + selector: + matchLabels: + app: kube-ovn-pinger + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: kube-ovn-pinger + component: network + type: infra + spec: + serviceAccountName: ovn + hostPID: true + containers: + - name: pinger + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /kube-ovn/kube-ovn-pinger + args: + - --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{''}} + - --external-dns={{ kube_ovn_external_dns }} + - --logtostderr=false + - --alsologtostderr=true + - --log_file=/var/log/kube-ovn/kube-ovn-pinger.log + - --log_file_max_size=0 + securityContext: + runAsUser: 0 + privileged: false + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /lib/modules + name: host-modules + readOnly: true + - mountPath: /run/openvswitch + name: host-run-ovs + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /sys + name: host-sys + readOnly: true + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + - mountPath: /var/log/kube-ovn + name: kube-ovn-log + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/run/tls + name: kube-ovn-tls + resources: + requests: + cpu: {{ kube_ovn_pinger_cpu_request }} + memory: {{ kube_ovn_pinger_memory_request }} + limits: + cpu: {{ kube_ovn_pinger_cpu_limit }} + memory: {{ kube_ovn_pinger_memory_limit }} + nodeSelector: + kubernetes.io/os: "linux" + volumes: + - name: host-modules + 
hostPath: + path: /lib/modules + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-sys + hostPath: + path: /sys + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: kube-ovn-log + hostPath: + path: /var/log/kube-ovn + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: kube-ovn-monitor + namespace: kube-system + annotations: + kubernetes.io/description: | + Metrics for OVN components: northd, nb and sb. +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: kube-ovn-monitor + template: + metadata: + labels: + app: kube-ovn-monitor + component: network + type: infra + spec: + tolerations: + - operator: Exists + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: kube-ovn-monitor + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + containers: + - name: kube-ovn-monitor + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/kube-ovn/start-ovn-monitor.sh"] + securityContext: + runAsUser: 0 + privileged: false + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + requests: + cpu: {{ kube_ovn_monitor_cpu_request }} + memory: {{ kube_ovn_monitor_memory_request }} + limits: + cpu: {{ kube_ovn_monitor_cpu_limit }} + memory: {{ kube_ovn_monitor_memory_limit }} + volumeMounts: + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /etc/ovn + name: host-config-ovn + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/run/tls + name: kube-ovn-tls + readinessProbe: + exec: + command: + - cat + - /var/run/ovn/ovn-controller.pid + periodSeconds: 10 + timeoutSeconds: 45 + livenessProbe: + exec: + command: + - cat + - /var/run/ovn/ovn-controller.pid + initialDelaySeconds: 30 + periodSeconds: 10 + failureThreshold: 5 + timeoutSeconds: 45 + nodeSelector: + kubernetes.io/os: "linux" + kube-ovn/role: "master" + volumes: + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-config-ovn + hostPath: + path: /etc/origin/ovn + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-monitor + namespace: kube-system + labels: + app: kube-ovn-monitor +spec: + ports: + - name: metrics + port: 10661 + type: ClusterIP +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% 
endif %} + selector: + app: kube-ovn-monitor + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-pinger + namespace: kube-system + labels: + app: kube-ovn-pinger +spec: +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-pinger + ports: + - port: 8080 + name: metrics +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-controller + namespace: kube-system + labels: + app: kube-ovn-controller +spec: +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-controller + ports: + - port: 10660 + name: metrics +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-cni + namespace: kube-system + labels: + app: kube-ovn-cni +spec: +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-cni + ports: + - port: 10665 + name: metrics diff --git a/kubespray/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 b/kubespray/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 new file mode 100644 index 0000000..2d8a5c3 --- /dev/null +++ b/kubespray/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 @@ -0,0 +1,513 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ovn + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.k8s.io/system-only: "true" + name: system:ovn +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - kube-ovn + - apiGroups: + - "kubeovn.io" + resources: + - vpcs + - vpcs/status + - vpc-nat-gateways + - subnets + - subnets/status + - ips + - vips + - vips/status + - vlans + - vlans/status + - provider-networks + - provider-networks/status + - security-groups + - security-groups/status + - htbqoses + - iptables-eips + - iptables-fip-rules + - iptables-dnat-rules + - iptables-snat-rules + - iptables-eips/status + - iptables-fip-rules/status + - iptables-dnat-rules/status + - iptables-snat-rules/status + verbs: + - "*" + - apiGroups: + - "" + resources: + - pods + - pods/exec + - namespaces + - nodes + - configmaps + verbs: + - create + - get + - list + - watch + - patch + - update + - apiGroups: + - "k8s.cni.cncf.io" + resources: + - network-attachment-definitions + verbs: + - create + - delete + - get + - list + - update + - apiGroups: + - "" + - networking.k8s.io + - apps + - extensions + resources: + - networkpolicies + - services + - endpoints + - statefulsets + - daemonsets + - deployments + - deployments/scale + verbs: + - create + - delete + - update + - patch + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - "*" + - apiGroups: + - "k8s.cni.cncf.io" + resources: + - network-attachment-definitions + verbs: + - create + - delete + - get + - list + - update + - apiGroups: + - "kubevirt.io" + resources: + - virtualmachines + - virtualmachineinstances + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ovn +roleRef: + name: system:ovn + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: ovn + namespace: kube-system +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-nb + namespace: kube-system +spec: + ports: + - name: ovn-nb + protocol: TCP + port: 
6641 + targetPort: 6641 + type: ClusterIP +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: ovn-central + ovn-nb-leader: "true" + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-sb + namespace: kube-system +spec: + ports: + - name: ovn-sb + protocol: TCP + port: 6642 + targetPort: 6642 + type: ClusterIP +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: ovn-central + ovn-sb-leader: "true" + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-northd + namespace: kube-system +spec: + ports: + - name: ovn-northd + protocol: TCP + port: 6643 + targetPort: 6643 + type: ClusterIP +{% if enable_dual_stack_networks %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: ovn-central + ovn-northd-leader: "true" + sessionAffinity: None +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: ovn-central + namespace: kube-system + annotations: + kubernetes.io/description: | + OVN components: northd, nb and sb. +spec: + replicas: {{ kube_ovn_central_replics }} + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: ovn-central + template: + metadata: + labels: + app: ovn-central + component: network + type: infra + spec: + tolerations: + - operator: Exists + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: ovn-central + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + containers: + - name: ovn-central + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/kube-ovn/start-db.sh"] + securityContext: + capabilities: + add: ["SYS_NICE"] + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + requests: + cpu: {{ kube_ovn_db_cpu_request }} + memory: {{ kube_ovn_db_memory_request }} + limits: + cpu: {{ kube_ovn_db_cpu_limit }} + memory: {{ kube_ovn_db_memory_limit }} + volumeMounts: + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /sys + name: host-sys + readOnly: true + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /etc/ovn + name: host-config-ovn + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/run/tls + name: kube-ovn-tls + readinessProbe: + exec: + command: + - bash + - /kube-ovn/ovn-healthcheck.sh + periodSeconds: 15 + timeoutSeconds: 45 + livenessProbe: + exec: + command: + - bash + - /kube-ovn/ovn-healthcheck.sh + initialDelaySeconds: 30 + periodSeconds: 15 + failureThreshold: 5 + timeoutSeconds: 45 + nodeSelector: + kubernetes.io/os: "linux" + kube-ovn/role: "master" + volumes: + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-sys + hostPath: + path: /sys + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-config-ovn + hostPath: + path: 
/etc/origin/ovn + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ovs-ovn + namespace: kube-system + annotations: + kubernetes.io/description: | + This daemon set launches the openvswitch daemon. +spec: + selector: + matchLabels: + app: ovs + updateStrategy: + type: OnDelete + template: + metadata: + labels: + app: ovs + component: network + type: infra + spec: + tolerations: + - operator: Exists + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + hostPID: true + containers: + - name: openvswitch + image: {% if kube_ovn_dpdk_enabled %}{{ kube_ovn_dpdk_container_image_repo }}:{{ kube_ovn_dpdk_container_image_tag }}{% else %}{{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}{% endif %} + + imagePullPolicy: {{ k8s_image_pull_policy }} + command: [{% if kube_ovn_dpdk_enabled %}"/kube-ovn/start-ovs-dpdk.sh"{% else %}"/kube-ovn/start-ovs.sh"{% endif %}] + securityContext: + runAsUser: 0 + privileged: true + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP +{% if not kube_ovn_dpdk_enabled %} + - name: HW_OFFLOAD + value: "{{ kube_ovn_hw_offload | string | lower }}" + - name: TUNNEL_TYPE + value: "{{ kube_ovn_tunnel_type }}" +{% endif %} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /var/run/netns + name: host-ns + mountPropagation: HostToContainer + - mountPath: /lib/modules + name: host-modules + readOnly: true + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /sys + name: host-sys + readOnly: true + - mountPath: /etc/cni/net.d + name: cni-conf + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /etc/ovn + name: host-config-ovn + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn +{% if kube_ovn_dpdk_enabled %} + - mountPath: /opt/ovs-config + name: host-config-ovs + - mountPath: /dev/hugepages + name: hugepage +{% endif %} + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/run/tls + name: kube-ovn-tls + readinessProbe: + exec: + command: + - bash +{% if kube_ovn_dpdk_enabled %} + - /kube-ovn/ovs-dpdk-healthcheck.sh +{% else %} + - /kube-ovn/ovs-healthcheck.sh +{% endif %} + periodSeconds: 5 + timeoutSeconds: 45 + livenessProbe: + exec: + command: + - bash +{% if kube_ovn_dpdk_enabled %} + - /kube-ovn/ovs-dpdk-healthcheck.sh +{% else %} + - /kube-ovn/ovs-healthcheck.sh +{% endif %} + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 5 + timeoutSeconds: 45 + resources: +{% if kube_ovn_dpdk_enabled %} + requests: + cpu: {{ kube_ovn_dpdk_node_cpu_request }} + memory: {{ kube_ovn_dpdk_node_memory_request }} + limits: + cpu: {{ kube_ovn_dpdk_node_cpu_limit }} + memory: {{ kube_ovn_dpdk_node_memory_limit }} + hugepages-1Gi: 1Gi +{% else %} + requests: + cpu: {{ kube_ovn_node_cpu_request }} + memory: {{ kube_ovn_node_memory_request }} + limits: + cpu: {{ kube_ovn_node_cpu_limit }} + memory: {{ kube_ovn_node_memory_limit }} +{% endif %} + nodeSelector: + kubernetes.io/os: "linux" + ovn.kubernetes.io/ovs_dp_type: "kernel" + volumes: + - name: host-modules + 
hostPath: + path: /lib/modules + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-sys + hostPath: + path: /sys + - name: host-ns + hostPath: + path: /var/run/netns + - name: cni-conf + hostPath: + path: /etc/cni/net.d + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-config-ovn + hostPath: + path: /etc/origin/ovn + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn +{% if kube_ovn_dpdk_enabled %} + - name: host-config-ovs + hostPath: + path: /opt/ovs-config + type: DirectoryOrCreate + - name: hugepage + emptyDir: + medium: HugePages +{% endif %} + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls diff --git a/kubespray/roles/network_plugin/kube-router/OWNERS b/kubespray/roles/network_plugin/kube-router/OWNERS new file mode 100644 index 0000000..c95aad2 --- /dev/null +++ b/kubespray/roles/network_plugin/kube-router/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - bozzo +reviewers: + - bozzo \ No newline at end of file diff --git a/kubespray/roles/network_plugin/kube-router/defaults/main.yml b/kubespray/roles/network_plugin/kube-router/defaults/main.yml new file mode 100644 index 0000000..5d4dccc --- /dev/null +++ b/kubespray/roles/network_plugin/kube-router/defaults/main.yml @@ -0,0 +1,66 @@ +--- +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertised to the BGP peers. +kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +kube_router_advertise_loadbalancer_ip: false + +# Adjust the kube-router daemonset manifest with the changes needed for DSR +kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peers to which cluster nodes will advertise the cluster IP and each node's pod CIDR. +kube_router_peer_router_asns: ~ + +# The IP address of the external router to which all nodes will peer and advertise the cluster IP and pod CIDRs. +kube_router_peer_router_ips: ~ + +# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used. +kube_router_peer_router_ports: ~ + +# Sets up node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +kube_router_support_hairpin_mode: false + +# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
+kube_router_dns_policy: ClusterFirstWithHostNet + +# Adds annotations to kubernetes nodes for advanced configuration of BGP Peers. +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/bgp.md + +# Array of annotations for master +kube_router_annotations_master: [] + +# Array of annotations for every node +kube_router_annotations_node: [] + +# Array of common annotations for every node +kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +kube_router_metrics_port: 9255 diff --git a/kubespray/roles/network_plugin/kube-router/handlers/main.yml b/kubespray/roles/network_plugin/kube-router/handlers/main.yml new file mode 100644 index 0000000..7bdfc5d --- /dev/null +++ b/kubespray/roles/network_plugin/kube-router/handlers/main.yml @@ -0,0 +1,20 @@ +--- +- name: reset_kube_router + command: /bin/true + notify: + - Kube-router | delete kube-router docker containers + - Kube-router | delete kube-router crio/containerd containers + +- name: Kube-router | delete kube-router docker containers + shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f" + register: docker_kube_router_remove + until: docker_kube_router_remove is succeeded + retries: 5 + when: container_manager in ["docker"] + +- name: Kube-router | delete kube-router crio/containerd containers + shell: '{{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' + register: crictl_kube_router_remove + until: crictl_kube_router_remove is succeeded + retries: 5 + when: container_manager in ["crio", "containerd"] diff --git a/kubespray/roles/network_plugin/kube-router/meta/main.yml b/kubespray/roles/network_plugin/kube-router/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/roles/network_plugin/kube-router/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/roles/network_plugin/kube-router/tasks/annotate.yml b/kubespray/roles/network_plugin/kube-router/tasks/annotate.yml new file mode 100644 index 0000000..e91249f --- /dev/null +++ b/kubespray/roles/network_plugin/kube-router/tasks/annotate.yml @@ -0,0 +1,21 @@ +--- +- name: kube-router | Add annotations on kube_control_plane + command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" + with_items: + - "{{ kube_router_annotations_master }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane'] + +- name: kube-router | Add annotations on kube_node + command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" + with_items: + - "{{ kube_router_annotations_node }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node'] + +- name: kube-router | Add common annotations on all servers + command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" + with_items: + - "{{ kube_router_annotations_all }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s_cluster'] diff --git 
a/kubespray/roles/network_plugin/kube-router/tasks/main.yml b/kubespray/roles/network_plugin/kube-router/tasks/main.yml new file mode 100644 index 0000000..4cc078a --- /dev/null +++ b/kubespray/roles/network_plugin/kube-router/tasks/main.yml @@ -0,0 +1,62 @@ +--- +- name: kube-router | Create annotations + include: annotate.yml + tags: annotate + +- name: kube-router | Create config directory + file: + path: /var/lib/kube-router + state: directory + owner: "{{ kube_owner }}" + recurse: true + mode: 0755 + +- name: kube-router | Create kubeconfig + template: + src: kubeconfig.yml.j2 + dest: /var/lib/kube-router/kubeconfig + mode: 0644 + owner: "{{ kube_owner }}" + notify: + - reset_kube_router + +- name: kube-router | Slurp cni config + slurp: + src: /etc/cni/net.d/10-kuberouter.conflist + register: cni_config_slurp + ignore_errors: true # noqa ignore-errors + +- name: kube-router | Set cni_config variable + set_fact: + cni_config: "{{ cni_config_slurp.content | b64decode | from_json }}" + when: + - not cni_config_slurp.failed + +- name: kube-router | Set host_subnet variable + set_fact: + host_subnet: "{{ cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | first }}" + when: + - cni_config is defined + - cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | length > 0 + +- name: kube-router | Create cni config + template: + src: cni-conf.json.j2 + dest: /etc/cni/net.d/10-kuberouter.conflist + mode: 0644 + owner: "{{ kube_owner }}" + notify: + - reset_kube_router + +- name: kube-router | Delete old configuration + file: + path: /etc/cni/net.d/10-kuberouter.conf + state: absent + +- name: kube-router | Create manifest + template: + src: kube-router.yml.j2 + dest: "{{ kube_config_dir }}/kube-router.yml" + mode: 0644 + delegate_to: "{{ groups['kube_control_plane'] | first }}" + run_once: true diff --git a/kubespray/roles/network_plugin/kube-router/tasks/reset.yml b/kubespray/roles/network_plugin/kube-router/tasks/reset.yml new file mode 100644 index 0000000..7b8ad2c --- /dev/null +++ b/kubespray/roles/network_plugin/kube-router/tasks/reset.yml @@ -0,0 +1,28 @@ +--- +- name: reset | check kube-dummy-if network device + stat: + path: /sys/class/net/kube-dummy-if + get_attributes: no + get_checksum: no + get_mime: no + register: kube_dummy_if + +- name: reset | remove the network device created by kube-router + command: ip link del kube-dummy-if + when: kube_dummy_if.stat.exists + +- name: reset | check kube-bridge exists + stat: + path: /sys/class/net/kube-bridge + get_attributes: no + get_checksum: no + get_mime: no + register: kube_bridge_if + +- name: reset | down the network bridge created by kube-router + command: ip link set kube-bridge down + when: kube_bridge_if.stat.exists + +- name: reset | remove the network bridge created by kube-router + command: ip link del kube-bridge + when: kube_bridge_if.stat.exists diff --git a/kubespray/roles/network_plugin/kube-router/templates/cni-conf.json.j2 b/kubespray/roles/network_plugin/kube-router/templates/cni-conf.json.j2 new file mode 100644 index 0000000..91fafac --- /dev/null +++ b/kubespray/roles/network_plugin/kube-router/templates/cni-conf.json.j2 @@ -0,0 +1,27 @@ +{ + "cniVersion":"0.3.0", + "name":"kubernetes", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, +{% if kube_router_support_hairpin_mode %} + "hairpinMode":true, +{% endif %} + "ipam":{ +{% if host_subnet is defined %} + "subnet": "{{ host_subnet }}", +{% endif %} + "type":"host-local" + } 
+ }, + { + "type":"portmap", + "capabilities":{ + "portMappings":true + } + } + ] +} diff --git a/kubespray/roles/network_plugin/kube-router/templates/kube-router.yml.j2 b/kubespray/roles/network_plugin/kube-router/templates/kube-router.yml.j2 new file mode 100644 index 0000000..ab677ab --- /dev/null +++ b/kubespray/roles/network_plugin/kube-router/templates/kube-router.yml.j2 @@ -0,0 +1,220 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + minReadySeconds: 3 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: +{% if kube_router_enable_metrics %} + prometheus.io/path: {{ kube_router_metrics_path }} + prometheus.io/port: "{{ kube_router_metrics_port }}" + prometheus.io/scrape: "true" +{% endif %} + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + containers: + - name: kube-router + image: {{ kube_router_image_repo }}:{{ kube_router_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --run-router={{ kube_router_run_router | bool }} + - --run-firewall={{ kube_router_run_firewall | bool }} + - --run-service-proxy={{ kube_router_run_service_proxy | bool }} + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --bgp-graceful-restart=true +{% if kube_router_advertise_cluster_ip %} + - --advertise-cluster-ip +{% endif %} +{% if kube_router_advertise_external_ip %} + - --advertise-external-ip +{% endif %} +{% if kube_router_advertise_loadbalancer_ip %} + - --advertise-loadbalancer-ip +{% endif %} +{% if kube_router_cluster_asn %} + - --cluster-asn={{ kube_router_cluster_asn }} +{% endif %} +{% if kube_router_peer_router_asns %} + - --peer-router-asns={{ kube_router_peer_router_asns }} +{% endif %} +{% if kube_router_peer_router_ips %} + - --peer-router-ips={{ kube_router_peer_router_ips }} +{% endif %} +{% if kube_router_peer_router_ports %} + - --peer-router-ports={{ kube_router_peer_router_ports }} +{% endif %} +{% if kube_router_enable_metrics %} + - --metrics-path={{ kube_router_metrics_path }} + - --metrics-port={{ kube_router_metrics_port }} +{% endif %} +{% if kube_router_enable_dsr %} +{% if container_manager == "docker" %} + - --runtime-endpoint=unix:///var/run/docker.sock +{% endif %} +{% if container_manager == "containerd" %} + - --runtime-endpoint=unix:///run/containerd/containerd.sock +{% endif %} +{% endif %} +{% for arg in kube_router_extra_args %} + - "{{ arg }}" +{% endfor %} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: +{% if kube_router_enable_dsr %} +{% if container_manager == "docker" %} + - name: docker-socket + mountPath: /var/run/docker.sock + readOnly: true +{% endif %} +{% if container_manager == "containerd" %} + - name: containerd-socket + mountPath: /run/containerd/containerd.sock + readOnly: true +{% endif %} +{% endif %} + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: 
xtables-lock + mountPath: /run/xtables.lock + readOnly: false +{% if kube_router_enable_metrics %} + ports: + - containerPort: {{ kube_router_metrics_port }} + hostPort: {{ kube_router_metrics_port }} + name: metrics + protocol: TCP +{% endif %} + hostNetwork: true + dnsPolicy: {{ kube_router_dns_policy }} +{% if kube_router_enable_dsr %} + hostIPC: true + hostPID: true +{% endif %} + tolerations: + - operator: Exists + volumes: +{% if kube_router_enable_dsr %} +{% if container_manager == "docker" %} + - name: docker-socket + hostPath: + path: /var/run/docker.sock + type: Socket +{% endif %} +{% if container_manager == "containerd" %} + - name: containerd-socket + hostPath: + path: /run/containerd/containerd.sock + type: Socket +{% endif %} +{% endif %} + - name: lib-modules + hostPath: + path: /lib/modules + - name: cni-conf-dir + hostPath: + path: /etc/cni/net.d + - name: kubeconfig + hostPath: + path: /var/lib/kube-router + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/kubespray/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 b/kubespray/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 new file mode 100644 index 0000000..42fd317 --- /dev/null +++ b/kubespray/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusterCIDR: {{ kube_pods_subnet }} +clusters: +- name: cluster + cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-router + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +contexts: +- context: + cluster: cluster + user: kube-router + name: kube-router-context +current-context: kube-router-context diff --git a/kubespray/roles/network_plugin/macvlan/OWNERS b/kubespray/roles/network_plugin/macvlan/OWNERS new file mode 100644 index 0000000..c5dfbc7 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - simon +reviewers: + - simon diff --git a/kubespray/roles/network_plugin/macvlan/defaults/main.yml b/kubespray/roles/network_plugin/macvlan/defaults/main.yml new file mode 100644 index 0000000..70a8dd0 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/defaults/main.yml @@ -0,0 +1,6 @@ +--- +macvlan_interface: eth0 +enable_nat_default_gateway: true + +# sysctl_file_path to add sysctl conf to +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" diff --git a/kubespray/roles/network_plugin/macvlan/files/ifdown-local b/kubespray/roles/network_plugin/macvlan/files/ifdown-local new file mode 100644 index 
0000000..003b8a1 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/files/ifdown-local @@ -0,0 +1,6 @@ +#!/bin/bash + +POSTDOWNNAME="/etc/sysconfig/network-scripts/post-down-$1" +if [ -x $POSTDOWNNAME ]; then + exec $POSTDOWNNAME +fi diff --git a/kubespray/roles/network_plugin/macvlan/files/ifdown-macvlan b/kubespray/roles/network_plugin/macvlan/files/ifdown-macvlan new file mode 100755 index 0000000..4d26db5 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/files/ifdown-macvlan @@ -0,0 +1,41 @@ +#!/bin/bash +# +# initscripts-macvlan +# Copyright (C) 2014 Lars Kellogg-Stedman +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +. /etc/init.d/functions + +cd /etc/sysconfig/network-scripts +. ./network-functions + +[ -f ../network ] && . ../network + +CONFIG=${1} + +need_config ${CONFIG} + +source_config + +OTHERSCRIPT="/etc/sysconfig/network-scripts/ifdown-${REAL_DEVICETYPE}" + +if [ ! -x ${OTHERSCRIPT} ]; then + OTHERSCRIPT="/etc/sysconfig/network-scripts/ifdown-eth" +fi + +${OTHERSCRIPT} ${CONFIG} + +ip link del ${DEVICE} type ${TYPE:-macvlan} + diff --git a/kubespray/roles/network_plugin/macvlan/files/ifup-local b/kubespray/roles/network_plugin/macvlan/files/ifup-local new file mode 100755 index 0000000..3b6891e --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/files/ifup-local @@ -0,0 +1,6 @@ +#!/bin/bash + +POSTUPNAME="/etc/sysconfig/network-scripts/post-up-$1" +if [ -x $POSTUPNAME ]; then + exec $POSTUPNAME +fi diff --git a/kubespray/roles/network_plugin/macvlan/files/ifup-macvlan b/kubespray/roles/network_plugin/macvlan/files/ifup-macvlan new file mode 100755 index 0000000..8dc61aa --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/files/ifup-macvlan @@ -0,0 +1,44 @@ +#!/bin/bash +# +# initscripts-macvlan +# Copyright (C) 2014 Lars Kellogg-Stedman +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +. /etc/init.d/functions + +cd /etc/sysconfig/network-scripts +. ./network-functions + +[ -f ../network ] && . ../network + +CONFIG=${1} + +need_config ${CONFIG} + +source_config + +OTHERSCRIPT="/etc/sysconfig/network-scripts/ifup-${REAL_DEVICETYPE}" + +if [ ! 
-x ${OTHERSCRIPT} ]; then + OTHERSCRIPT="/etc/sysconfig/network-scripts/ifup-eth" +fi + +ip link add \ + link ${MACVLAN_PARENT} \ + name ${DEVICE} \ + type ${TYPE:-macvlan} mode ${MACVLAN_MODE:-private} + +${OTHERSCRIPT} ${CONFIG} + diff --git a/kubespray/roles/network_plugin/macvlan/handlers/main.yml b/kubespray/roles/network_plugin/macvlan/handlers/main.yml new file mode 100644 index 0000000..abb018c --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/handlers/main.yml @@ -0,0 +1,19 @@ +--- +- name: Macvlan | restart network + command: /bin/true + notify: + - Macvlan | reload network + when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: Macvlan | reload network + service: + name: >- + {% if ansible_os_family == "RedHat" -%} + network + {%- elif ansible_distribution == "Ubuntu" and ansible_distribution_release == "bionic" -%} + systemd-networkd + {%- elif ansible_os_family == "Debian" -%} + networking + {%- endif %} + state: restarted + when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and kube_network_plugin not in ['canal', 'calico'] diff --git a/kubespray/roles/network_plugin/macvlan/meta/main.yml b/kubespray/roles/network_plugin/macvlan/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/roles/network_plugin/macvlan/tasks/main.yml b/kubespray/roles/network_plugin/macvlan/tasks/main.yml new file mode 100644 index 0000000..bdc2dbc --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/tasks/main.yml @@ -0,0 +1,110 @@ +--- +- name: Macvlan | Retrieve Pod Cidr + command: "{{ kubectl }} get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'" + changed_when: false + register: node_pod_cidr_cmd + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Macvlan | set node_pod_cidr + set_fact: + node_pod_cidr={{ node_pod_cidr_cmd.stdout }} + +- name: Macvlan | Retrieve default gateway network interface + become: false + raw: ip -4 route list 0/0 | sed 's/.*dev \([[:alnum:]]*\).*/\1/' + changed_when: false + register: node_default_gateway_interface_cmd + +- name: Macvlan | set node_default_gateway_interface + set_fact: + node_default_gateway_interface={{ node_default_gateway_interface_cmd.stdout | trim }} + +- name: Macvlan | Install network gateway interface on debian + template: + src: debian-network-macvlan.cfg.j2 + dest: /etc/network/interfaces.d/60-mac0.cfg + mode: 0644 + notify: Macvlan | restart network + when: ansible_os_family in ["Debian"] + +- block: + - name: Macvlan | Install macvlan script on centos + copy: + src: "{{ item }}" + dest: /etc/sysconfig/network-scripts/ + owner: root + group: root + mode: "0755" + with_fileglob: + - files/* + + - name: Macvlan | Install post-up script on centos + copy: + src: "files/ifup-local" + dest: /sbin/ + owner: root + group: root + mode: "0755" + when: enable_nat_default_gateway + + - name: Macvlan | Install network gateway interface on centos + template: + src: "{{ item.src }}.j2" + dest: "/etc/sysconfig/network-scripts/{{ item.dst }}" + mode: 0644 + with_items: + - {src: centos-network-macvlan.cfg, dst: ifcfg-mac0 } + - {src: centos-routes-macvlan.cfg, dst: route-mac0 } + - {src: centos-postup-macvlan.cfg, dst: post-up-mac0 } + notify: Macvlan | restart network + + when: ansible_os_family == "RedHat" + +- block: + - name: Macvlan | Install service 
nat via gateway on Flatcar Container Linux + template: + src: coreos-service-nat_ouside.j2 + dest: /etc/systemd/system/enable_nat_ouside.service + mode: 0644 + when: enable_nat_default_gateway + + - name: Macvlan | Enable service nat via gateway on Flatcar Container Linux + command: "{{ item }}" + with_items: + - systemctl daemon-reload + - systemctl enable enable_nat_ouside.service + when: enable_nat_default_gateway + + - name: Macvlan | Install network gateway interface on Flatcar Container Linux + template: + src: "{{ item.src }}.j2" + dest: "/etc/systemd/network/{{ item.dst }}" + mode: 0644 + with_items: + - {src: coreos-device-macvlan.cfg, dst: macvlan.netdev } + - {src: coreos-interface-macvlan.cfg, dst: output.network } + - {src: coreos-network-macvlan.cfg, dst: macvlan.network } + notify: Macvlan | restart network + + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + +- name: Macvlan | Install cni definition for Macvlan + template: + src: 10-macvlan.conf.j2 + dest: /etc/cni/net.d/10-macvlan.conf + mode: 0644 + +- name: Macvlan | Install loopback definition for Macvlan + template: + src: 99-loopback.conf.j2 + dest: /etc/cni/net.d/99-loopback.conf + mode: 0644 + +- name: Enable net.ipv4.conf.all.arp_notify in sysctl + sysctl: + name: net.ipv4.conf.all.arp_notify + value: 1 + sysctl_set: yes + sysctl_file: "{{ sysctl_file_path }}" + state: present + reload: yes diff --git a/kubespray/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 b/kubespray/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 new file mode 100644 index 0000000..10598a2 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 @@ -0,0 +1,15 @@ +{ + "cniVersion": "0.4.0", + "name": "mynet", + "type": "macvlan", + "master": "{{ macvlan_interface }}", + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "{{ node_pod_cidr }}", + "routes": [ + { "dst": "0.0.0.0/0" } + ], + "gateway": "{{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" + } +} diff --git a/kubespray/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 b/kubespray/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 new file mode 100644 index 0000000..b41ab65 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 @@ -0,0 +1,5 @@ +{ + "cniVersion": "0.2.0", + "name": "lo", + "type": "loopback" +} diff --git a/kubespray/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 b/kubespray/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 new file mode 100644 index 0000000..e7bad78 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 @@ -0,0 +1,14 @@ +DEVICE=mac0 +DEVICETYPE=macvlan +TYPE=macvlan +BOOTPROTO=none +ONBOOT=yes +NM_CONTROLLED=no + +MACVLAN_PARENT={{ macvlan_interface }} +MACVLAN_MODE=bridge + +IPADDR={{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }} +NETMASK={{ node_pod_cidr|ipaddr('netmask') }} +NETWORK={{ node_pod_cidr|ipaddr('network') }} + diff --git a/kubespray/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 b/kubespray/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 new file mode 100644 index 0000000..f3edd99 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 @@ -0,0 +1,4 @@ +{% if enable_nat_default_gateway %} +iptables -t nat -D POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} 
-j MASQUERADE +{% endif %} + diff --git a/kubespray/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 b/kubespray/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 new file mode 100644 index 0000000..35cd5b5 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 @@ -0,0 +1,4 @@ +{% if enable_nat_default_gateway %} +iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE +{% endif %} + diff --git a/kubespray/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 b/kubespray/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 new file mode 100644 index 0000000..60400dd --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 @@ -0,0 +1,7 @@ +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} +{{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} +{% endif %} +{% endif %} +{% endfor %} diff --git a/kubespray/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 b/kubespray/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 new file mode 100644 index 0000000..2418dac --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 @@ -0,0 +1,6 @@ +[NetDev] +Name=mac0 +Kind=macvlan + +[MACVLAN] +Mode=bridge diff --git a/kubespray/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 b/kubespray/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 new file mode 100644 index 0000000..342f680 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 @@ -0,0 +1,6 @@ +[Match] +Name={{ macvlan_interface }} + +[Network] +MACVLAN=mac0 +DHCP=yes diff --git a/kubespray/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 b/kubespray/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 new file mode 100644 index 0000000..696eba5 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 @@ -0,0 +1,18 @@ +[Match] +Name=mac0 + +[Network] +Address={{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }}/{{ node_pod_cidr|ipaddr('prefix') }} + +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} +[Route] +Gateway={{ hostvars[host]['access_ip'] }} +Destination={{ hostvars[host]['node_pod_cidr'] }} +GatewayOnlink=yes + +{% endif %} +{% endif %} +{% endfor %} + diff --git a/kubespray/roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 b/kubespray/roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 new file mode 100644 index 0000000..5f00b00 --- /dev/null +++ b/kubespray/roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 @@ -0,0 +1,6 @@ +[Service] +Type=oneshot +ExecStart=/bin/bash -c "iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE" + +[Install] +WantedBy=sys-subsystem-net-devices-mac0.device diff --git a/kubespray/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 b/kubespray/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 new file mode 100644 index 0000000..9edd6d1 --- /dev/null +++ 
b/kubespray/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 @@ -0,0 +1,27 @@ +auto mac0 +iface mac0 inet static + address {{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }} + network {{ node_pod_cidr|ipaddr('network') }} + netmask {{ node_pod_cidr|ipaddr('netmask') }} + broadcast {{ node_pod_cidr|ipaddr('broadcast') }} + pre-up ip link add link {{ macvlan_interface }} mac0 type macvlan mode bridge +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} + post-up ip route add {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} +{% endif %} +{% endif %} +{% endfor %} +{% if enable_nat_default_gateway %} + post-up iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE +{% endif %} +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} + post-down ip route del {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} +{% endif %} +{% endif %} +{% endfor %} + post-down iptables -t nat -D POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE + post-down ip link delete mac0 + diff --git a/kubespray/roles/network_plugin/meta/main.yml b/kubespray/roles/network_plugin/meta/main.yml new file mode 100644 index 0000000..cb013fc --- /dev/null +++ b/kubespray/roles/network_plugin/meta/main.yml @@ -0,0 +1,48 @@ +--- +dependencies: + - role: network_plugin/cni + + - role: network_plugin/cilium + when: kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool + tags: + - cilium + + - role: network_plugin/calico + when: kube_network_plugin == 'calico' + tags: + - calico + + - role: network_plugin/flannel + when: kube_network_plugin == 'flannel' + tags: + - flannel + + - role: network_plugin/weave + when: kube_network_plugin == 'weave' + tags: + - weave + + - role: network_plugin/canal + when: kube_network_plugin == 'canal' + tags: + - canal + + - role: network_plugin/macvlan + when: kube_network_plugin == 'macvlan' + tags: + - macvlan + + - role: network_plugin/kube-ovn + when: kube_network_plugin == 'kube-ovn' + tags: + - kube-ovn + + - role: network_plugin/kube-router + when: kube_network_plugin == 'kube-router' + tags: + - kube-router + + - role: network_plugin/multus + when: kube_network_plugin_multus + tags: + - multus diff --git a/kubespray/roles/network_plugin/multus/defaults/main.yml b/kubespray/roles/network_plugin/multus/defaults/main.yml new file mode 100644 index 0000000..cbeb4cb --- /dev/null +++ b/kubespray/roles/network_plugin/multus/defaults/main.yml @@ -0,0 +1,10 @@ +--- +multus_conf_file: "auto" +multus_cni_conf_dir_host: "/etc/cni/net.d" +multus_cni_bin_dir_host: "/opt/cni/bin" +multus_cni_run_dir_host: "/run" +multus_cni_conf_dir: "{{ ('/host', multus_cni_conf_dir_host) | join }}" +multus_cni_bin_dir: "{{ ('/host', multus_cni_bin_dir_host) | join }}" +multus_cni_run_dir: "{{ ('/host', multus_cni_run_dir_host) | join }}" +multus_cni_version: "0.4.0" +multus_kubeconfig_file_host: "{{ (multus_cni_conf_dir_host, '/multus.d/multus.kubeconfig') | join }}" diff --git a/kubespray/roles/network_plugin/multus/files/multus-clusterrole.yml b/kubespray/roles/network_plugin/multus/files/multus-clusterrole.yml new file mode 100644 index 0000000..b574069 --- /dev/null +++ 
b/kubespray/roles/network_plugin/multus/files/multus-clusterrole.yml @@ -0,0 +1,28 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +rules: + - apiGroups: ["k8s.cni.cncf.io"] + resources: + - '*' + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update diff --git a/kubespray/roles/network_plugin/multus/files/multus-clusterrolebinding.yml b/kubespray/roles/network_plugin/multus/files/multus-clusterrolebinding.yml new file mode 100644 index 0000000..2d1e1a4 --- /dev/null +++ b/kubespray/roles/network_plugin/multus/files/multus-clusterrolebinding.yml @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: +- kind: ServiceAccount + name: multus + namespace: kube-system diff --git a/kubespray/roles/network_plugin/multus/files/multus-crd.yml b/kubespray/roles/network_plugin/multus/files/multus-crd.yml new file mode 100644 index 0000000..24b2c58 --- /dev/null +++ b/kubespray/roles/network_plugin/multus/files/multus-crd.yml @@ -0,0 +1,45 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + scope: Namespaced + names: + plural: network-attachment-definitions + singular: network-attachment-definition + kind: NetworkAttachmentDefinition + shortNames: + - net-attach-def + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing + Working Group to express the intent for attaching pods to one or more logical or physical + networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this represen + tation of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' + type: object + properties: + config: + description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' + type: string diff --git a/kubespray/roles/network_plugin/multus/files/multus-serviceaccount.yml b/kubespray/roles/network_plugin/multus/files/multus-serviceaccount.yml new file mode 100644 index 0000000..6242308 --- /dev/null +++ b/kubespray/roles/network_plugin/multus/files/multus-serviceaccount.yml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus + namespace: kube-system diff --git a/kubespray/roles/network_plugin/multus/meta/main.yml b/kubespray/roles/network_plugin/multus/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/roles/network_plugin/multus/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/roles/network_plugin/multus/tasks/main.yml b/kubespray/roles/network_plugin/multus/tasks/main.yml new file mode 100644 index 0000000..3552b05 --- /dev/null +++ b/kubespray/roles/network_plugin/multus/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: Multus | Copy manifest files + copy: + src: "{{ item.file }}" + dest: "{{ kube_config_dir }}" + mode: 0644 + with_items: + - {name: multus-crd, file: multus-crd.yml, type: customresourcedefinition} + - {name: multus-serviceaccount, file: multus-serviceaccount.yml, type: serviceaccount} + - {name: multus-clusterrole, file: multus-clusterrole.yml, type: clusterrole} + - {name: multus-clusterrolebinding, file: multus-clusterrolebinding.yml, type: clusterrolebinding} + register: multus_manifest_1 + +- name: Multus | Copy manifest templates + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: multus-daemonset, file: multus-daemonset.yml, type: daemonset} + register: multus_manifest_2 diff --git a/kubespray/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 b/kubespray/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 new file mode 100644 index 0000000..494dee2 --- /dev/null +++ b/kubespray/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 @@ -0,0 +1,71 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: kube-multus-ds-{{ image_arch }} + namespace: kube-system + labels: + tier: node + app: multus +spec: + selector: + matchLabels: + tier: node + app: multus + template: + metadata: + labels: + tier: node + app: multus + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + kubernetes.io/arch: {{ image_arch }} + tolerations: + - operator: Exists + serviceAccountName: multus + containers: + - name: kube-multus + image: {{ multus_image_repo }}:{{ multus_image_tag }} + command: ["/entrypoint.sh"] + args: + - "--cni-conf-dir={{ multus_cni_conf_dir }}" + - "--cni-bin-dir={{ multus_cni_bin_dir }}" + - "--multus-conf-file={{ multus_conf_file }}" + - "--multus-kubeconfig-file-host={{ multus_kubeconfig_file_host }}" + - "--cni-version={{ multus_cni_version }}" + resources: + requests: + cpu: "100m" + memory: "90Mi" + limits: + cpu: "100m" + memory: "90Mi" + securityContext: + privileged: true +{% if container_manager == 'crio' %} + capabilities: + add: ["SYS_ADMIN"] +{% endif %} + volumeMounts: +{% if container_manager 
== 'crio' %} + - name: run + mountPath: {{ multus_cni_run_dir }} +{% endif %} + - name: cni + mountPath: {{ multus_cni_conf_dir }} + - name: cnibin + mountPath: {{ multus_cni_bin_dir }} + volumes: +{% if container_manager == 'crio' %} + - name: run + hostPath: + path: {{ multus_cni_run_dir_host }} +{% endif %} + - name: cni + hostPath: + path: {{ multus_cni_conf_dir_host }} + - name: cnibin + hostPath: + path: {{ multus_cni_bin_dir_host }} diff --git a/kubespray/roles/network_plugin/ovn4nfv/tasks/main.yml b/kubespray/roles/network_plugin/ovn4nfv/tasks/main.yml new file mode 100644 index 0000000..da21266 --- /dev/null +++ b/kubespray/roles/network_plugin/ovn4nfv/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: ovn4nfv | Label control-plane node + command: >- + {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: ovn4nfv | Create ovn4nfv-k8s manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0644 + with_items: + - {name: ovn-daemonset, file: ovn-daemonset.yml} + - {name: ovn4nfv-k8s-plugin, file: ovn4nfv-k8s-plugin.yml} + register: ovn4nfv_node_manifests diff --git a/kubespray/roles/network_plugin/weave/defaults/main.yml b/kubespray/roles/network_plugin/weave/defaults/main.yml new file mode 100644 index 0000000..47469ae --- /dev/null +++ b/kubespray/roles/network_plugin/weave/defaults/main.yml @@ -0,0 +1,64 @@ +--- + +# Weave's network password for encryption, if null then no network encryption. +weave_password: ~ + +# If set to 1, disable checking for new Weave Net versions (default is blank, +# i.e. check is enabled) +weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +weave_ipalloc_range: "{{ kube_pods_subnet }}" + +# Set to 0 to disable Network Policy Controller (default is on) +weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +weave_mtu: 1376 + +# Set to 1 to preserve the client source IP address when accessing Service +# annotated with `service.spec.externalTrafficPolicy=Local`. 
The feature works +# only with Weave IPAM (default). +weave_no_masq_local: true + +# Set to nft to use nftables backend for iptables (default is iptables) +weave_iptables_backend: ~ + +# Extra variables that are passed to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +weave_extra_args: ~ + +# Extra variables for weave_npc that are passed to launch.sh, useful for changing the log level, e.g. --log-level=error +weave_npc_extra_args: ~ diff --git a/kubespray/roles/network_plugin/weave/meta/main.yml b/kubespray/roles/network_plugin/weave/meta/main.yml new file mode 100644 index 0000000..9b7065f --- /dev/null +++ b/kubespray/roles/network_plugin/weave/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/kubespray/roles/network_plugin/weave/tasks/main.yml b/kubespray/roles/network_plugin/weave/tasks/main.yml new file mode 100644 index 0000000..ae4a5a4 --- /dev/null +++ b/kubespray/roles/network_plugin/weave/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Weave | Create manifest + template: + src: weave-net.yml.j2 + dest: "{{ kube_config_dir }}/weave-net.yml" + mode: 0644 + +- name: Weave | Fix nodePort for Weave + template: + src: 10-weave.conflist.j2 + dest: /etc/cni/net.d/10-weave.conflist + mode: 0644 diff --git a/kubespray/roles/network_plugin/weave/templates/10-weave.conflist.j2 b/kubespray/roles/network_plugin/weave/templates/10-weave.conflist.j2 new file mode 100644 index 0000000..9aab7e9 --- /dev/null +++ b/kubespray/roles/network_plugin/weave/templates/10-weave.conflist.j2 @@ -0,0 +1,16 @@ +{ + "cniVersion": "0.3.0", + "name": "weave", + "plugins": [ + { + "name": "weave", + "type": "weave-net", + "hairpinMode": {{ weave_hairpin_mode | bool | lower }} + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + } + ] +} diff --git a/kubespray/roles/network_plugin/weave/templates/weave-net.yml.j2 b/kubespray/roles/network_plugin/weave/templates/weave-net.yml.j2 new file mode 100644 index 0000000..84c4fa0 --- /dev/null +++ b/kubespray/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -0,0 +1,297 @@ +--- +apiVersion: v1 +kind: List +items: + - apiVersion: v1 + kind: ServiceAccount + metadata: + name: weave-net + labels: + name: weave-net + namespace: kube-system + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: weave-net + labels: + name: weave-net + rules: + - apiGroups: + - '' + resources: + - pods + - namespaces + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - 'networking.k8s.io' + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - nodes/status + verbs: + - patch + - update + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: weave-net + labels: + name: weave-net + roleRef: + kind: ClusterRole + name: weave-net + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: kube-system + - apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: weave-net + namespace: kube-system + labels: + name: weave-net + rules: + - apiGroups: + - '' + resources: + - configmaps + resourceNames: + - weave-net + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: +
name: weave-net + namespace: kube-system + labels: + name: weave-net + roleRef: + kind: Role + name: weave-net + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: kube-system + - apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: weave-net + labels: + name: weave-net + namespace: kube-system + spec: + # Wait 5 seconds to let pod connect before rolling next pod + selector: + matchLabels: + name: weave-net + minReadySeconds: 5 + template: + metadata: + labels: + name: weave-net + spec: + initContainers: + - name: weave-init + image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /home/weave/init.sh + env: + securityContext: + privileged: true + volumeMounts: + - name: cni-bin + mountPath: /host/opt + - name: cni-bin2 + mountPath: /host/home + - name: cni-conf + mountPath: /host/etc + - name: lib-modules + mountPath: /lib/modules + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + containers: + - name: weave + command: + - /home/weave/launch.sh + env: + - name: INIT_CONTAINER + value: "true" + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: WEAVE_PASSWORD + valueFrom: + secretKeyRef: + name: weave-net + key: WEAVE_PASSWORD + - name: CHECKPOINT_DISABLE + value: "{{ weave_checkpoint_disable | bool | int }}" + - name: CONN_LIMIT + value: "{{ weave_conn_limit | int }}" + - name: HAIRPIN_MODE + value: "{{ weave_hairpin_mode | bool | lower }}" + - name: IPALLOC_RANGE + value: "{{ weave_ipalloc_range }}" + - name: EXPECT_NPC + value: "{{ weave_expect_npc | bool | int }}" +{% if weave_kube_peers %} + - name: KUBE_PEERS + value: "{{ weave_kube_peers }}" +{% endif %} +{% if weave_ipalloc_init %} + - name: IPALLOC_INIT + value: "{{ weave_ipalloc_init }}" +{% endif %} +{% if weave_expose_ip %} + - name: WEAVE_EXPOSE_IP + value: "{{ weave_expose_ip }}" +{% endif %} +{% if weave_metrics_addr %} + - name: WEAVE_METRICS_ADDR + value: "{{ weave_metrics_addr }}" +{% endif %} +{% if weave_status_addr %} + - name: WEAVE_STATUS_ADDR + value: "{{ weave_status_addr }}" +{% endif %} +{% if weave_iptables_backend %} + - name: IPTABLES_BACKEND + value: "{{ weave_iptables_backend }}" +{% endif %} + - name: WEAVE_MTU + value: "{{ weave_mtu | int }}" + - name: NO_MASQ_LOCAL + value: "{{ weave_no_masq_local | bool | int }}" +{% if weave_extra_args %} + - name: EXTRA_ARGS + value: "{{ weave_extra_args }}" +{% endif %} + image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /status + port: 6784 + resources: + requests: + cpu: 50m + securityContext: + privileged: true + volumeMounts: + - name: weavedb + mountPath: /weavedb + - name: dbus + mountPath: /host/var/lib/dbus + readOnly: true + - mountPath: /host/etc/machine-id + name: cni-machine-id + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + - name: weave-npc + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName +{% if weave_npc_extra_args %} + - name: EXTRA_ARGS + value: "{{ weave_npc_extra_args }}" +{% endif %} + image: {{ weave_npc_image_repo }}:{{ weave_npc_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: 50m + securityContext: + privileged: true + volumeMounts: + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + 
hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + hostPID: false + restartPolicy: Always + securityContext: + seLinuxOptions: {} + serviceAccountName: weave-net + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: weavedb + hostPath: + path: /var/lib/weave + - name: cni-bin + hostPath: + path: /opt + - name: cni-bin2 + hostPath: + path: /home + - name: cni-conf + hostPath: + path: /etc + - name: cni-machine-id + hostPath: + path: /etc/machine-id + - name: dbus + hostPath: + path: /var/lib/dbus + - name: lib-modules + hostPath: + path: /lib/modules + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + priorityClassName: system-node-critical + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate + - apiVersion: v1 + kind: Secret + metadata: + name: weave-net + namespace: kube-system + data: + WEAVE_PASSWORD: "{{ weave_password | default("") | b64encode }}" diff --git a/kubespray/roles/recover_control_plane/OWNERS b/kubespray/roles/recover_control_plane/OWNERS new file mode 100644 index 0000000..cb814a1 --- /dev/null +++ b/kubespray/roles/recover_control_plane/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - qvicksilver + - yujunz +reviewers: + - qvicksilver + - yujunz diff --git a/kubespray/roles/recover_control_plane/control-plane/defaults/main.yml b/kubespray/roles/recover_control_plane/control-plane/defaults/main.yml new file mode 100644 index 0000000..229514b --- /dev/null +++ b/kubespray/roles/recover_control_plane/control-plane/defaults/main.yml @@ -0,0 +1,2 @@ +--- +bin_dir: /usr/local/bin diff --git a/kubespray/roles/recover_control_plane/control-plane/tasks/main.yml b/kubespray/roles/recover_control_plane/control-plane/tasks/main.yml new file mode 100644 index 0000000..4a4e3eb --- /dev/null +++ b/kubespray/roles/recover_control_plane/control-plane/tasks/main.yml @@ -0,0 +1,29 @@ +--- +- name: Wait for apiserver + command: "{{ kubectl }} get nodes" + environment: + - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config" + register: apiserver_is_ready + until: apiserver_is_ready.rc == 0 + retries: 6 + delay: 10 + changed_when: false + when: groups['broken_kube_control_plane'] + +- name: Delete broken kube_control_plane nodes from cluster + command: "{{ kubectl }} delete node {{ item }}" + environment: + - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config" + with_items: "{{ groups['broken_kube_control_plane'] }}" + register: delete_broken_kube_masters + failed_when: false + when: groups['broken_kube_control_plane'] + +- name: Fail if unable to delete broken kube_control_plane nodes from cluster + fail: + msg: "Unable to delete broken kube_control_plane node: {{ item.item }}" + loop: "{{ delete_broken_kube_masters.results }}" + changed_when: false + when: + - groups['broken_kube_control_plane'] + - "item.rc != 0 and not 'NotFound' in item.stderr" diff --git a/kubespray/roles/recover_control_plane/etcd/tasks/main.yml b/kubespray/roles/recover_control_plane/etcd/tasks/main.yml new file mode 100644 index 0000000..45e2c65 --- /dev/null +++ b/kubespray/roles/recover_control_plane/etcd/tasks/main.yml @@ -0,0 +1,93 @@ +--- +- name: Get etcd endpoint health + command: "{{ bin_dir }}/etcdctl endpoint health" + register: etcd_endpoint_health + ignore_errors: true # noqa ignore-errors + changed_when: false + check_mode: no + environment: + ETCDCTL_API: 3 + 
ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + when: + - groups['broken_etcd'] + +- name: Set healthy fact + set_fact: + healthy: "{{ etcd_endpoint_health.stderr is match('Error: unhealthy cluster') }}" + when: + - groups['broken_etcd'] + +- name: Set has_quorum fact + set_fact: + has_quorum: "{{ etcd_endpoint_health.stdout_lines | select('match', '.*is healthy.*') | list | length >= etcd_endpoint_health.stderr_lines | select('match', '.*is unhealthy.*') | list | length }}" + when: + - groups['broken_etcd'] + +- include_tasks: recover_lost_quorum.yml + when: + - groups['broken_etcd'] + - not has_quorum + +- name: Remove etcd data dir + file: + path: "{{ etcd_data_dir }}" + state: absent + delegate_to: "{{ item }}" + with_items: "{{ groups['broken_etcd'] }}" + ignore_errors: true # noqa ignore-errors + when: + - groups['broken_etcd'] + - has_quorum + +- name: Delete old certificates + # noqa 302 ignore-error - rm is ok here for now + shell: "rm {{ etcd_cert_dir }}/*{{ item }}*" + with_items: "{{ groups['broken_etcd'] }}" + register: delete_old_certificates + ignore_errors: true + when: groups['broken_etcd'] + +- name: Fail if unable to delete old certificates + fail: + msg: "Unable to delete old certificates for: {{ item.item }}" + loop: "{{ delete_old_certificates.results }}" + changed_when: false + when: + - groups['broken_etcd'] + - "item.rc != 0 and not 'No such file or directory' in item.stderr" + +- name: Get etcd cluster members + command: "{{ bin_dir }}/etcdctl member list" + register: member_list + changed_when: false + check_mode: no + environment: + ETCDCTL_API: 3 + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + when: + - groups['broken_etcd'] + - not healthy + - has_quorum + +- name: Remove broken cluster members + command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}" + environment: + ETCDCTL_API: 3 + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + with_nested: + - "{{ groups['broken_etcd'] }}" + - "{{ member_list.stdout_lines }}" + when: + - groups['broken_etcd'] + - not healthy + - has_quorum + - hostvars[item[0]]['etcd_member_name'] == item[1].replace(' ','').split(',')[2] diff --git a/kubespray/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/kubespray/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml new file mode 100644 index 0000000..1ecc90f --- /dev/null +++ b/kubespray/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml @@ -0,0 +1,59 @@ +--- +- name: Save etcd snapshot + command: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db" + environment: + - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + - ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + - ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}" + - ETCDCTL_API: 3 + when: etcd_snapshot is not defined + +- name: Transfer etcd snapshot to 
host + copy: + src: "{{ etcd_snapshot }}" + dest: /tmp/snapshot.db + mode: 0640 + when: etcd_snapshot is defined + +- name: Stop etcd + systemd: + name: etcd + state: stopped + +- name: Remove etcd data-dir + file: + path: "{{ etcd_data_dir }}" + state: absent + +- name: Restore etcd snapshot # noqa 301 305 + shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}" + environment: + - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + - ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + - ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + - ETCDCTL_API: 3 + +- name: Remove etcd snapshot + file: + path: /tmp/snapshot.db + state: absent + +- name: Change etcd data-dir owner + file: + path: "{{ etcd_data_dir }}" + owner: etcd + group: etcd + recurse: true + +- name: Reconfigure etcd + replace: + path: /etc/etcd.env + regexp: "^(ETCD_INITIAL_CLUSTER=).*" + replace: '\1{{ etcd_member_name }}={{ etcd_peer_url }}' + +- name: Start etcd + systemd: + name: etcd + state: started diff --git a/kubespray/roles/recover_control_plane/post-recover/tasks/main.yml b/kubespray/roles/recover_control_plane/post-recover/tasks/main.yml new file mode 100644 index 0000000..b1cd5e5 --- /dev/null +++ b/kubespray/roles/recover_control_plane/post-recover/tasks/main.yml @@ -0,0 +1,19 @@ +--- +# TODO: Figure out why kubeadm does not fix this +- name: Set etcd-servers fact + set_fact: + etcd_servers: >- + {% for host in groups['etcd'] -%} + {% if not loop.last -%} + https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2379, + {%- endif -%} + {%- if loop.last -%} + https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2379 + {%- endif -%} + {%- endfor -%} + +- name: Update apiserver etcd-servers list + replace: + path: /etc/kubernetes/manifests/kube-apiserver.yaml + regexp: "(etcd-servers=).*" + replace: "\\1{{ etcd_servers }}" diff --git a/kubespray/roles/remove-node/post-remove/defaults/main.yml b/kubespray/roles/remove-node/post-remove/defaults/main.yml new file mode 100644 index 0000000..11298b9 --- /dev/null +++ b/kubespray/roles/remove-node/post-remove/defaults/main.yml @@ -0,0 +1,3 @@ +--- +delete_node_retries: 10 +delete_node_delay_seconds: 3 diff --git a/kubespray/roles/remove-node/post-remove/tasks/main.yml b/kubespray/roles/remove-node/post-remove/tasks/main.yml new file mode 100644 index 0000000..36b1e9f --- /dev/null +++ b/kubespray/roles/remove-node/post-remove/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: remove-node | Delete node + command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}" + delegate_to: "{{ groups['kube_control_plane']|first }}" + # ignore servers that are not nodes + when: inventory_hostname in groups['k8s_cluster'] and kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines + retries: "{{ delete_node_retries }}" + # Sometimes the api-server can have a short window of unavailability when we delete a master node + delay: "{{ delete_node_delay_seconds }}" + register: result + until: result is not failed diff --git a/kubespray/roles/remove-node/pre-remove/defaults/main.yml 
b/kubespray/roles/remove-node/pre-remove/defaults/main.yml new file mode 100644 index 0000000..deaa8af --- /dev/null +++ b/kubespray/roles/remove-node/pre-remove/defaults/main.yml @@ -0,0 +1,6 @@ +--- +allow_ungraceful_removal: false +drain_grace_period: 300 +drain_timeout: 360s +drain_retries: 3 +drain_retry_delay_seconds: 10 diff --git a/kubespray/roles/remove-node/pre-remove/tasks/main.yml b/kubespray/roles/remove-node/pre-remove/tasks/main.yml new file mode 100644 index 0000000..add5120 --- /dev/null +++ b/kubespray/roles/remove-node/pre-remove/tasks/main.yml @@ -0,0 +1,38 @@ +--- +- name: remove-node | List nodes + command: >- + {{ kubectl }} get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %} + register: nodes + delegate_to: "{{ groups['kube_control_plane']|first }}" + changed_when: false + run_once: true + +- name: remove-node | Drain node except daemonsets resource # noqa 301 + command: >- + {{ kubectl }} drain + --force + --ignore-daemonsets + --grace-period {{ drain_grace_period }} + --timeout {{ drain_timeout }} + --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }} + # ignore servers that are not nodes + when: kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines + register: result + failed_when: result.rc != 0 and not allow_ungraceful_removal + delegate_to: "{{ groups['kube_control_plane']|first }}" + until: result.rc == 0 or allow_ungraceful_removal + retries: "{{ drain_retries }}" + delay: "{{ drain_retry_delay_seconds }}" + +- name: remove-node | Wait until Volumes will be detached from the node + command: >- + {{ kubectl }} get volumeattachments -o go-template={% raw %}'{{ range .items }}{{ .spec.nodeName }}{{ "\n" }}{{ end }}'{% endraw %} + register: nodes_with_volumes + delegate_to: "{{ groups['kube_control_plane']|first }}" + changed_when: false + until: not (kube_override_hostname|default(inventory_hostname) in nodes_with_volumes.stdout_lines) + retries: 3 + delay: "{{ drain_grace_period }}" + when: + - not allow_ungraceful_removal + - kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines diff --git a/kubespray/roles/remove-node/remove-etcd-node/tasks/main.yml b/kubespray/roles/remove-node/remove-etcd-node/tasks/main.yml new file mode 100644 index 0000000..7500d6d --- /dev/null +++ b/kubespray/roles/remove-node/remove-etcd-node/tasks/main.yml @@ -0,0 +1,55 @@ +--- +- name: Lookup node IP in kubernetes + command: > + {{ kubectl }} get nodes {{ node }} + -o jsonpath='{range .status.addresses[?(@.type=="InternalIP")]}{@.address}{"\n"}{end}' + register: remove_node_ip + when: + - inventory_hostname in groups['etcd'] + - ip is not defined + - access_ip is not defined + delegate_to: "{{ groups['etcd']|first }}" + failed_when: false + +- name: Set node IP + set_fact: + node_ip: "{{ ip | default(access_ip | default(remove_node_ip.stdout)) | trim }}" + when: + - inventory_hostname in groups['etcd'] + +- name: Make sure node_ip is set + assert: + that: node_ip is defined and node_ip | length > 0 + msg: "Etcd node ip is not set !" 
+ when: + - inventory_hostname in groups['etcd'] + +- name: Lookup etcd member id + shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1" + register: etcd_member_id + ignore_errors: true # noqa ignore-errors + changed_when: false + check_mode: no + tags: + - facts + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}" + ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}" + ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}" + ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379" + delegate_to: "{{ groups['etcd']|first }}" + when: inventory_hostname in groups['etcd'] + +- name: Remove etcd member from cluster + command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}" + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}" + ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}" + ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}" + ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379" + delegate_to: "{{ groups['etcd']|first }}" + when: + - inventory_hostname in groups['etcd'] + - etcd_member_id.stdout | length > 0 diff --git a/kubespray/roles/reset/defaults/main.yml b/kubespray/roles/reset/defaults/main.yml new file mode 100644 index 0000000..e45cee4 --- /dev/null +++ b/kubespray/roles/reset/defaults/main.yml @@ -0,0 +1,3 @@ +--- +flush_iptables: true +reset_restart_network: true diff --git a/kubespray/roles/reset/tasks/main.yml b/kubespray/roles/reset/tasks/main.yml new file mode 100644 index 0000000..24be4cd --- /dev/null +++ b/kubespray/roles/reset/tasks/main.yml @@ -0,0 +1,431 @@ +--- +- name: reset | stop services + service: + name: "{{ item }}" + state: stopped + with_items: + - kubelet.service + - cri-dockerd.service + - cri-dockerd.socket + failed_when: false + tags: + - services + +- name: reset | remove services + file: + path: "/etc/systemd/system/{{ item }}" + state: absent + with_items: + - kubelet.service + - cri-dockerd.service + - cri-dockerd.socket + - calico-node.service + - containerd.service.d/http-proxy.conf + - crio.service.d/http-proxy.conf + - k8s-certs-renew.service + - k8s-certs-renew.timer + register: services_removed + tags: + - services + - containerd + - crio + +- name: reset | Remove Docker + include_role: + name: container-engine/docker + tasks_from: reset + when: container_manager == 'docker' + tags: + - docker + +- name: reset | systemctl daemon-reload # noqa 503 + systemd: + daemon_reload: true + when: services_removed.changed + +- name: reset | check if crictl is present + stat: + path: "{{ bin_dir }}/crictl" + get_attributes: no + get_checksum: no + get_mime: no + register: crictl + +- name: reset | stop all cri containers + shell: "set -o pipefail && {{ bin_dir }}/crictl ps -q | xargs -r {{ bin_dir }}/crictl -t 60s stop" + args: + executable: /bin/bash + register: remove_all_cri_containers + retries: 5 + until: remove_all_cri_containers.rc == 0 + delay: 5 + tags: + - crio + - containerd + when: + - crictl.stat.exists 
+ - container_manager in ["crio", "containerd"] + ignore_errors: true # noqa ignore-errors + +- name: reset | force remove all cri containers + command: "{{ bin_dir }}/crictl rm -a -f" + register: remove_all_cri_containers + retries: 5 + until: remove_all_cri_containers.rc == 0 + delay: 5 + tags: + - crio + - containerd + when: + - crictl.stat.exists + - container_manager in ["crio", "containerd"] + - deploy_container_engine + ignore_errors: true # noqa ignore-errors + +- name: reset | stop and disable crio service + service: + name: crio + state: stopped + enabled: false + failed_when: false + tags: [ crio ] + when: container_manager == "crio" + +- name: reset | forcefully wipe CRI-O's container and image storage + command: "crio wipe -f" + failed_when: false + tags: [ crio ] + when: container_manager == "crio" + +- name: reset | stop all cri pods + shell: "set -o pipefail && {{ bin_dir }}/crictl pods -q | xargs -r {{ bin_dir }}/crictl -t 60s stopp" + args: + executable: /bin/bash + register: remove_all_cri_containers + retries: 5 + until: remove_all_cri_containers.rc == 0 + delay: 5 + tags: [ containerd ] + when: + - crictl.stat.exists + - container_manager == "containerd" + ignore_errors: true # noqa ignore-errors + +- block: + - name: reset | force remove all cri pods + command: "{{ bin_dir }}/crictl rmp -a -f" + register: remove_all_cri_containers + retries: 5 + until: remove_all_cri_containers.rc == 0 + delay: 5 + tags: [ containerd ] + when: + - crictl.stat.exists + - container_manager == "containerd" + + rescue: + - name: reset | force remove all cri pods (rescue) + shell: "ip netns list | cut -d' ' -f 1 | xargs -n1 ip netns delete && {{ bin_dir }}/crictl rmp -a -f" + ignore_errors: true # noqa ignore-errors + changed_when: true + +- name: reset | stop etcd services + service: + name: "{{ item }}" + state: stopped + with_items: + - etcd + - etcd-events + failed_when: false + tags: + - services + +- name: reset | remove etcd services + file: + path: "/etc/systemd/system/{{ item }}.service" + state: absent + with_items: + - etcd + - etcd-events + register: services_removed + tags: + - services + +- name: reset | remove containerd + when: container_manager == 'containerd' + block: + - name: reset | stop containerd service + service: + name: containerd + state: stopped + failed_when: false + tags: + - services + + - name: reset | remove containerd service + file: + path: /etc/systemd/system/containerd.service + state: absent + register: services_removed + tags: + - services + +- name: reset | gather mounted kubelet dirs # noqa 301 + shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac + args: + executable: /bin/bash + warn: false + check_mode: no + register: mounted_dirs + failed_when: false + tags: + - mounts + +- name: reset | unmount kubelet dirs # noqa 301 + command: umount -f {{ item }} + with_items: "{{ mounted_dirs.stdout_lines }}" + register: umount_dir + when: mounted_dirs + retries: 4 + until: umount_dir.rc == 0 + delay: 5 + tags: + - mounts + +- name: flush iptables + iptables: + table: "{{ item }}" + flush: yes + with_items: + - filter + - nat + - mangle + - raw + when: flush_iptables|bool + tags: + - iptables + +- name: flush ip6tables + iptables: + table: "{{ item }}" + flush: yes + ip_version: ipv6 + with_items: + - filter + - nat + - mangle + - raw + when: flush_iptables|bool and enable_dual_stack_networks + tags: + - ip6tables + +- name: Clear IPVS virtual server table + command: "ipvsadm -C" + ignore_errors: true # noqa ignore-errors + 
when: + - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster'] + +- name: reset | check kube-ipvs0 network device + stat: + path: /sys/class/net/kube-ipvs0 + get_attributes: no + get_checksum: no + get_mime: no + register: kube_ipvs0 + +- name: reset | Remove kube-ipvs0 + command: "ip link del kube-ipvs0" + when: + - kube_proxy_mode == 'ipvs' + - kube_ipvs0.stat.exists + +- name: reset | check nodelocaldns network device + stat: + path: /sys/class/net/nodelocaldns + get_attributes: no + get_checksum: no + get_mime: no + register: nodelocaldns_device + +- name: reset | Remove nodelocaldns + command: "ip link del nodelocaldns" + when: + - enable_nodelocaldns|default(false)|bool + - nodelocaldns_device.stat.exists + +- name: reset | find files/dirs with immutable flag in /var/lib/kubelet + command: lsattr -laR /var/lib/kubelet + become: true + register: var_lib_kubelet_files_dirs_w_attrs + changed_when: false + no_log: true + +- name: reset | remove immutable flag from files/dirs in /var/lib/kubelet + file: + path: "{{ filedir_path }}" + state: touch + attributes: "-i" + loop: "{{ var_lib_kubelet_files_dirs_w_attrs.stdout_lines|select('search', 'Immutable')|list }}" + loop_control: + loop_var: file_dir_line + label: "{{ filedir_path }}" + vars: + filedir_path: "{{ file_dir_line.split(' ')[0] }}" + +- name: reset | delete some files and directories + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ kube_config_dir }}" + - /var/lib/kubelet + - "{{ containerd_storage_dir }}" + - "{{ ansible_env.HOME | default('/root') }}/.kube" + - "{{ ansible_env.HOME | default('/root') }}/.helm" + - "{{ ansible_env.HOME | default('/root') }}/.config/helm" + - "{{ ansible_env.HOME | default('/root') }}/.cache/helm" + - "{{ ansible_env.HOME | default('/root') }}/.local/share/helm" + - "{{ etcd_data_dir }}" + - "{{ etcd_events_data_dir }}" + - "{{ etcd_config_dir }}" + - /var/log/calico + - /etc/cni + - /etc/nerdctl + - "{{ nginx_config_dir }}" + - /etc/dnsmasq.d + - /etc/dnsmasq.conf + - /etc/dnsmasq.d-available + - /etc/etcd.env + - /etc/calico + - /etc/NetworkManager/conf.d/calico.conf + - /etc/NetworkManager/conf.d/k8s.conf + - /etc/weave.env + - /opt/cni + - /etc/dhcp/dhclient.d/zdnsupdate.sh + - /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate + - /run/flannel + - /etc/flannel + - /run/kubernetes + - /usr/local/share/ca-certificates/etcd-ca.crt + - /usr/local/share/ca-certificates/kube-ca.crt + - /etc/ssl/certs/etcd-ca.pem + - /etc/ssl/certs/kube-ca.pem + - /etc/pki/ca-trust/source/anchors/etcd-ca.crt + - /etc/pki/ca-trust/source/anchors/kube-ca.crt + - /var/log/pods/ + - "{{ bin_dir }}/kubelet" + - "{{ bin_dir }}/cri-dockerd" + - "{{ bin_dir }}/etcd-scripts" + - "{{ bin_dir }}/etcd" + - "{{ bin_dir }}/etcd-events" + - "{{ bin_dir }}/etcdctl" + - "{{ bin_dir }}/etcdctl.sh" + - "{{ bin_dir }}/kubernetes-scripts" + - "{{ bin_dir }}/kubectl" + - "{{ bin_dir }}/kubeadm" + - "{{ bin_dir }}/helm" + - "{{ bin_dir }}/calicoctl" + - "{{ bin_dir }}/calicoctl.sh" + - "{{ bin_dir }}/calico-upgrade" + - "{{ bin_dir }}/weave" + - "{{ bin_dir }}/crictl" + - "{{ bin_dir }}/nerdctl" + - "{{ bin_dir }}/netctl" + - "{{ bin_dir }}/k8s-certs-renew.sh" + - /var/lib/cni + - /etc/openvswitch + - /run/openvswitch + - /var/lib/kube-router + - /var/lib/calico + - /etc/cilium + - /run/calico + - /etc/bash_completion.d/kubectl.sh + - /etc/bash_completion.d/crictl + - /etc/bash_completion.d/nerdctl + - /etc/bash_completion.d/krew + - /etc/bash_completion.d/krew.sh + - "{{ krew_root_dir }}" + - 
/etc/modules-load.d/kube_proxy-ipvs.conf + - /etc/modules-load.d/kubespray-br_netfilter.conf + - /etc/modules-load.d/kubespray-kata-containers.conf + - /usr/libexec/kubernetes + - /etc/origin/openvswitch + - /etc/origin/ovn + - "{{ sysctl_file_path }}" + - /etc/crictl.yaml + ignore_errors: true # noqa ignore-errors + tags: + - files + +- name: reset | remove containerd binary files + file: + path: "{{ containerd_bin_dir }}/{{ item }}" + state: absent + with_items: + - containerd + - containerd-shim + - containerd-shim-runc-v1 + - containerd-shim-runc-v2 + - containerd-stress + - crictl + - critest + - ctd-decoder + - ctr + - runc + ignore_errors: true # noqa ignore-errors + when: container_manager == 'containerd' + tags: + - files + +- name: reset | remove dns settings from dhclient.conf + blockinfile: + path: "{{ item }}" + state: absent + marker: "# Ansible entries {mark}" + failed_when: false + with_items: + - /etc/dhclient.conf + - /etc/dhcp/dhclient.conf + tags: + - files + - dns + +- name: reset | remove host entries from /etc/hosts + blockinfile: + path: "/etc/hosts" + state: absent + marker: "# Ansible inventory hosts {mark}" + tags: + - files + - dns + +- name: reset | include file with reset tasks specific to the network_plugin if exists + include_role: + name: "network_plugin/{{ kube_network_plugin }}" + tasks_from: reset + when: + - kube_network_plugin in ['flannel', 'cilium', 'kube-router', 'calico'] + tags: + - network + +- name: reset | Restart network + service: + name: >- + {% if ansible_os_family == "RedHat" -%} + {%- if ansible_distribution_major_version|int >= 8 or is_fedora_coreos or ansible_distribution == "Fedora" -%} + NetworkManager + {%- else -%} + network + {%- endif -%} + {%- elif ansible_distribution == "Ubuntu" -%} + systemd-networkd + {%- elif ansible_os_family == "Debian" -%} + networking + {%- endif %} + state: restarted + when: + - ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + - reset_restart_network + tags: + - services + - network diff --git a/kubespray/roles/upgrade/post-upgrade/defaults/main.yml b/kubespray/roles/upgrade/post-upgrade/defaults/main.yml new file mode 100644 index 0000000..aa72843 --- /dev/null +++ b/kubespray/roles/upgrade/post-upgrade/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# how long to wait for cilium after upgrade before uncordoning +upgrade_post_cilium_wait_timeout: 120s +upgrade_node_post_upgrade_confirm: false +upgrade_node_post_upgrade_pause_seconds: 0 diff --git a/kubespray/roles/upgrade/post-upgrade/tasks/main.yml b/kubespray/roles/upgrade/post-upgrade/tasks/main.yml new file mode 100644 index 0000000..d1b1af0 --- /dev/null +++ b/kubespray/roles/upgrade/post-upgrade/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: wait for cilium + when: + - needs_cordoning|default(false) + - kube_network_plugin == 'cilium' + command: > + {{ kubectl }} + wait pod -n kube-system -l k8s-app=cilium + --field-selector 'spec.nodeName=={{ kube_override_hostname|default(inventory_hostname) }}' + --for=condition=Ready + --timeout={{ upgrade_post_cilium_wait_timeout }} + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Confirm node uncordon + pause: + echo: yes + prompt: "Ready to uncordon node?" 
+ when: + - upgrade_node_post_upgrade_confirm + +- name: Wait before uncordoning node + pause: + seconds: "{{ upgrade_node_post_upgrade_pause_seconds }}" + when: + - not upgrade_node_post_upgrade_confirm + - upgrade_node_post_upgrade_pause_seconds != 0 + +- name: Uncordon node + command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: + - needs_cordoning|default(false) diff --git a/kubespray/roles/upgrade/pre-upgrade/defaults/main.yml b/kubespray/roles/upgrade/pre-upgrade/defaults/main.yml new file mode 100644 index 0000000..900b834 --- /dev/null +++ b/kubespray/roles/upgrade/pre-upgrade/defaults/main.yml @@ -0,0 +1,20 @@ +--- +drain_grace_period: 300 +drain_timeout: 360s +drain_pod_selector: "" +drain_nodes: true +drain_retries: 3 +drain_retry_delay_seconds: 10 + +drain_fallback_enabled: false +drain_fallback_grace_period: 300 +drain_fallback_timeout: 360s +drain_fallback_retries: 0 +drain_fallback_retry_delay_seconds: 10 + +upgrade_node_always_cordon: false +upgrade_node_uncordon_after_drain_failure: true +upgrade_node_fail_if_drain_fails: true + +upgrade_node_confirm: false +upgrade_node_pause_seconds: 0 diff --git a/kubespray/roles/upgrade/pre-upgrade/tasks/main.yml b/kubespray/roles/upgrade/pre-upgrade/tasks/main.yml new file mode 100644 index 0000000..210818b --- /dev/null +++ b/kubespray/roles/upgrade/pre-upgrade/tasks/main.yml @@ -0,0 +1,130 @@ +--- +# Wait for upgrade +- name: Confirm node upgrade + pause: + echo: yes + prompt: "Ready to upgrade node? (Press Enter to continue or Ctrl+C for other options)" + when: + - upgrade_node_confirm + +- name: Wait before upgrade node + pause: + seconds: "{{ upgrade_node_pause_seconds }}" + when: + - not upgrade_node_confirm + - upgrade_node_pause_seconds != 0 + +# Node Ready: type = ready, status = True +# Node NotReady: type = ready, status = Unknown +- name: See if node is in ready state + command: > + {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }} + -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }' + register: kubectl_node_ready + delegate_to: "{{ groups['kube_control_plane'][0] }}" + failed_when: false + changed_when: false + +# SchedulingDisabled: unschedulable = true +# else unschedulable key doesn't exist +- name: See if node is schedulable + command: > + {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }} + -o jsonpath='{ .spec.unschedulable }' + register: kubectl_node_schedulable + delegate_to: "{{ groups['kube_control_plane'][0] }}" + failed_when: false + changed_when: false + +- name: Set if node needs cordoning + set_fact: + needs_cordoning: >- + {% if (kubectl_node_ready.stdout == "True" and not kubectl_node_schedulable.stdout) or upgrade_node_always_cordon -%} + true + {%- else -%} + false + {%- endif %} + +- name: Node draining + block: + - name: Cordon node + command: "{{ kubectl }} cordon {{ kube_override_hostname|default(inventory_hostname) }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + changed_when: true + + - name: Check kubectl version + command: "{{ kubectl }} version --client --short" + register: kubectl_version + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: yes + changed_when: false + when: + - drain_nodes + - drain_pod_selector + + - name: Ensure minimum version for drain label selector if necessary + assert: + that: "kubectl_version.stdout.split(' ')[-1] is version('v1.10.0', '>=')" + when: + - 
drain_nodes + - drain_pod_selector + + - name: Drain node + command: >- + {{ kubectl }} drain + --force + --ignore-daemonsets + --grace-period {{ hostvars['localhost']['drain_grace_period_after_failure'] | default(drain_grace_period) }} + --timeout {{ hostvars['localhost']['drain_timeout_after_failure'] | default(drain_timeout) }} + --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }} + {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %} + when: drain_nodes + register: result + failed_when: + - result.rc != 0 + - not drain_fallback_enabled + until: result.rc == 0 + retries: "{{ drain_retries }}" + delay: "{{ drain_retry_delay_seconds }}" + + - name: Drain fallback + block: + - name: Set facts after regular drain has failed + set_fact: + drain_grace_period_after_failure: "{{ drain_fallback_grace_period }}" + drain_timeout_after_failure: "{{ drain_fallback_timeout }}" + delegate_to: localhost + delegate_facts: yes + run_once: yes + + - name: Drain node - fallback with disabled eviction + command: >- + {{ kubectl }} drain + --force + --ignore-daemonsets + --grace-period {{ drain_fallback_grace_period }} + --timeout {{ drain_fallback_timeout }} + --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }} + {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %} + --disable-eviction + register: drain_fallback_result + until: drain_fallback_result.rc == 0 + retries: "{{ drain_fallback_retries }}" + delay: "{{ drain_fallback_retry_delay_seconds }}" + changed_when: drain_fallback_result.rc == 0 + when: + - drain_nodes + - drain_fallback_enabled + - result.rc != 0 + + rescue: + - name: Set node back to schedulable + command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}" + when: upgrade_node_uncordon_after_drain_failure + - name: Fail after rescue + fail: + msg: "Failed to drain node {{ kube_override_hostname|default(inventory_hostname) }}" + when: upgrade_node_fail_if_drain_fails + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: + - needs_cordoning diff --git a/kubespray/roles/win_nodes/kubernetes_patch/defaults/main.yml b/kubespray/roles/win_nodes/kubernetes_patch/defaults/main.yml new file mode 100644 index 0000000..954cb51 --- /dev/null +++ b/kubespray/roles/win_nodes/kubernetes_patch/defaults/main.yml @@ -0,0 +1,4 @@ +--- + +kubernetes_user_manifests_path: "{{ ansible_env.HOME }}/kube-manifests" +kube_proxy_nodeselector: "kubernetes.io/os" diff --git a/kubespray/roles/win_nodes/kubernetes_patch/tasks/main.yml b/kubespray/roles/win_nodes/kubernetes_patch/tasks/main.yml new file mode 100644 index 0000000..a6c70ed --- /dev/null +++ b/kubespray/roles/win_nodes/kubernetes_patch/tasks/main.yml @@ -0,0 +1,41 @@ +--- + +- name: Ensure that user manifests directory exists + file: + path: "{{ kubernetes_user_manifests_path }}/kubernetes" + state: directory + recurse: yes + tags: [init, cni] + +- name: Apply kube-proxy nodeselector + block: + # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch" + - name: Check current nodeselector for kube-proxy daemonset + command: >- + {{ kubectl }} + get ds kube-proxy --namespace=kube-system + -o jsonpath={.spec.template.spec.nodeSelector.{{ kube_proxy_nodeselector | regex_replace('\.', '\\.') }}} + register: current_kube_proxy_state + retries: 60 + delay: 5 + until: current_kube_proxy_state is succeeded + changed_when: false + + - name: Apply nodeselector patch for 
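For reference, the drain-with-fallback flow above can be approximated by hand. The sketch below is hedged: it uses the role's default grace period (300s) and timeout (360s), and the node name is a placeholder.

```shell
# Approximation of the drain task plus its fallback (defaults: 300s grace, 360s timeout).
NODE=worker-1
DRAIN_ARGS="--force --ignore-daemonsets --grace-period 300 --timeout 360s --delete-emptydir-data"

if ! kubectl drain $DRAIN_ARGS "$NODE"; then
  # Fallback corresponding to drain_fallback_enabled: bypass the eviction API,
  # which ignores PodDisruptionBudgets, and uncordon again if even that fails.
  kubectl drain $DRAIN_ARGS --disable-eviction "$NODE" || kubectl uncordon "$NODE"
fi
```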
kube-proxy daemonset + command: > + {{ kubectl }} + patch ds kube-proxy --namespace=kube-system --type=strategic -p + '{"spec":{"template":{"spec":{"nodeSelector":{"{{ kube_proxy_nodeselector }}":"linux"} }}}}' + register: patch_kube_proxy_state + when: current_kube_proxy_state.stdout | trim | lower != "linux" + + - debug: # noqa unnamed-task + msg: "{{ patch_kube_proxy_state.stdout_lines }}" + when: patch_kube_proxy_state is not skipped + + - debug: # noqa unnamed-task + msg: "{{ patch_kube_proxy_state.stderr_lines }}" + when: patch_kube_proxy_state is not skipped + tags: init + when: + - kube_proxy_deployed diff --git a/kubespray/scale.yml b/kubespray/scale.yml new file mode 100644 index 0000000..8e79bfa --- /dev/null +++ b/kubespray/scale.yml @@ -0,0 +1,124 @@ +--- +- name: Check ansible version + import_playbook: ansible_version.yml + +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml + +- hosts: bastion[0] + gather_facts: False + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: bastion-ssh-config, tags: ["localhost", "bastion"] } + +- name: Bootstrap any new workers + hosts: kube_node + strategy: linear + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + gather_facts: false + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: bootstrap-os, tags: bootstrap-os } + +- name: Gather facts + tags: always + import_playbook: facts.yml + +- name: Generate the etcd certificates beforehand + hosts: etcd:kube_control_plane + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - role: etcd + tags: etcd + vars: + etcd_cluster_setup: false + etcd_events_cluster_setup: false + when: + - etcd_deployment_type != "kubeadm" + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + +- name: Download images to ansible host cache via first kube_control_plane node + hosts: kube_control_plane[0] + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost" } + - { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" } + - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" } + +- name: Target only workers to get kubelet installed and checking in on any new nodes(engine) + hosts: kube_node + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: kubernetes/preinstall, tags: preinstall } + - { role: container-engine, tags: "container-engine", when: deploy_container_engine } + - { role: download, tags: download, when: "not skip_downloads" } + - role: etcd + tags: etcd + vars: + etcd_cluster_setup: false + when: + - etcd_deployment_type != "kubeadm" + - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool + - kube_network_plugin != "calico" or calico_datastore == "etcd" + +- name: Target only workers to get kubelet installed and checking in on any new 
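The check-then-patch sequence exists because the exit code of `kubectl patch` cannot be relied on (kubernetes/kubernetes#58212). A hedged shell equivalent of what the role does, with the default `kubernetes.io/os` selector:

```shell
# Only patch when the kube-proxy DaemonSet is not already pinned to Linux nodes.
current=$(kubectl -n kube-system get ds kube-proxy \
  -o jsonpath='{.spec.template.spec.nodeSelector.kubernetes\.io/os}')

if [ "$current" != "linux" ]; then
  kubectl -n kube-system patch ds kube-proxy --type=strategic \
    -p '{"spec":{"template":{"spec":{"nodeSelector":{"kubernetes.io/os":"linux"}}}}}'
fi
```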
nodes(node) + hosts: kube_node + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: kubernetes/node, tags: node } + +- name: Upload control plane certs and retrieve encryption key + hosts: kube_control_plane | first + environment: "{{ proxy_disable_env }}" + gather_facts: False + tags: kubeadm + roles: + - { role: kubespray-defaults } + tasks: + - name: Upload control plane certificates + command: >- + {{ bin_dir }}/kubeadm init phase + --config {{ kube_config_dir }}/kubeadm-config.yaml + upload-certs + --upload-certs + environment: "{{ proxy_disable_env }}" + register: kubeadm_upload_cert + changed_when: false + - name: set fact 'kubeadm_certificate_key' for later use + set_fact: + kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}" + when: kubeadm_certificate_key is not defined + +- name: Target only workers to get kubelet installed and checking in on any new nodes(network) + hosts: kube_node + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: kubernetes/kubeadm, tags: kubeadm } + - { role: kubernetes/node-label, tags: node-label } + - { role: network_plugin, tags: network } + +- name: Apply resolv.conf changes now that cluster DNS is up + hosts: k8s_cluster + gather_facts: False + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + environment: "{{ proxy_disable_env }}" + roles: + - { role: kubespray-defaults } + - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true } diff --git a/kubespray/scripts/collect-info.yaml b/kubespray/scripts/collect-info.yaml new file mode 100644 index 0000000..3f31217 --- /dev/null +++ b/kubespray/scripts/collect-info.yaml @@ -0,0 +1,146 @@ +--- +- hosts: all + become: true + gather_facts: no + + vars: + docker_bin_dir: /usr/bin + bin_dir: /usr/local/bin + ansible_ssh_pipelining: true + etcd_cert_dir: /etc/ssl/etcd/ssl + kube_network_plugin: calico + archive_dirname: collect-info + commands: + - name: timedate_info + cmd: timedatectl status + - name: kernel_info + cmd: uname -r + - name: docker_info + cmd: "{{ docker_bin_dir }}/docker info" + - name: ip_info + cmd: ip -4 -o a + - name: route_info + cmd: ip ro + - name: proc_info + cmd: ps auxf | grep -v ]$ + - name: systemctl_failed_info + cmd: systemctl --state=failed --no-pager + - name: k8s_info + cmd: "{{ bin_dir }}/kubectl get all --all-namespaces -o wide" + - name: errors_info + cmd: journalctl -p err --no-pager + - name: etcd_info + cmd: "{{ bin_dir }}/etcdctl endpoint --cluster health" + - name: calico_info + cmd: "{{ bin_dir }}/calicoctl node status" + when: '{{ kube_network_plugin == "calico" }}' + - name: calico_workload_info + cmd: "{{ bin_dir }}/calicoctl get workloadEndpoint -o wide" + when: '{{ kube_network_plugin == "calico" }}' + - name: calico_pool_info + cmd: "{{ bin_dir }}/calicoctl get ippool -o wide" + when: '{{ kube_network_plugin == "calico" }}' + - name: weave_info + cmd: weave report + when: '{{ kube_network_plugin == "weave" }}' + - name: weave_logs + cmd: "{{ docker_bin_dir }}/docker logs weave" + when: '{{ kube_network_plugin == "weave" }}' + - name: kube_describe_all + cmd: "{{ bin_dir }}/kubectl describe all --all-namespaces" + - name: kube_describe_nodes + cmd: "{{ bin_dir }}/kubectl describe nodes" + - name: 
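A typical invocation of this scale playbook, against an inventory that already describes the running cluster plus the new worker entries, might look like the following; the inventory path is illustrative.

```shell
# Illustrative: join the hosts currently listed under [kube_node] in the inventory
# to an existing cluster.
ansible-playbook -i inventory/mycluster/inventory.ini -b scale.yml

# The run can usually be narrowed with --limit, but the first control-plane host
# must stay reachable for the kubeadm cert-upload and kubectl steps above.
```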
kubelet_logs + cmd: journalctl -u kubelet --no-pager + - name: coredns_logs + cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l k8s-app=coredns -o jsonpath={.items..metadata.name}`; + do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done" + - name: apiserver_logs + cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath={.items..metadata.name}`; + do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done" + - name: controller_logs + cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l component=kube-controller-manager -o jsonpath={.items..metadata.name}`; + do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done" + - name: scheduler_logs + cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l component=kube-scheduler -o jsonpath={.items..metadata.name}`; + do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done" + - name: proxy_logs + cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l k8s-app=kube-proxy -o jsonpath={.items..metadata.name}`; + do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done" + - name: nginx_logs + cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l k8s-app=kube-nginx -o jsonpath={.items..metadata.name}`; + do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done" + - name: flannel_logs + cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l app=flannel -o jsonpath={.items..metadata.name}`; + do {{ bin_dir }}/kubectl logs ${i} -n kube-system flannel-container; done" + when: '{{ kube_network_plugin == "flannel" }}' + - name: canal_logs + cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l k8s-app=canal-node -o jsonpath={.items..metadata.name}`; + do {{ bin_dir }}/kubectl logs ${i} -n kube-system flannel; done" + when: '{{ kube_network_plugin == "canal" }}' + - name: calico_policy_logs + cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l k8s-app=calico-kube-controllers -o jsonpath={.items..metadata.name}`; + do {{ bin_dir }}/kubectl logs ${i} -n kube-system ; done" + when: '{{ kube_network_plugin in ["canal", "calico"] }}' + - name: helm_show_releases_history + cmd: "for i in `{{ bin_dir }}/helm list -q`; do {{ bin_dir }}/helm history ${i} --col-width=0; done" + when: "{{ helm_enabled|default(true) }}" + + logs: + - /var/log/syslog + - /var/log/daemon.log + - /var/log/kern.log + - /var/log/dpkg.log + - /var/log/apt/history.log + - /var/log/yum.log + - /var/log/messages + - /var/log/dmesg + + environment: + ETCDCTL_API: 3 + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" + ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" + ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" + + tasks: + - name: set etcd_access_addresses + set_fact: + etcd_access_addresses: |- + {% for item in groups['etcd'] -%} + https://{{ item }}:2379{% if not loop.last %},{% endif %} + {%- endfor %} + when: "'etcd' in groups" + + - name: Storing commands output + shell: "{{ item.cmd }} &> {{ item.name }}" + failed_when: false + with_items: "{{ commands }}" + when: item.when | default(True) + no_log: True + + - name: Fetch results + fetch: src={{ item.name }} dest=/tmp/{{ archive_dirname }}/commands + with_items: "{{ commands }}" + when: item.when | default(True) + failed_when: false + + - name: Fetch logs + fetch: src={{ item }} dest=/tmp/{{ archive_dirname }}/logs + with_items: "{{ logs }}" + failed_when: false + + - name: Pack results and logs + archive: + path: 
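The playbook gathers all of these command outputs and log files into a single archive on the Ansible controller. A hedged invocation (inventory path and output directory are placeholders):

```shell
# Collect diagnostics from every host; "dir" overrides where logs.tar.gz is written
# on the controller (defaults to the current directory).
ansible-playbook -i inventory/mycluster/inventory.ini scripts/collect-info.yaml -e dir=/tmp
```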
"/tmp/{{ archive_dirname }}" + dest: "{{ dir|default('.') }}/logs.tar.gz" + remove: true + mode: 0640 + delegate_to: localhost + connection: local + become: false + run_once: true + + - name: Clean up collected command outputs + file: path={{ item.name }} state=absent + with_items: "{{ commands }}" diff --git a/kubespray/scripts/download_hash.py b/kubespray/scripts/download_hash.py new file mode 100644 index 0000000..1a1b385 --- /dev/null +++ b/kubespray/scripts/download_hash.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 + +# After a new version of Kubernetes has been released, +# run this script to update roles/download/defaults/main.yml +# with new hashes. + +import hashlib +import sys + +import requests +from ruamel.yaml import YAML + +MAIN_YML = "../roles/download/defaults/main.yml" + +def open_main_yaml(): + yaml = YAML() + yaml.explicit_start = True + yaml.preserve_quotes = True + yaml.width = 4096 + + with open(MAIN_YML, "r") as main_yml: + data = yaml.load(main_yml) + + return data, yaml + + +def download_hash(versions): + architectures = ["arm", "arm64", "amd64", "ppc64le"] + downloads = ["kubelet", "kubectl", "kubeadm"] + + data, yaml = open_main_yaml() + + for download in downloads: + checksum_name = f"{download}_checksums" + for arch in architectures: + for version in versions: + if not version.startswith("v"): + version = f"v{version}" + url = f"https://storage.googleapis.com/kubernetes-release/release/{version}/bin/linux/{arch}/{download}" + download_file = requests.get(url, allow_redirects=True) + download_file.raise_for_status() + sha256sum = hashlib.sha256(download_file.content).hexdigest() + data[checksum_name][arch][version] = sha256sum + + with open(MAIN_YML, "w") as main_yml: + yaml.dump(data, main_yml) + print(f"\n\nUpdated {MAIN_YML}\n") + + +def usage(): + print(f"USAGE:\n {sys.argv[0]} [k8s_version1] [[k8s_version2]....[k8s_versionN]]") + + +def main(argv=None): + if not argv: + argv = sys.argv[1:] + if not argv: + usage() + return 1 + download_hash(argv) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/kubespray/scripts/download_hash.sh b/kubespray/scripts/download_hash.sh new file mode 100644 index 0000000..e15dc2a --- /dev/null +++ b/kubespray/scripts/download_hash.sh @@ -0,0 +1,28 @@ +#!/bin/sh +set -eo pipefail + +VERSIONS="$@" +ARCHITECTURES="arm arm64 amd64 ppc64le" +DOWNLOADS="kubelet kubectl kubeadm" +DOWNLOAD_DIR="tmp/kubeadm_hasher" + +if [ -z "$VERSIONS" ]; then + echo "USAGE: $0 " + exit 1 +fi + +mkdir -p ${DOWNLOAD_DIR} +for download in ${DOWNLOADS}; do + echo -e "\n\n${download}_checksums:" + for arch in ${ARCHITECTURES}; do + echo -e " ${arch}:" + for version in ${VERSIONS}; do + TARGET="${DOWNLOAD_DIR}/${download}-$version-$arch" + if [ ! -f ${TARGET} ]; then + curl -L -f -S -s -o ${TARGET} "https://storage.googleapis.com/kubernetes-release/release/${version}/bin/linux/${arch}/${download}" + fi + echo -e " ${version}: $(sha256sum ${TARGET} | awk '{print $1}')" + done + done +done +echo -e "\n\nAdd these values to roles/download/defaults/main.yml" diff --git a/kubespray/scripts/gen_tags.sh b/kubespray/scripts/gen_tags.sh new file mode 100755 index 0000000..1bc94c8 --- /dev/null +++ b/kubespray/scripts/gen_tags.sh @@ -0,0 +1,12 @@ +#!/bin/sh +set -eo pipefail + +#Generate MD formatted tags from roles and cluster yaml files +printf "|%25s |%9s\n" "Tag name" "Used for" +echo "|--------------------------|---------" +tags=$(grep -r tags: . 
| perl -ne '/tags:\s\[?(([\w\-_]+,?\s?)+)/ && printf "%s ", "$1"'|\ + perl -ne 'print join "\n", split /\s|,/' | sort -u) +for tag in $tags; do + match=$(cat docs/ansible.md | perl -ne "/^\|\s+${tag}\s\|\s+((\S+\s?)+)/ && printf \$1") + printf "|%25s |%s\n" "${tag}" " ${match}" +done diff --git a/kubespray/scripts/gitlab-branch-cleanup/.gitignore b/kubespray/scripts/gitlab-branch-cleanup/.gitignore new file mode 100644 index 0000000..03e7ca8 --- /dev/null +++ b/kubespray/scripts/gitlab-branch-cleanup/.gitignore @@ -0,0 +1,2 @@ +openrc +venv diff --git a/kubespray/scripts/gitlab-branch-cleanup/README.md b/kubespray/scripts/gitlab-branch-cleanup/README.md new file mode 100644 index 0000000..6a2b5ff --- /dev/null +++ b/kubespray/scripts/gitlab-branch-cleanup/README.md @@ -0,0 +1,24 @@ +# gitlab-branch-cleanup + +Cleanup old branches in a GitLab project + +## Installation + +```shell +pip install -r requirements.txt +python main.py --help +``` + +## Usage + +```console +$ export GITLAB_API_TOKEN=foobar +$ python main.py kargo-ci/kubernetes-sigs-kubespray +Deleting branch pr-5220-containerd-systemd from 2020-02-17 ... +Deleting branch pr-5561-feature/cinder_csi_fixes from 2020-02-17 ... +Deleting branch pr-5607-add-flatcar from 2020-02-17 ... +Deleting branch pr-5616-fix-typo from 2020-02-17 ... +Deleting branch pr-5634-helm_310 from 2020-02-18 ... +Deleting branch pr-5644-patch-1 from 2020-02-15 ... +Deleting branch pr-5647-master from 2020-02-17 ... +``` diff --git a/kubespray/scripts/gitlab-branch-cleanup/main.py b/kubespray/scripts/gitlab-branch-cleanup/main.py new file mode 100644 index 0000000..2d7fe1c --- /dev/null +++ b/kubespray/scripts/gitlab-branch-cleanup/main.py @@ -0,0 +1,38 @@ +import gitlab +import argparse +import os +import sys +from datetime import timedelta, datetime, timezone + + +parser = argparse.ArgumentParser( + description='Cleanup old branches in a GitLab project') +parser.add_argument('--api', default='https://gitlab.com/', + help='URL of GitLab API, defaults to gitlab.com') +parser.add_argument('--age', type=int, default=30, + help='Delete branches older than this many days') +parser.add_argument('--prefix', default='pr-', + help='Cleanup only branches with names matching this prefix') +parser.add_argument('--dry-run', action='store_true', + help='Do not delete anything') +parser.add_argument('project', + help='Path of the GitLab project') + +args = parser.parse_args() +limit = datetime.now(timezone.utc) - timedelta(days=args.age) + +if os.getenv('GITLAB_API_TOKEN', '') == '': + print("Environment variable GITLAB_API_TOKEN is required.") + sys.exit(2) + +gl = gitlab.Gitlab(args.api, private_token=os.getenv('GITLAB_API_TOKEN')) +gl.auth() + +p = gl.projects.get(args.project) +for b in p.branches.list(all=True): + date = datetime.fromisoformat(b.commit['created_at']) + if date < limit and not b.protected and not b.default and b.name.startswith(args.prefix): + print("Deleting branch %s from %s ..." 
% + (b.name, date.date().isoformat())) + if not args.dry_run: + b.delete() diff --git a/kubespray/scripts/gitlab-branch-cleanup/requirements.txt b/kubespray/scripts/gitlab-branch-cleanup/requirements.txt new file mode 100644 index 0000000..4a169ed --- /dev/null +++ b/kubespray/scripts/gitlab-branch-cleanup/requirements.txt @@ -0,0 +1 @@ +python-gitlab diff --git a/kubespray/scripts/gitlab-runner.sh b/kubespray/scripts/gitlab-runner.sh new file mode 100644 index 0000000..c05ee7e --- /dev/null +++ b/kubespray/scripts/gitlab-runner.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +docker run -d --name gitlab-runner --restart always -v /srv/gitlab-runner/cache:/srv/gitlab-runner/cache -v /srv/gitlab-runner/config:/etc/gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock gitlab/gitlab-runner:v1.10.0 + +# +#/srv/gitlab-runner/config# cat config.toml +#concurrent = 10 +#check_interval = 1 + +#[[runners]] +# name = "2edf3d71fe19" +# url = "https://gitlab.com" +# token = "THE TOKEN-CHANGEME" +# executor = "docker" +# [runners.docker] +# tls_verify = false +# image = "docker:latest" +# privileged = true +# disable_cache = false +# cache_dir = "/srv/gitlab-runner/cache" +# volumes = ["/var/run/docker.sock:/var/run/docker.sock", "/srv/gitlab-runner/cache:/cache:rw"] +# [runners.cache] diff --git a/kubespray/scripts/openstack-cleanup/.gitignore b/kubespray/scripts/openstack-cleanup/.gitignore new file mode 100644 index 0000000..61f5948 --- /dev/null +++ b/kubespray/scripts/openstack-cleanup/.gitignore @@ -0,0 +1 @@ +openrc diff --git a/kubespray/scripts/openstack-cleanup/README.md b/kubespray/scripts/openstack-cleanup/README.md new file mode 100644 index 0000000..737d2f6 --- /dev/null +++ b/kubespray/scripts/openstack-cleanup/README.md @@ -0,0 +1,21 @@ +# openstack-cleanup + +Tool to deletes openstack servers older than a specific age (default 4h). + +Useful to cleanup orphan servers that are left behind when CI is manually cancelled or fails unexpectedly. + +## Installation + +```shell +pip install -r requirements.txt +python main.py --help +``` + +## Usage + +```console +$ python main.py +This will delete VMs... (ctrl+c to cancel) +Will delete server example1 +Will delete server example2 +``` diff --git a/kubespray/scripts/openstack-cleanup/main.py b/kubespray/scripts/openstack-cleanup/main.py new file mode 100755 index 0000000..511f060 --- /dev/null +++ b/kubespray/scripts/openstack-cleanup/main.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python +import argparse +import openstack +import logging +import datetime +import time + +DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' +PAUSE_SECONDS = 5 + +log = logging.getLogger('openstack-cleanup') + +parser = argparse.ArgumentParser(description='Cleanup OpenStack resources') + +parser.add_argument('-v', '--verbose', action='store_true', + help='Increase verbosity') +parser.add_argument('--hours', type=int, default=4, + help='Age (in hours) of VMs to cleanup (default: 4h)') +parser.add_argument('--dry-run', action='store_true', + help='Do not delete anything') + +args = parser.parse_args() + +oldest_allowed = datetime.datetime.now() - datetime.timedelta(hours=args.hours) + + +def main(): + if args.dry_run: + print('Running in dry-run mode') + else: + print('This will delete resources... 
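Both CI cleanup helpers support a preview mode before deleting anything; a hedged example, assuming they are run from the scripts directory and that the token and credentials shown are placeholders:

```shell
# Dry-run the GitLab branch cleanup against the CI mirror project.
export GITLAB_API_TOKEN=glpat-xxxx
python gitlab-branch-cleanup/main.py --age 30 --prefix pr- --dry-run kargo-ci/kubernetes-sigs-kubespray

# Dry-run the OpenStack cleanup using standard credentials from an openrc file.
. ./openrc
python openstack-cleanup/main.py --dry-run --hours 4
```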
(ctrl+c to cancel)') + time.sleep(PAUSE_SECONDS) + + conn = openstack.connect() + + print('Servers...') + map_if_old(conn.compute.delete_server, + conn.compute.servers()) + + print('Security groups...') + map_if_old(conn.network.delete_security_group, + conn.network.security_groups()) + + print('Ports...') + try: + map_if_old(conn.network.delete_port, + conn.network.ports()) + except openstack.exceptions.ConflictException as ex: + # Need to find subnet-id which should be removed from a router + for sn in conn.network.subnets(): + try: + fn_if_old(conn.network.delete_subnet, sn) + except openstack.exceptions.ConflictException: + for r in conn.network.routers(): + print("Deleting subnet %s from router %s", sn, r) + try: + conn.network.remove_interface_from_router( + r, subnet_id=sn.id) + except Exception as ex: + print("Failed to delete subnet from router as %s", ex) + + # After removing unnecessary subnet from router, retry to delete ports + map_if_old(conn.network.delete_port, + conn.network.ports()) + + print('Subnets...') + map_if_old(conn.network.delete_subnet, + conn.network.subnets()) + + print('Networks...') + for n in conn.network.networks(): + if not n.is_router_external: + fn_if_old(conn.network.delete_network, n) + + +# runs the given fn to all elements of the that are older than allowed +def map_if_old(fn, items): + for item in items: + fn_if_old(fn, item) + + +# run the given fn function only if the passed item is older than allowed +def fn_if_old(fn, item): + created_at = datetime.datetime.strptime(item.created_at, DATE_FORMAT) + if item.name == "default": # skip default security group + return + if created_at < oldest_allowed: + print('Will delete %(name)s (%(id)s)' % item) + if not args.dry_run: + fn(item) + + +if __name__ == '__main__': + # execute only if run as a script + main() diff --git a/kubespray/scripts/openstack-cleanup/requirements.txt b/kubespray/scripts/openstack-cleanup/requirements.txt new file mode 100644 index 0000000..81c57a6 --- /dev/null +++ b/kubespray/scripts/openstack-cleanup/requirements.txt @@ -0,0 +1 @@ +openstacksdk>=0.43.0 diff --git a/kubespray/scripts/premoderator.sh b/kubespray/scripts/premoderator.sh new file mode 100644 index 0000000..94713ef --- /dev/null +++ b/kubespray/scripts/premoderator.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# A naive premoderation script to allow Gitlab CI pipeline on a specific PRs' comment +# Exits with 0, if the pipeline is good to go +# Exits with 1, if the user is not allowed to start pipeline +# Exits with 2, if script is unable to get issue id from CI_BUILD_REF_NAME variable +# Exits with 3, if missing the magic comment in the pipeline to start the pipeline + +CURL_ARGS="-fs --retry 4 --retry-delay 5" +MAGIC="${MAGIC:-ci check this}" +exit_code=0 + +# Get PR number from CI_BUILD_REF_NAME +issue=$(echo ${CI_BUILD_REF_NAME} | perl -ne '/^pr-(\d+)-\S+$/ && print $1') + +if [ "$issue" = "" ]; then + echo "Unable to get issue id from: $CI_BUILD_REF_NAME" + exit 2 +fi + +echo "Fetching labels from PR $issue" +labels=$(curl ${CURL_ARGS} "https://api.github.com/repos/kubernetes-sigs/kubespray/issues/${issue}?access_token=${GITHUB_TOKEN}" | jq '{labels: .labels}' | jq '.labels[].name' | jq -s '') +labels_to_patch=$(echo -n $labels | jq '. 
+ ["needs-ci-auth"]' | tr -d "\n") + +echo "Checking for '$MAGIC' comment in PR $issue" + +# Get the user name from the PR comments with the wanted magic incantation casted +user=$(curl ${CURL_ARGS} "https://api.github.com/repos/kubernetes-sigs/kubespray/issues/${issue}/comments" | jq -M "map(select(.body | contains (\"$MAGIC\"))) | .[0] .user.login" | tr -d '"') + +# Check for the required user group membership to allow (exit 0) or decline (exit >0) the pipeline +if [ "$user" = "" ] || [ "$user" = "null" ]; then + echo "Missing '$MAGIC' comment from one of the OWNERS" + exit_code=3 +else + echo "Found comment from user: $user" + + curl ${CURL_ARGS} "https://api.github.com/orgs/kubernetes-sigs/members/${user}" + + if [ $? -ne 0 ]; then + echo "User does not have permissions to start CI run" + exit_code=1 + else + labels_to_patch=$(echo -n $labels | jq '. - ["needs-ci-auth"]' | tr -d "\n") + exit_code=0 + echo "$user has allowed CI to start" + fi +fi + +# Patch labels on PR +curl ${CURL_ARGS} --request PATCH "https://api.github.com/repos/kubernetes-sigs/kubespray/issues/${issue}?access_token=${GITHUB_TOKEN}" -H "Content-Type: application/json" -d "{\"labels\": ${labels_to_patch}}" + +exit $exit_code diff --git a/kubespray/setup.cfg b/kubespray/setup.cfg new file mode 100644 index 0000000..96f50b6 --- /dev/null +++ b/kubespray/setup.cfg @@ -0,0 +1,62 @@ +[metadata] +name = kubespray +summary = Ansible modules for installing Kubernetes +description-file = + README.md +author = Kubespray +author-email = smainklh@gmail.com +license = Apache License (2.0) +home-page = https://github.com/kubernetes-sigs/kubespray +classifier = + License :: OSI Approved :: Apache Software License + Development Status :: 4 - Beta + Intended Audience :: Developers + Intended Audience :: System Administrators + Intended Audience :: Information Technology + Topic :: Utilities + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[files] +data_files = + usr/share/kubespray/playbooks/ = + cluster.yml + upgrade-cluster.yml + scale.yml + reset.yml + remove-node.yml + extra_playbooks/upgrade-only-k8s.yml + usr/share/kubespray/roles = roles/* + usr/share/kubespray/library = library/* + usr/share/doc/kubespray/ = + LICENSE + README.md + usr/share/doc/kubespray/inventory/ = + inventory/sample/inventory.ini + etc/kubespray/ = + ansible.cfg + etc/kubespray/inventory/sample/group_vars/ = + inventory/sample/group_vars/etcd.yml + etc/kubespray/inventory/sample/group_vars/all/ = + inventory/sample/group_vars/all/all.yml + inventory/sample/group_vars/all/azure.yml + inventory/sample/group_vars/all/coreos.yml + inventory/sample/group_vars/all/docker.yml + inventory/sample/group_vars/all/oci.yml + inventory/sample/group_vars/all/openstack.yml + +[wheel] +universal = 1 + +[pbr] +skip_authors = True +skip_changelog = True + +[bdist_rpm] +group = "System Environment/Libraries" +requires = + ansible + python-jinja2 + python-netaddr diff --git a/kubespray/setup.py b/kubespray/setup.py new file mode 100644 index 0000000..6a931a6 --- /dev/null +++ b/kubespray/setup.py @@ -0,0 +1,19 @@ +# Copyright Red Hat, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
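setup.cfg delegates version and packaging metadata to pbr, so building a distribution is a standard setuptools flow. A hedged sketch, assuming it is run inside a git checkout so pbr can derive the version:

```shell
# Build source and wheel distributions; pbr reads the version from git metadata.
pip install pbr wheel
python setup.py sdist bdist_wheel

# Installing the result places playbooks and roles under /usr/share/kubespray,
# as declared in the [files] data_files section.
pip install dist/kubespray-*.tar.gz
```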
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import setuptools + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/kubespray/test-infra/image-builder/Makefile b/kubespray/test-infra/image-builder/Makefile new file mode 100644 index 0000000..82dba64 --- /dev/null +++ b/kubespray/test-infra/image-builder/Makefile @@ -0,0 +1,2 @@ +deploy: + ansible-playbook -i hosts.ini -e docker_password=$(docker_password) cluster.yml diff --git a/kubespray/test-infra/image-builder/OWNERS b/kubespray/test-infra/image-builder/OWNERS new file mode 100644 index 0000000..0d2e92d --- /dev/null +++ b/kubespray/test-infra/image-builder/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - woopstar + - ant31 +reviewers: + - woopstar + - ant31 diff --git a/kubespray/test-infra/image-builder/cluster.yml b/kubespray/test-infra/image-builder/cluster.yml new file mode 100644 index 0000000..a25de7f --- /dev/null +++ b/kubespray/test-infra/image-builder/cluster.yml @@ -0,0 +1,5 @@ +--- +- hosts: image-builder + gather_facts: false + roles: + - kubevirt-images diff --git a/kubespray/test-infra/image-builder/hosts.ini b/kubespray/test-infra/image-builder/hosts.ini new file mode 100644 index 0000000..e000302 --- /dev/null +++ b/kubespray/test-infra/image-builder/hosts.ini @@ -0,0 +1,4 @@ +image-builder-1 ansible_ssh_host=xxx.xxx.xxx.xxx + +[image-builder] +image-builder-1 diff --git a/kubespray/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml b/kubespray/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml new file mode 100644 index 0000000..5b38495 --- /dev/null +++ b/kubespray/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml @@ -0,0 +1,126 @@ +--- +images_dir: /images/base + +docker_user: kubespray+buildvmimages +docker_host: quay.io +registry: quay.io/kubespray + +images: + ubuntu-1604: + filename: xenial-server-cloudimg-amd64-disk1.img + url: https://storage.googleapis.com/kubespray-images/ubuntu/xenial-server-cloudimg-amd64-disk1.img + checksum: sha256:c0d099383cd064390b568e20d1c39a9c68ba864764404b70f754a7b1b2f808f7 + converted: false + tag: "latest" + + ubuntu-1804: + filename: bionic-server-cloudimg-amd64.img + url: https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img + checksum: sha256:c3d0e03f4245ffaabd7647e6dabf346b944a62b9934d0a89f3a04b4236386af2 + converted: false + tag: "latest" + + ubuntu-2004: + filename: focal-server-cloudimg-amd64.img + url: https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64-disk-kvm.img + checksum: sha256:8faf1f5a27c956ad0c49dac3114a355fbaf1b2d21709e10a18e67213fbb95b81 + converted: false + tag: "latest" + + ubuntu-2204: + filename: jammy-server-cloudimg-amd64.img + url: https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64-disk-kvm.img + checksum: sha256:d3f3f446bf35b2e58b82c10c8fa65525264efe5b0e398238f00ab670f49528ab + converted: false + tag: "latest" + + fedora-35: + filename: Fedora-Cloud-Base-35-1.2.x86_64.qcow2 + url: https://download.fedoraproject.org/pub/fedora/linux/releases/35/Cloud/x86_64/images/Fedora-Cloud-Base-35-1.2.x86_64.qcow2 + checksum: 
sha256:fe84502779b3477284a8d4c86731f642ca10dd3984d2b5eccdf82630a9ca2de6 + converted: true + tag: "latest" + + fedora-36: + filename: Fedora-Cloud-Base-36-1.5.x86_64.qcow2 + url: https://download.fedoraproject.org/pub/fedora/linux/releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2 + checksum: sha256:ca9e514cc2f4a7a0188e7c68af60eb4e573d2e6850cc65b464697223f46b4605 + converted: true + tag: "latest" + + fedora-coreos: + filename: fedora-coreos-32.20200601.3.0-openstack.x86_64.qcow2.xz + url: https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/32.20200601.3.0/x86_64/fedora-coreos-32.20200601.3.0-openstack.x86_64.qcow2.xz + checksum: sha256:fe78c348189d745eb5f6f80ff9eb2af67da8e84880d264f4301faaf7c2a72646 + converted: true + tag: "latest" + + centos-7: + filename: CentOS-7-x86_64-GenericCloud-2009.qcow2 + url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-2009.qcow2 + checksum: sha256:e38bab0475cc6d004d2e17015969c659e5a308111851b0e2715e84646035bdd3 + converted: true + tag: "latest" + + centos-8: + filename: CentOS-8-GenericCloud-8.3.2011-20201204.2.x86_64.qcow2 + url: http://cloud.centos.org/centos/8/x86_64/images/CentOS-8-GenericCloud-8.3.2011-20201204.2.x86_64.qcow2 + checksum: sha256:7ec97062618dc0a7ebf211864abf63629da1f325578868579ee70c495bed3ba0 + converted: true + tag: "latest" + + almalinux-8: + filename: AlmaLinux-8-GenericCloud-latest.x86_64.qcow2 + url: https://repo.almalinux.org/almalinux/8.5/cloud/x86_64/images/AlmaLinux-8-GenericCloud-8.5-20211119.x86_64.qcow2 + checksum: sha256:d629247b12802157be127db53a7fcb484b80fceae9896d750c953a51a8c6688f + converted: true + tag: "latest" + + rockylinux-8: + filename: Rocky-8-GenericCloud-8.6-20220515.x86_64.qcow2 + url: https://download.rockylinux.org/pub/rocky/8.6/images/Rocky-8-GenericCloud-8.6-20220515.x86_64.qcow2 + checksum: sha256:77e79f487c70f6bfa5655d8084e02cb8d31900a2c2a22b2334c3401b40a1231c + converted: true + tag: "latest" + + rockylinux-9: + filename: Rocky-9-GenericCloud-9.0-20220830.0.x86_64.qcow2 + url: https://download.rockylinux.org/pub/rocky/9.0/images/x86_64/Rocky-9-GenericCloud-9.0-20220830.0.x86_64.qcow2 + checksum: sha256:f02570e0ad3653df7f56baa8157739dbe92a003234acd5824dcf94d24694e20b + converted: true + tag: "latest" + + debian-9: + filename: debian-9-openstack-amd64.qcow2 + url: https://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2 + checksum: sha256:01d9345ba7a6523d214d2eaabe07fe7b4b69b28e63d7a6b322521e99e5768719 + converted: true + tag: "latest" + + debian-10: + filename: debian-10-openstack-amd64.qcow2 + url: https://cdimage.debian.org/cdimage/openstack/current-10/debian-10-openstack-amd64.qcow2 + checksum: sha512:296ad8345cb49e52464a0cb8bf4365eb0b9e4220c47ebdd73d134d51effc756d5554aee15027fffd038fef4ad5fa984c94208bce60572d58b2ab26f74bb2a5de + converted: true + tag: "latest" + + debian-11: + filename: debian-11-generic-amd64-20210814-734.qcow2 + url: https://cdimage.debian.org/cdimage/cloud/bullseye/20210814-734/debian-11-generic-amd64-20210814-734.qcow2 + checksum: sha512:ed680265ce925e3e02336b052bb476883e2d3b023f7b7d39d064d58ba5f1856869f75dca637c27c0303b731d082ff23a7e45ea2e3f9bcb8f3c4ce0c24332885d + converted: true + tag: "latest" + + oracle-7: + filename: oracle-linux-76.qcow2 + url: https://storage.googleapis.com/born-images/oracle76/oracle-linux-76.qcow2 + checksum: sha256:f396c03e907fa2a0c94d6807b9f62622f23ee3499df4456ae2a15da381fbdca5 + converted: true + tag: "latest" + + opensuse-leap-15: + filename: 
openSUSE-Leap-15.3.x86_64-1.0.1-NoCloud-Build2.63.qcow2 + url: https://download.opensuse.org/repositories/Cloud:/Images:/Leap_15.3/images/openSUSE-Leap-15.3.x86_64-1.0.1-NoCloud-Build2.63.qcow2 + checksum: sha256:289248945e2d058551c71c1bdcb31a361cefe7136c7fd88a09b524eedfaf5215 + converted: true + tag: "latest" diff --git a/kubespray/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/kubespray/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml new file mode 100644 index 0000000..a0b36be --- /dev/null +++ b/kubespray/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml @@ -0,0 +1,57 @@ +--- + +- name: Create image directory + file: + state: directory + path: "{{ images_dir }}" + mode: 0755 + +- name: Download images files + get_url: + url: "{{ item.value.url }}" + dest: "{{ images_dir }}/{{ item.value.filename }}" + checksum: "{{ item.value.checksum }}" + loop: "{{ images|dict2items }}" + +- name: Unxz compressed images + command: unxz --force {{ images_dir }}/{{ item.value.filename }} + loop: "{{ images|dict2items }}" + when: + - item.value.filename.endswith('.xz') + +- name: Convert images which is not in qcow2 format + command: qemu-img convert -O qcow2 {{ images_dir }}/{{ item.value.filename.rstrip('.xz') }} {{ images_dir }}/{{ item.key }}.qcow2 + loop: "{{ images|dict2items }}" + when: + - not (item.value.converted|bool) + +- name: Make sure all images are ending with qcow2 + command: cp {{ images_dir }}/{{ item.value.filename.rstrip('.xz') }} {{ images_dir }}/{{ item.key }}.qcow2 + loop: "{{ images|dict2items }}" + when: + - item.value.converted|bool + +- name: Resize images # noqa 301 + command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G + loop: "{{ images|dict2items }}" + +# STEP 2: Include the images inside a container +- name: Template default Dockerfile + template: + src: Dockerfile + dest: "{{ images_dir }}/Dockerfile" + mode: 0644 + +- name: Create docker images for each OS # noqa 301 + command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }} + loop: "{{ images|dict2items }}" + +- name: docker login # noqa 301 + command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}" + +- name: docker push image # noqa 301 + command: docker push {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} + loop: "{{ images|dict2items }}" + +- name: docker logout # noqa 301 + command: docker logout -u="{{ docker_user }}" "{{ docker_host }}" diff --git a/kubespray/test-infra/image-builder/roles/kubevirt-images/templates/Dockerfile b/kubespray/test-infra/image-builder/roles/kubevirt-images/templates/Dockerfile new file mode 100644 index 0000000..f776cbf --- /dev/null +++ b/kubespray/test-infra/image-builder/roles/kubevirt-images/templates/Dockerfile @@ -0,0 +1,6 @@ +FROM kubevirt/registry-disk-v1alpha + +ARG cloud_image +MAINTAINER "The Kubespray Project" + +COPY $cloud_image /disk diff --git a/kubespray/test-infra/vagrant-docker/Dockerfile b/kubespray/test-infra/vagrant-docker/Dockerfile new file mode 100644 index 0000000..f8c05e7 --- /dev/null +++ b/kubespray/test-infra/vagrant-docker/Dockerfile @@ -0,0 +1,16 @@ +# Docker image published at quay.io/kubespray/vagrant + +ARG KUBESPRAY_VERSION +FROM quay.io/kubespray/kubespray:${KUBESPRAY_VERSION} + +ENV VAGRANT_VERSION=2.2.19 +ENV VAGRANT_DEFAULT_PROVIDER=libvirt +ENV VAGRANT_ANSIBLE_TAGS=facts + +RUN apt-get update && apt-get install -y wget libvirt-dev openssh-client rsync git + +# 
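For a single image, the role's pipeline boils down to a handful of commands. The sketch below uses the ubuntu-2204 entry from the defaults above and is illustrative rather than a substitute for the role:

```shell
IMAGES_DIR=/images/base
NAME=ubuntu-2204
REGISTRY=quay.io/kubespray

# Convert the cloud image to qcow2, grow it, then bake it into a registry-disk container.
qemu-img convert -O qcow2 "$IMAGES_DIR/jammy-server-cloudimg-amd64.img" "$IMAGES_DIR/$NAME.qcow2"
qemu-img resize "$IMAGES_DIR/$NAME.qcow2" +8G
docker build -t "$REGISTRY/vm-$NAME:latest" --build-arg cloud_image="$NAME.qcow2" "$IMAGES_DIR"
docker push "$REGISTRY/vm-$NAME:latest"
```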
Install Vagrant +RUN wget https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}_x86_64.deb && \ + dpkg -i vagrant_${VAGRANT_VERSION}_x86_64.deb && \ + rm vagrant_${VAGRANT_VERSION}_x86_64.deb && \ + vagrant plugin install vagrant-libvirt diff --git a/kubespray/test-infra/vagrant-docker/README.md b/kubespray/test-infra/vagrant-docker/README.md new file mode 100644 index 0000000..36dcb9e --- /dev/null +++ b/kubespray/test-infra/vagrant-docker/README.md @@ -0,0 +1,24 @@ +# vagrant docker image + +This image is used for the vagrant CI jobs. It is using the libvirt driver. + +## Usage + +```console +$ docker run --net host --rm -it -v /var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock quay.io/kubespray/vagrant +$ vagrant up +Bringing machine 'k8s-1' up with 'libvirt' provider... +Bringing machine 'k8s-2' up with 'libvirt' provider... +Bringing machine 'k8s-3' up with 'libvirt' provider... +[...] +``` + +## Cache + +You can set `/root/kubespray_cache` as a volume to keep cache between runs. + +## Building + +```shell +./build.sh v2.12.5 +``` diff --git a/kubespray/test-infra/vagrant-docker/build.sh b/kubespray/test-infra/vagrant-docker/build.sh new file mode 100755 index 0000000..dcf5445 --- /dev/null +++ b/kubespray/test-infra/vagrant-docker/build.sh @@ -0,0 +1,13 @@ +#!/bin/sh +set -euo pipefail + +if [ "$#" -ne 1 ]; then + echo "Usage: $0 tag" >&2 + exit 1 +fi + +VERSION="$1" +IMG="quay.io/kubespray/vagrant:${VERSION}" + +docker build . --build-arg "KUBESPRAY_VERSION=${VERSION}" --tag "$IMG" +docker push "$IMG" diff --git a/kubespray/tests/Makefile b/kubespray/tests/Makefile new file mode 100644 index 0000000..787449e --- /dev/null +++ b/kubespray/tests/Makefile @@ -0,0 +1,82 @@ +INVENTORY=$(PWD)/../inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini + +$(HOME)/.ssh/id_rsa: + mkdir -p $(HOME)/.ssh + echo $(PRIVATE_KEY) | base64 -d > $(HOME)/.ssh/id_rsa + chmod 400 $(HOME)/.ssh/id_rsa + +init-gce: $(HOME)/.ssh/id_rsa + # echo $(GCE_PEM_FILE) | base64 -d > $(HOME)/.ssh/gce + echo "$(GCE_CREDENTIALS_B64)" | base64 -d > $(HOME)/.ssh/gce.json + +init-do: $(HOME)/.ssh/id_rsa + echo $(DO_PRIVATE_KEY) | base64 -d > $(HOME)/.ssh/id_rsa + +init-packet: + echo $(PACKET_VM_SSH_PRIVATE_KEY) | base64 -d > $(HOME)/.ssh/id_rsa + chmod 400 $(HOME)/.ssh/id_rsa + +create-tf: + ./scripts/create-tf.sh + +delete-tf: + ./scripts/delete-tf.sh + +create-gce: init-gce + ansible-playbook cloud_playbooks/create-gce.yml -i local_inventory/hosts.cfg -c local \ + $(ANSIBLE_LOG_LEVEL) \ + -e @"files/${CI_JOB_NAME}.yml" \ + -e gce_credentials_file=$(HOME)/.ssh/gce.json \ + -e gce_project_id=$(GCE_PROJECT_ID) \ + -e gce_service_account_email=$(GCE_ACCOUNT) \ + -e inventory_path=$(INVENTORY) \ + -e test_id=$(TEST_ID) \ + -e preemptible=$(GCE_PREEMPTIBLE) + + +delete-gce: + ansible-playbook -i $(INVENTORY) cloud_playbooks/delete-gce.yml -c local \ + $(ANSIBLE_LOG_LEVEL) \ + -e @"files/${CI_JOB_NAME}.yml" \ + -e test_id=$(TEST_ID) \ + -e gce_project_id=$(GCE_PROJECT_ID) \ + -e gce_service_account_email=$(GCE_ACCOUNT) \ + -e gce_credentials_file=$(HOME)/.ssh/gce.json \ + -e inventory_path=$(INVENTORY) + +create-do: init-do + ansible-playbook cloud_playbooks/create-do.yml -i local_inventory/hosts.cfg -c local \ + ${ANSIBLE_LOG_LEVEL} \ + -e @"files/${CI_JOB_NAME}.yml" \ + -e inventory_path=$(INVENTORY) \ + -e test_id=${TEST_ID} + +delete-do: + ansible-playbook -i $(INVENTORY) cloud_playbooks/create-do.yml -c local \ + $(ANSIBLE_LOG_LEVEL) \ + -e @"files/${CI_JOB_NAME}.yml" \ + -e 
state=absent \ + -e test_id=${TEST_ID} \ + -e inventory_path=$(INVENTORY) + +create-packet: init-packet + ansible-playbook cloud_playbooks/create-packet.yml -c local \ + $(ANSIBLE_LOG_LEVEL) \ + -e @"files/${CI_JOB_NAME}.yml" \ + -e test_id=$(TEST_ID) \ + -e inventory_path=$(INVENTORY) + +delete-packet: + ansible-playbook cloud_playbooks/delete-packet.yml -c local \ + $(ANSIBLE_LOG_LEVEL) \ + -e @"files/${CI_JOB_NAME}.yml" \ + -e test_id=$(TEST_ID) \ + -e inventory_path=$(INVENTORY) + +create-vagrant: + vagrant up + find / -name vagrant_ansible_inventory + cp /builds/kargo-ci/kubernetes-sigs-kubespray/inventory/sample/vagrant_ansible_inventory $(INVENTORY) + +delete-vagrant: + vagrant destroy -f diff --git a/kubespray/tests/README.md b/kubespray/tests/README.md new file mode 100644 index 0000000..05daed2 --- /dev/null +++ b/kubespray/tests/README.md @@ -0,0 +1,40 @@ +# Kubespray cloud deployment tests + +## Amazon Web Service + +| | Calico | Flannel | Weave | +------------- | ------------- | ------------- | ------------- | +Debian Jessie | [![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-jessie/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-jessie) | [![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-jessie/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-jessie/) | [![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-jessie/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-jessie/) | +Ubuntu Trusty |[![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-trusty/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-trusty/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-trusty/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-trusty/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-trusty/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-trusty)| +RHEL 7.2 |[![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-rhel72/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-rhel72/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-rhel72/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-rhel72/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-rhel72/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-rhel72/)| +CentOS 7 |[![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-centos7/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-centos7/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-centos7/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-centos7/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-centos7/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-centos7/)| + +## Test environment variables + +### Common + +Variable | Description | Required | Default +--------------------- | -------------------------------------- | ---------- | -------- +`TEST_ID` | A unique execution ID for this test | Yes | +`KUBE_NETWORK_PLUGIN` | The network plugin (calico or flannel) | Yes | +`PRIVATE_KEY_FILE` | The path to the SSH private key file | No | + +### AWS Tests + +Variable | Description | Required | Default +--------------------- | ----------------------------------------------- | ---------- | --------- +`AWS_ACCESS_KEY` | The Amazon Access Key ID | Yes | +`AWS_SECRET_KEY` | The Amazon Secret Access Key | Yes | +`AWS_AMI_ID` | The AMI ID to deploy | Yes | +`AWS_KEY_PAIR_NAME` | 
The name of the EC2 key pair to use | Yes | +`AWS_SECURITY_GROUP` | The EC2 Security Group to use | No | default +`AWS_REGION` | The EC2 region | No | eu-central-1 + +#### Use private ssh key + +##### Key + +```bash +openssl pkcs12 -in gce-secure.p12 -passin pass:notasecret -nodes -nocerts | openssl rsa -out gce-secure.pem +cat gce-secure.pem |base64 -w0 > GCE_PEM_FILE` +``` diff --git a/kubespray/tests/ansible.cfg b/kubespray/tests/ansible.cfg new file mode 100644 index 0000000..ad28272 --- /dev/null +++ b/kubespray/tests/ansible.cfg @@ -0,0 +1,14 @@ +[ssh_connection] +pipelining=True +ansible_ssh_common_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 +retries=2 +[defaults] +forks = 20 +host_key_checking=False +gathering = smart +fact_caching = jsonfile +fact_caching_connection = /tmp +stdout_callback = skippy +library = ./library:../library +callbacks_enabled = profile_tasks +jinja2_extensions = jinja2.ext.do diff --git a/kubespray/tests/cloud_playbooks/create-aws.yml b/kubespray/tests/cloud_playbooks/create-aws.yml new file mode 100644 index 0000000..8a03c92 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/create-aws.yml @@ -0,0 +1,25 @@ +--- +- hosts: localhost + become: False + gather_facts: False + + tasks: + - name: Provision a set of instances + ec2: + key_name: "{{ aws.key_name }}" + aws_access_key: "{{ aws.access_key }}" + aws_secret_key: "{{ aws.secret_key }}" + region: "{{ aws.region }}" + group_id: "{{ aws.group }}" + instance_type: "{{ aws.instance_type }}" + image: "{{ aws.ami_id }}" + wait: true + count: "{{ aws.count }}" + instance_tags: "{{ aws.tags }}" + register: ec2 + + - name: Template the inventory + template: + src: ../templates/inventory-aws.j2 # noqa 404 CI inventory templates are not in role_path + dest: "{{ inventory_path }}" + mode: 0644 diff --git a/kubespray/tests/cloud_playbooks/create-do.yml b/kubespray/tests/cloud_playbooks/create-do.yml new file mode 100644 index 0000000..3726eb1 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/create-do.yml @@ -0,0 +1,91 @@ +--- +- hosts: localhost + become: false + gather_facts: no + vars: + state: "present" + ssh_key_id: "6536865" + cloud_machine_type: 2gb + regions: + - nyc1 + - sfo1 + - nyc2 + - ams2 + - sgp1 + - lon1 + - nyc3 + - ams3 + - fra1 + - tor1 + - sfo2 + - blr1 + cloud_images: + - fedora-24-x64 + - centos-5-x64 + - centos-5-x32 + - fedora-25-x64 + - debian-7-x64 + - debian-7-x32 + - debian-8-x64 + - debian-8-x32 + - centos-6-x32 + - centos-6-x64 + - ubuntu-16-10-x32 + - ubuntu-16-10-x64 + - freebsd-11-0-x64-zfs + - freebsd-10-3-x64-zfs + - ubuntu-12-04-x32 + - ubuntu-12-04-x64 + - ubuntu-16-04-x64 + - ubuntu-16-04-x32 + - ubuntu-14-04-x64 + - ubuntu-14-04-x32 + - centos-7-x64 + - freebsd-11-0-x64 + - freebsd-10-3-x64 + - centos-7-3-1611-x64 + mode: default + + tasks: + - name: replace_test_id + set_fact: + test_name: "{{ test_id |regex_replace('\\.', '-') }}" + + - name: show vars + debug: msg="{{ cloud_region }}, {{ cloud_image }}" + + - name: set instance names + set_fact: + instance_names: >- + {%- if mode in ['separate', 'ha'] -%} + ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"] + {%- else -%} + ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2"] + {%- endif -%} + + - name: Manage DO instances | {{ state }} + digital_ocean: + unique_name: yes + api_token: "{{ lookup('env','DO_API_TOKEN') }}" + command: "droplet" + image_id: "{{ cloud_image }}" + name: "{{ item }}" + private_networking: no + region_id: "{{ cloud_region }}" + size_id: "{{ 
cloud_machine_type }}" + ssh_key_ids: "{{ ssh_key_id }}" + state: "{{ state }}" + wait: yes + register: droplets + with_items: "{{ instance_names }}" + + - debug: # noqa unnamed-task + msg: "{{ droplets }}, {{ inventory_path }}" + when: state == 'present' + + - name: Template the inventory + template: + src: ../templates/inventory-do.j2 # noqa 404 CI templates are not in role_path + dest: "{{ inventory_path }}" + mode: 0644 + when: state == 'present' diff --git a/kubespray/tests/cloud_playbooks/create-gce.yml b/kubespray/tests/cloud_playbooks/create-gce.yml new file mode 100644 index 0000000..f94b05b --- /dev/null +++ b/kubespray/tests/cloud_playbooks/create-gce.yml @@ -0,0 +1,77 @@ +--- +- hosts: localhost + become: false + gather_facts: no + vars: + cloud_machine_type: g1-small + mode: default + preemptible: no + ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}" + delete_group_vars: no + tasks: + - name: include vars for test {{ ci_job_name }} + include_vars: "../files/{{ ci_job_name }}.yml" + + - name: replace_test_id + set_fact: + test_name: "{{ test_id |regex_replace('\\.', '-') }}" + + - name: set instance names + set_fact: + instance_names: >- + {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%} + k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3 + {%- elif mode == 'aio' -%} + k8s-{{ test_name }}-1 + {%- else -%} + k8s-{{ test_name }}-1,k8s-{{ test_name }}-2 + {%- endif -%} + + - name: Create gce instances + google.cloud.gcp_compute_instance: + instance_names: "{{ instance_names }}" + machine_type: "{{ cloud_machine_type }}" + image: "{{ cloud_image | default(omit) }}" + image_family: "{{ cloud_image_family | default(omit) }}" + preemptible: "{{ preemptible }}" + service_account_email: "{{ gce_service_account_email }}" + pem_file: "{{ gce_pem_file | default(omit) }}" + credentials_file: "{{ gce_credentials_file | default(omit) }}" + project_id: "{{ gce_project_id }}" + zone: "{{ cloud_region }}" + metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}", "startup-script": "{{ startup_script|default("") }}"}' + tags: "build-{{ test_name }},{{ kube_network_plugin }}" + ip_forward: yes + service_account_permissions: ['compute-rw'] + register: gce + + - name: Add instances to host group + add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts" + with_items: '{{ gce.instance_data }}' + + - name: Template the inventory # noqa 404 CI inventory templates are not in role_path + template: + src: ../templates/inventory-gce.j2 + dest: "{{ inventory_path }}" + mode: 0644 + + - name: Make group_vars directory + file: + path: "{{ inventory_path|dirname }}/group_vars" + state: directory + mode: 0755 + when: mode in ['scale', 'separate-scale', 'ha-scale'] + + - name: Template fake hosts group vars # noqa 404 CI templates are not in role_path + template: + src: ../templates/fake_hosts.yml.j2 + dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml" + mode: 0644 + when: mode in ['scale', 'separate-scale', 'ha-scale'] + + - name: Delete group_vars directory + file: + path: "{{ inventory_path|dirname }}/group_vars" + state: absent + recurse: yes + when: delete_group_vars diff --git a/kubespray/tests/cloud_playbooks/create-packet.yml b/kubespray/tests/cloud_playbooks/create-packet.yml new file mode 100644 index 0000000..0136ab3 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/create-packet.yml @@ -0,0 +1,10 @@ +--- + +- hosts: localhost + gather_facts: no + become: true + vars: + ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}" + 
test_name: "{{ test_id | regex_replace('\\.', '-') }}" + roles: + - { role: packet-ci, vm_cleanup: false } diff --git a/kubespray/tests/cloud_playbooks/delete-aws.yml b/kubespray/tests/cloud_playbooks/delete-aws.yml new file mode 100644 index 0000000..02f9b06 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/delete-aws.yml @@ -0,0 +1,18 @@ +--- +- hosts: kube_node + become: False + + tasks: + - name: Gather EC2 facts + action: ec2_facts + + - name: Terminate EC2 instances + ec2: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + state: absent + instance_ids: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + wait: True + delegate_to: localhost + connection: local diff --git a/kubespray/tests/cloud_playbooks/delete-gce.yml b/kubespray/tests/cloud_playbooks/delete-gce.yml new file mode 100644 index 0000000..b88abea --- /dev/null +++ b/kubespray/tests/cloud_playbooks/delete-gce.yml @@ -0,0 +1,48 @@ +--- +- hosts: localhost + become: false + gather_facts: no + vars: + mode: default + + tasks: + - name: replace_test_id + set_fact: + test_name: "{{ test_id |regex_replace('\\.', '-') }}" + + - name: set instance names + set_fact: + instance_names: >- + {%- if mode in ['separate', 'ha'] -%} + k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3 + {%- else -%} + k8s-{{ test_name }}-1,k8s-{{ test_name }}-2 + {%- endif -%} + + - name: stop gce instances + google.cloud.gcp_compute_instance: + instance_names: "{{ instance_names }}" + image: "{{ cloud_image | default(omit) }}" + service_account_email: "{{ gce_service_account_email }}" + pem_file: "{{ gce_pem_file | default(omit) }}" + credentials_file: "{{ gce_credentials_file | default(omit) }}" + project_id: "{{ gce_project_id }}" + zone: "{{ cloud_region | default('europe-west1-b') }}" + state: 'stopped' + async: 120 + poll: 3 + register: gce + + - name: delete gce instances + google.cloud.gcp_compute_instance: + instance_names: "{{ instance_names }}" + image: "{{ cloud_image | default(omit) }}" + service_account_email: "{{ gce_service_account_email }}" + pem_file: "{{ gce_pem_file | default(omit) }}" + credentials_file: "{{ gce_credentials_file | default(omit) }}" + project_id: "{{ gce_project_id }}" + zone: "{{ cloud_region | default('europe-west1-b') }}" + state: 'absent' + async: 120 + poll: 3 + register: gce diff --git a/kubespray/tests/cloud_playbooks/delete-packet.yml b/kubespray/tests/cloud_playbooks/delete-packet.yml new file mode 100644 index 0000000..3895263 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/delete-packet.yml @@ -0,0 +1,10 @@ +--- + +- hosts: localhost + gather_facts: no + become: true + vars: + ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}" + test_name: "{{ test_id | regex_replace('\\.', '-') }}" + roles: + - { role: packet-ci, vm_cleanup: true } diff --git a/kubespray/tests/cloud_playbooks/roles/packet-ci/defaults/main.yml b/kubespray/tests/cloud_playbooks/roles/packet-ci/defaults/main.yml new file mode 100644 index 0000000..f2c8236 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/roles/packet-ci/defaults/main.yml @@ -0,0 +1,43 @@ +--- + +# VM sizing +vm_cpu_cores: 2 +vm_cpu_sockets: 1 +vm_cpu_threads: 2 +vm_memory: 2048Mi + +# Request/Limit allocation settings + +cpu_allocation_ratio: 0.5 +memory_allocation_ratio: 1 + +# Default path for inventory +inventory_path: "/tmp/{{ test_name }}/inventory" + +# Deployment mode +mode: aio + +# Cloud init config for each os type +# distro: fedora -> 
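Each cloud_init value below is simply a base64-encoded cloud-config document; to inspect or adjust one (for example to swap the authorized SSH key), decode it, edit it, and re-encode it. A hedged sketch:

```shell
# Decode one of the cloud_init entries into a readable cloud-config, edit, then re-encode.
echo "<base64 value from this file>" | base64 -d > user-data.yml
"$EDITOR" user-data.yml
base64 -w0 user-data.yml   # paste the output back into the matching cloud_init key
```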
I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU= +# distro: rhel: -> I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo= +# distro: rhel (+ sudo and hostname packages): -> I2Nsb3VkLWNvbmZpZwpwYWNrYWdlczoKIC0gc3VkbwogLSBob3N0bmFtZQpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo= +# generic one -> I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1 +cloud_init: + centos-7: 
"I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo=" + centos-8: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo=" + almalinux-8: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo=" + rockylinux-8: "I2Nsb3VkLWNvbmZpZwpwYWNrYWdlczoKIC0gc3VkbwogLSBob3N0bmFtZQpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo=" + rockylinux-9: 
"I2Nsb3VkLWNvbmZpZwpwYWNrYWdlczoKIC0gc3VkbwogLSBob3N0bmFtZQpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo=" + debian-9: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1" + debian-10: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1" + debian-11: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1" + fedora-35: 
"I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU=" + fedora-36: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU=" + opensuse-leap-15: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1" + rhel-server-7: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo=" + amazon-linux-2: 
"I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo=" + ubuntu-1604: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1" + ubuntu-1804: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1" + ubuntu-2004: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1" + ubuntu-2204: 
"I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1" + oracle-7: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo=" diff --git a/kubespray/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml b/kubespray/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml new file mode 100644 index 0000000..bf63c91 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml @@ -0,0 +1,49 @@ +--- + +- name: "Create CI namespace {{ test_name }} for test vms" + command: "kubectl create namespace {{ test_name }}" + changed_when: false + +- name: "Create temp dir /tmp/{{ test_name }} for CI files" + file: + path: "/tmp/{{ test_name }}" + state: directory + mode: 0755 + +- name: Template vm files for CI job + template: + src: "vm.yml.j2" + dest: "/tmp/{{ test_name }}/instance-{{ vm_id }}.yml" + mode: 0644 + loop: "{{ range(1, vm_count|int + 1, 1) | list }}" + loop_control: + index_var: vm_id + +- name: Start vms for CI job + command: "kubectl apply -f /tmp/{{ test_name }}/instance-{{ vm_id }}.yml" + changed_when: false + loop: "{{ range(1, vm_count|int + 1, 1) | list }}" + loop_control: + index_var: vm_id + +- name: Wait for vms to have ipaddress assigned + shell: "set -o pipefail && kubectl get vmis -n {{ test_name }} instance-{{ vm_id }} -o json | jq '.status.interfaces[].ipAddress' | tr -d '\"'" + args: + executable: /bin/bash + changed_when: false + register: vm_ips + loop: "{{ range(1, vm_count|int + 1, 1) | list }}" + loop_control: + index_var: vm_id + retries: 20 + delay: 15 + until: + - vm_ips.stdout | ipaddr + +- name: "Create inventory for CI test in file /tmp/{{ test_name }}/inventory" + template: + src: "inventory.j2" + dest: "{{ inventory_path }}" + mode: 0644 + vars: + vms: "{{ vm_ips }}" diff --git a/kubespray/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml b/kubespray/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml new file mode 100644 index 0000000..353f991 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml @@ -0,0 +1,30 @@ +--- + +- name: Check if temp directory for {{ 
test_name }} exists + stat: + path: "/tmp/{{ test_name }}" + get_attributes: no + get_checksum: no + get_mime: no + register: temp_dir_details + +- name: "Cleanup temp directory for {{ test_name }}" + file: + path: "/tmp/{{ test_name }}" + state: absent + +- name: "Cleanup namespace for {{ test_name }}" + command: "kubectl delete namespace {{ test_name }}" + changed_when: false + +- name: Wait for namespace {{ test_name }} to be fully deleted + command: kubectl get ns {{ test_name }} + register: delete_namespace + failed_when: + - delete_namespace.rc == 0 + changed_when: + - delete_namespace.rc == 0 + retries: 12 + delay: "10" + until: + - delete_namespace.rc != 0 diff --git a/kubespray/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml b/kubespray/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml new file mode 100644 index 0000000..bf4e974 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml @@ -0,0 +1,16 @@ +--- + +- name: "Include custom vars for ci job: {{ ci_job_name }}" + include_vars: "../files/{{ ci_job_name }}.yml" + +- name: Set VM count needed for CI test_id + set_fact: + vm_count: "{%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale', 'ha-recover', 'ha-recover-noquorum'] -%}{{ 3|int }}{%- elif mode == 'aio' -%}{{ 1|int }}{%- else -%}{{ 2|int }}{%- endif -%}" + +- import_tasks: create-vms.yml + when: + - not vm_cleanup + +- import_tasks: delete-vms.yml + when: + - vm_cleanup | default(false) diff --git a/kubespray/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 b/kubespray/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 new file mode 100644 index 0000000..c49d582 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 @@ -0,0 +1,93 @@ +[all] +{% for instance in vms.results %} +instance-{{ loop.index }} ansible_host={{instance.stdout}} +{% endfor %} + +{% if mode is defined and mode in ["separate", "separate-scale"] %} +[kube_control_plane] +instance-1 + +[kube_node] +instance-2 + +[etcd] +instance-3 +{% elif mode is defined and mode in ["ha", "ha-scale"] %} +[kube_control_plane] +instance-1 +instance-2 + +[kube_node] +instance-3 + +[etcd] +instance-1 +instance-2 +instance-3 +{% elif mode == "default" %} +[kube_control_plane] +instance-1 + +[kube_node] +instance-2 + +[etcd] +instance-1 +{% elif mode == "aio" %} +[kube_control_plane] +instance-1 + +[kube_node] +instance-1 + +[etcd] +instance-1 +{% elif mode == "ha-recover" %} +[kube_control_plane] +instance-1 +instance-2 + +[kube_node] +instance-3 + +[etcd] +instance-3 +instance-1 +instance-2 + +[broken_kube_control_plane] +instance-2 + +[broken_etcd] +instance-2 etcd_member_name=etcd3 +{% elif mode == "ha-recover-noquorum" %} +[kube_control_plane] +instance-3 +instance-1 +instance-2 + +[kube_node] +instance-3 + +[etcd] +instance-3 +instance-1 +instance-2 + +[broken_kube_control_plane] +instance-1 +instance-2 + +[broken_etcd] +instance-1 etcd_member_name=etcd2 +instance-2 etcd_member_name=etcd3 +{% endif %} + +[k8s_cluster:children] +kube_node +kube_control_plane +calico_rr + +[calico_rr] + +[fake_hosts] diff --git a/kubespray/tests/cloud_playbooks/roles/packet-ci/templates/vm.yml.j2 b/kubespray/tests/cloud_playbooks/roles/packet-ci/templates/vm.yml.j2 new file mode 100644 index 0000000..6a8e027 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/roles/packet-ci/templates/vm.yml.j2 @@ -0,0 +1,52 @@ +--- +apiVersion: kubevirt.io/v1alpha3 +kind: VirtualMachine +metadata: + name: "instance-{{ vm_id }}" + namespace: "{{ test_name 
}}" + labels: + kubevirt.io/os: {{ cloud_image }} +spec: + running: true + template: + metadata: + labels: + kubevirt.io/size: small + kubevirt.io/domain: "{{ test_name }}" + spec: + domain: + devices: + blockMultiQueue: true + disks: + - disk: + bus: virtio + name: containervolume + cache: writethrough + - disk: + bus: virtio + name: cloudinitvolume + interfaces: + - name: default + bridge: {} + cpu: + cores: {{ vm_cpu_cores }} + sockets: {{ vm_cpu_sockets }} + threads: {{ vm_cpu_threads }} + resources: + requests: + memory: {{ vm_memory * memory_allocation_ratio }} + cpu: {{ vm_cpu_cores * cpu_allocation_ratio }} + limits: + memory: {{ vm_memory }} + cpu: {{ vm_cpu_cores }} + networks: + - name: default + pod: {} + terminationGracePeriodSeconds: 0 + volumes: + - name: containervolume + containerDisk: + image: quay.io/kubespray/vm-{{ cloud_image }} + - name: cloudinitvolume + cloudInitNoCloud: + userDataBase64: {{ cloud_init[cloud_image] }} diff --git a/kubespray/tests/cloud_playbooks/templates/boto.j2 b/kubespray/tests/cloud_playbooks/templates/boto.j2 new file mode 100644 index 0000000..660f1a0 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/templates/boto.j2 @@ -0,0 +1,11 @@ +[Credentials] +gs_access_key_id = {{ gs_key }} +gs_secret_access_key = {{ gs_skey }} +[Boto] +https_validate_certificates = True +[GoogleCompute] +[GSUtil] +default_project_id = {{ gce_project_id }} +content_language = en +default_api_version = 2 +[OAuth2] diff --git a/kubespray/tests/cloud_playbooks/templates/gcs_life.json.j2 b/kubespray/tests/cloud_playbooks/templates/gcs_life.json.j2 new file mode 100644 index 0000000..a666c8f --- /dev/null +++ b/kubespray/tests/cloud_playbooks/templates/gcs_life.json.j2 @@ -0,0 +1,9 @@ +{ + "rule": + [ + { + "action": {"type": "Delete"}, + "condition": {"age": {{expire_days}}} + } + ] +} diff --git a/kubespray/tests/cloud_playbooks/upload-logs-gcs.yml b/kubespray/tests/cloud_playbooks/upload-logs-gcs.yml new file mode 100644 index 0000000..eeb0edb --- /dev/null +++ b/kubespray/tests/cloud_playbooks/upload-logs-gcs.yml @@ -0,0 +1,80 @@ +--- +- hosts: localhost + become: false + gather_facts: no + + vars: + expire_days: 2 + + tasks: + - name: Generate uniq bucket name prefix + raw: date +%Y%m%d + changed_when: false + register: out + + - name: replace_test_id + set_fact: + test_name: "kargo-ci-{{ out.stdout_lines[0] }}" + + - name: Set file_name for logs + set_fact: + file_name: "{{ ostype }}-{{ kube_network_plugin }}-{{ commit }}-logs.tar.gz" + + - name: Create a bucket + gc_storage: + bucket: "{{ test_name }}" + mode: create + permission: public-read + gs_access_key: "{{ gs_key }}" + gs_secret_key: "{{ gs_skey }}" + no_log: True + + - name: Create a lifecycle template for the bucket + template: + src: gcs_life.json.j2 + dest: "{{ dir }}/gcs_life.json" + mode: 0644 + + - name: Create a boto config to access GCS + template: + src: boto.j2 + dest: "{{ dir }}/.boto" + mode: 0640 + no_log: True + + - name: Download gsutil cp installer + get_url: + url: https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash + dest: "{{ dir }}/gcp-installer.sh" + + - name: Get gsutil tool + script: "{{ dir }}/gcp-installer.sh" + environment: + CLOUDSDK_CORE_DISABLE_PROMPTS: 1 + CLOUDSDK_INSTALL_DIR: "{{ dir }}" + no_log: True + failed_when: false + + - name: Apply the lifecycle rules # noqa 301 + command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}" + changed_when: false + environment: + BOTO_CONFIG: "{{ dir }}/.boto" + 
no_log: True + + - name: Upload collected diagnostic info + gc_storage: + bucket: "{{ test_name }}" + mode: put + permission: public-read + object: "{{ file_name }}" + src: "{{ dir }}/logs.tar.gz" + headers: '{"Content-Encoding": "x-gzip"}' + gs_access_key: "{{ gs_key }}" + gs_secret_key: "{{ gs_skey }}" + expiration: "{{ expire_days * 36000|int }}" + failed_when: false + no_log: True + + - debug: # noqa unnamed-task + msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}" diff --git a/kubespray/tests/cloud_playbooks/wait-for-ssh.yml b/kubespray/tests/cloud_playbooks/wait-for-ssh.yml new file mode 100644 index 0000000..7c439d9 --- /dev/null +++ b/kubespray/tests/cloud_playbooks/wait-for-ssh.yml @@ -0,0 +1,12 @@ +--- +- hosts: all + become: False + gather_facts: False + + tasks: + - name: Wait until SSH is available + wait_for: + host: "{{ ansible_host }}" + port: 22 + timeout: 240 + delegate_to: localhost diff --git a/kubespray/tests/common/_docker_hub_registry_mirror.yml b/kubespray/tests/common/_docker_hub_registry_mirror.yml new file mode 100644 index 0000000..de2de9a --- /dev/null +++ b/kubespray/tests/common/_docker_hub_registry_mirror.yml @@ -0,0 +1,34 @@ +--- +docker_registry_mirrors: + - "https://mirror.gcr.io" + +containerd_grpc_max_recv_message_size: 16777216 +containerd_grpc_max_send_message_size: 16777216 + +containerd_registries: + "docker.io": + - "https://mirror.gcr.io" + - "https://registry-1.docker.io" + +containerd_max_container_log_line_size: -1 + +crio_registries: + - prefix: docker.io + insecure: false + blocked: false + unqualified: false + location: registry-1.docker.io + mirrors: + - location: mirror.gcr.io + insecure: false + +netcheck_agent_image_repo: "{{ quay_image_repo }}/kubespray/k8s-netchecker-agent" +netcheck_server_image_repo: "{{ quay_image_repo }}/kubespray/k8s-netchecker-server" + +nginx_image_repo: "{{ quay_image_repo }}/kubespray/nginx" + +flannel_image_repo: "{{ quay_image_repo}}/kubespray/flannel" + +# Kubespray settings for tests +deploy_netchecker: true +dns_min_replicas: 1 diff --git a/kubespray/tests/common/_kubespray_test_settings.yml b/kubespray/tests/common/_kubespray_test_settings.yml new file mode 100644 index 0000000..67da05c --- /dev/null +++ b/kubespray/tests/common/_kubespray_test_settings.yml @@ -0,0 +1,5 @@ +--- +# Kubespray settings for tests +deploy_netchecker: true +dns_min_replicas: 1 +unsafe_show_logs: true diff --git a/kubespray/tests/files/packet_almalinux8-calico-ha-ebpf.yml b/kubespray/tests/files/packet_almalinux8-calico-ha-ebpf.yml new file mode 100644 index 0000000..ee443f1 --- /dev/null +++ b/kubespray/tests/files/packet_almalinux8-calico-ha-ebpf.yml @@ -0,0 +1,11 @@ +--- +# Instance settings +cloud_image: almalinux-8 +mode: ha +vm_memory: 3072Mi + +# Kubespray settings +calico_bpf_enabled: true +loadbalancer_apiserver_localhost: true +use_localhost_as_kubeapi_loadbalancer: true +auto_renew_certificates: true diff --git a/kubespray/tests/files/packet_almalinux8-calico-nodelocaldns-secondary.yml b/kubespray/tests/files/packet_almalinux8-calico-nodelocaldns-secondary.yml new file mode 100644 index 0000000..52ef869 --- /dev/null +++ b/kubespray/tests/files/packet_almalinux8-calico-nodelocaldns-secondary.yml @@ -0,0 +1,9 @@ +--- +# Instance settings +cloud_image: almalinux-8 +mode: default +vm_memory: 3072Mi + +# Kubespray settings +enable_nodelocaldns_secondary: true +loadbalancer_apiserver_type: haproxy diff --git a/kubespray/tests/files/packet_almalinux8-calico-remove-node.yml 
b/kubespray/tests/files/packet_almalinux8-calico-remove-node.yml new file mode 100644 index 0000000..4cb5dfc --- /dev/null +++ b/kubespray/tests/files/packet_almalinux8-calico-remove-node.yml @@ -0,0 +1,7 @@ +--- +# Instance settings +cloud_image: almalinux-8 +mode: ha + +# Kubespray settings +auto_renew_certificates: true diff --git a/kubespray/tests/files/packet_almalinux8-calico.yml b/kubespray/tests/files/packet_almalinux8-calico.yml new file mode 100644 index 0000000..1df4a64 --- /dev/null +++ b/kubespray/tests/files/packet_almalinux8-calico.yml @@ -0,0 +1,18 @@ +--- +# Instance settings +cloud_image: almalinux-8 +mode: default +vm_memory: 3072Mi + +# Kubespray settings +metrics_server_enabled: true +dashboard_namespace: "kube-dashboard" +dashboard_enabled: true +loadbalancer_apiserver_type: haproxy + +# NTP management +ntp_enabled: true +ntp_timezone: Etc/UTC +ntp_manage_config: true +ntp_tinker_panic: true +ntp_force_sync_immediately: true diff --git a/kubespray/tests/files/packet_almalinux8-crio.yml b/kubespray/tests/files/packet_almalinux8-crio.yml new file mode 100644 index 0000000..35fa009 --- /dev/null +++ b/kubespray/tests/files/packet_almalinux8-crio.yml @@ -0,0 +1,8 @@ +--- +# Instance settings +cloud_image: almalinux-8 +mode: default + +# Kubespray settings +container_manager: crio +auto_renew_certificates: true diff --git a/kubespray/tests/files/packet_almalinux8-docker.yml b/kubespray/tests/files/packet_almalinux8-docker.yml new file mode 100644 index 0000000..bcc69cd --- /dev/null +++ b/kubespray/tests/files/packet_almalinux8-docker.yml @@ -0,0 +1,10 @@ +--- +# Instance settings +cloud_image: almalinux-8 +mode: default +vm_memory: 3072Mi + +# Use docker +container_manager: docker +etcd_deployment_type: docker +resolvconf_mode: docker_dns diff --git a/kubespray/tests/files/packet_almalinux8-kube-ovn.yml b/kubespray/tests/files/packet_almalinux8-kube-ovn.yml new file mode 100644 index 0000000..15dbabb --- /dev/null +++ b/kubespray/tests/files/packet_almalinux8-kube-ovn.yml @@ -0,0 +1,8 @@ +--- +# Instance settings +cloud_image: almalinux-8 +mode: default +vm_memory: 3072Mi + +# Kubespray settings +kube_network_plugin: kube-ovn diff --git a/kubespray/tests/files/packet_amazon-linux-2-aio.yml b/kubespray/tests/files/packet_amazon-linux-2-aio.yml new file mode 100644 index 0000000..7b2c69b --- /dev/null +++ b/kubespray/tests/files/packet_amazon-linux-2-aio.yml @@ -0,0 +1,4 @@ +--- +# Instance settings +cloud_image: amazon-linux-2 +mode: aio diff --git a/kubespray/tests/files/packet_centos7-calico-ha-once-localhost.yml b/kubespray/tests/files/packet_centos7-calico-ha-once-localhost.yml new file mode 100644 index 0000000..950aae0 --- /dev/null +++ b/kubespray/tests/files/packet_centos7-calico-ha-once-localhost.yml @@ -0,0 +1,18 @@ +--- +# Instance settings +cloud_image: centos-7 +mode: ha + +# Kubespray settings +download_localhost: true +download_run_once: true +typha_enabled: true +calico_apiserver_enabled: true +calico_backend: kdd +typha_secure: true +disable_ipv6_dns: true +auto_renew_certificates: true + +# Docker settings +container_manager: docker +etcd_deployment_type: docker diff --git a/kubespray/tests/files/packet_centos7-calico-ha.yml b/kubespray/tests/files/packet_centos7-calico-ha.yml new file mode 100644 index 0000000..be93a60 --- /dev/null +++ b/kubespray/tests/files/packet_centos7-calico-ha.yml @@ -0,0 +1,13 @@ +--- +# Instance settings +cloud_image: centos-7 +mode: ha + +# Kubespray settings +download_localhost: false +download_run_once: true +typha_enabled: 
true +calico_apiserver_enabled: true +calico_backend: kdd +typha_secure: true +auto_renew_certificates: true diff --git a/kubespray/tests/files/packet_centos7-canal-ha.yml b/kubespray/tests/files/packet_centos7-canal-ha.yml new file mode 100644 index 0000000..94a5f0e --- /dev/null +++ b/kubespray/tests/files/packet_centos7-canal-ha.yml @@ -0,0 +1,9 @@ +--- +# Instance settings +cloud_image: centos-7 +mode: ha + +# Kubespray settings +calico_datastore: etcd +kube_network_plugin: canal +auto_renew_certificates: true diff --git a/kubespray/tests/files/packet_centos7-flannel-addons-ha.yml b/kubespray/tests/files/packet_centos7-flannel-addons-ha.yml new file mode 100644 index 0000000..4d060a7 --- /dev/null +++ b/kubespray/tests/files/packet_centos7-flannel-addons-ha.yml @@ -0,0 +1,50 @@ +--- +# Instance settings +cloud_image: centos-7 +mode: ha + +# Kubespray settings +kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085 +kube_proxy_mode: iptables +kube_network_plugin: flannel +download_localhost: false +download_run_once: true +helm_enabled: true +krew_enabled: true +kubernetes_audit: true +etcd_events_cluster_enabled: true +local_volume_provisioner_enabled: true +kube_encrypt_secret_data: true +ingress_nginx_enabled: true +ingress_nginx_webhook_enabled: true +ingress_nginx_webhook_job_ttl: 30 +cert_manager_enabled: true +# Disable as health checks are still unstable and slow to respond. +metrics_server_enabled: false +metrics_server_kubelet_insecure_tls: true +kube_token_auth: true +enable_nodelocaldns: false +kubelet_rotate_server_certificates: true + +kube_oidc_url: https://accounts.google.com/.well-known/openid-configuration +kube_oidc_client_id: kubespray-example + +tls_min_version: "VersionTLS12" +tls_cipher_suites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + +# test etcd tls cipher suites +etcd_tls_cipher_suites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + +# Containerd +containerd_storage_dir: /var/data/containerd +containerd_state_dir: /run/cri/containerd +containerd_oom_score: -999 + +# Kube-vip +kube_vip_enabled: true +kube_vip_arp_enabled: true +kube_vip_controlplane_enabled: true +kube_vip_address: 192.168.1.100 diff --git a/kubespray/tests/files/packet_centos7-multus-calico.yml b/kubespray/tests/files/packet_centos7-multus-calico.yml new file mode 100644 index 0000000..350c101 --- /dev/null +++ b/kubespray/tests/files/packet_centos7-multus-calico.yml @@ -0,0 +1,7 @@ +--- +# Instance settings +cloud_image: centos-7 +mode: default + +# Kubespray settings +kube_network_plugin_multus: true diff --git a/kubespray/tests/files/packet_centos7-weave-upgrade-ha.yml b/kubespray/tests/files/packet_centos7-weave-upgrade-ha.yml new file mode 100644 index 0000000..e290ae4 --- /dev/null +++ b/kubespray/tests/files/packet_centos7-weave-upgrade-ha.yml @@ -0,0 +1,11 @@ +--- +# Instance settings +cloud_image: centos-7 +mode: ha + +# Kubespray settings +kube_network_plugin: weave +kubernetes_audit: true + +# Needed to upgrade from 1.16 to 1.17, otherwise upgrade is partial and bug followed +upgrade_cluster_setup: true diff --git a/kubespray/tests/files/packet_debian10-calico.yml b/kubespray/tests/files/packet_debian10-calico.yml new file mode 100644 index 0000000..90e982a --- /dev/null +++ b/kubespray/tests/files/packet_debian10-calico.yml @@ -0,0 +1,11 @@ +--- +# Instance settings +cloud_image: debian-10 +mode: default + +# Kubespray settings +auto_renew_certificates: true + +# plugins +helm_enabled: true +krew_enabled: 
true diff --git a/kubespray/tests/files/packet_debian10-cilium-svc-proxy.yml b/kubespray/tests/files/packet_debian10-cilium-svc-proxy.yml new file mode 100644 index 0000000..3dcbc7a --- /dev/null +++ b/kubespray/tests/files/packet_debian10-cilium-svc-proxy.yml @@ -0,0 +1,10 @@ +--- +# Instance settings +cloud_image: debian-10 +mode: ha + +# Kubespray settings +kube_network_plugin: cilium +enable_network_policy: true + +cilium_kube_proxy_replacement: strict diff --git a/kubespray/tests/files/packet_debian10-docker.yml b/kubespray/tests/files/packet_debian10-docker.yml new file mode 100644 index 0000000..fc55e7f --- /dev/null +++ b/kubespray/tests/files/packet_debian10-docker.yml @@ -0,0 +1,9 @@ +--- +# Instance settings +cloud_image: debian-10 +mode: default + +# Use docker +container_manager: docker +etcd_deployment_type: docker +resolvconf_mode: docker_dns diff --git a/kubespray/tests/files/packet_debian11-calico-upgrade-once.yml b/kubespray/tests/files/packet_debian11-calico-upgrade-once.yml new file mode 100644 index 0000000..3c589a0 --- /dev/null +++ b/kubespray/tests/files/packet_debian11-calico-upgrade-once.yml @@ -0,0 +1,16 @@ +--- +# Instance settings +cloud_image: debian-11 +mode: default + +# Kubespray settings +download_run_once: true + +# Pin disabling ipip mode to ensure proper upgrade +ipip: false +calico_pool_blocksize: 26 +calico_vxlan_mode: Always +calico_network_backend: bird + +# Needed to bypass deprecation check +ignore_assert_errors: true diff --git a/kubespray/tests/files/packet_debian11-calico-upgrade.yml b/kubespray/tests/files/packet_debian11-calico-upgrade.yml new file mode 100644 index 0000000..1b05714 --- /dev/null +++ b/kubespray/tests/files/packet_debian11-calico-upgrade.yml @@ -0,0 +1,13 @@ +--- +# Instance settings +cloud_image: debian-11 +mode: default + +# Pin disabling ipip mode to ensure proper upgrade +ipip: false +calico_pool_blocksize: 26 +calico_vxlan_mode: Always +calico_network_backend: bird + +# Needed to bypass deprecation check +ignore_assert_errors: true diff --git a/kubespray/tests/files/packet_debian11-calico.yml b/kubespray/tests/files/packet_debian11-calico.yml new file mode 100644 index 0000000..61b31c2 --- /dev/null +++ b/kubespray/tests/files/packet_debian11-calico.yml @@ -0,0 +1,4 @@ +--- +# Instance settings +cloud_image: debian-11 +mode: default diff --git a/kubespray/tests/files/packet_debian11-docker.yml b/kubespray/tests/files/packet_debian11-docker.yml new file mode 100644 index 0000000..69ec8eb --- /dev/null +++ b/kubespray/tests/files/packet_debian11-docker.yml @@ -0,0 +1,9 @@ +--- +# Instance settings +cloud_image: debian-11 +mode: default + +# Use docker +container_manager: docker +etcd_deployment_type: docker +resolvconf_mode: docker_dns diff --git a/kubespray/tests/files/packet_debian9-macvlan.yml b/kubespray/tests/files/packet_debian9-macvlan.yml new file mode 100644 index 0000000..a65aa96 --- /dev/null +++ b/kubespray/tests/files/packet_debian9-macvlan.yml @@ -0,0 +1,11 @@ +--- +# Instance settings +cloud_image: debian-9 +mode: default + +# Kubespray settings +kube_network_plugin: macvlan +enable_nodelocaldns: false +kube_proxy_masquerade_all: true +macvlan_interface: "eth0" +auto_renew_certificates: true diff --git a/kubespray/tests/files/packet_fedora35-calico-selinux.yml b/kubespray/tests/files/packet_fedora35-calico-selinux.yml new file mode 100644 index 0000000..62b1b13 --- /dev/null +++ b/kubespray/tests/files/packet_fedora35-calico-selinux.yml @@ -0,0 +1,14 @@ +--- +# Instance settings +cloud_image: fedora-35 
+mode: default + +# Kubespray settings +auto_renew_certificates: true +# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011 +# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x. Calico v3.20.x/v3.21.x Pods show the following error +# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace. +kube_proxy_mode: iptables + +# Test with SELinux in enforcing mode +preinstall_selinux_state: enforcing diff --git a/kubespray/tests/files/packet_fedora35-calico-swap-selinux.yml b/kubespray/tests/files/packet_fedora35-calico-swap-selinux.yml new file mode 100644 index 0000000..1535642 --- /dev/null +++ b/kubespray/tests/files/packet_fedora35-calico-swap-selinux.yml @@ -0,0 +1,19 @@ +--- +# Instance settings +cloud_image: fedora-35 +mode: default + +# Kubespray settings +auto_renew_certificates: true +# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011 +# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x. Calico v3.20.x/v3.21.x Pods show the following error +# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace. +kube_proxy_mode: iptables + +# Test with SELinux in enforcing mode +preinstall_selinux_state: enforcing + +# Test Alpha swap feature by leveraging zswap default config in Fedora 35 +kubelet_fail_swap_on: False +kube_feature_gates: + - "NodeSwap=True" diff --git a/kubespray/tests/files/packet_fedora35-crio.yml b/kubespray/tests/files/packet_fedora35-crio.yml new file mode 100644 index 0000000..fa3302b --- /dev/null +++ b/kubespray/tests/files/packet_fedora35-crio.yml @@ -0,0 +1,15 @@ +--- +# Instance settings +cloud_image: fedora-35 +mode: default + +# Kubespray settings +container_manager: crio +auto_renew_certificates: true +# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011 +# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x. Calico v3.20.x/v3.21.x Pods show the following error +# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace. +kube_proxy_mode: iptables + +# Test with SELinux in enforcing mode +preinstall_selinux_state: enforcing diff --git a/kubespray/tests/files/packet_fedora36-docker-calico.yml b/kubespray/tests/files/packet_fedora36-docker-calico.yml new file mode 100644 index 0000000..14ea668 --- /dev/null +++ b/kubespray/tests/files/packet_fedora36-docker-calico.yml @@ -0,0 +1,15 @@ +--- +# Instance settings +cloud_image: fedora-36 +mode: default + +# Kubespray settings +auto_renew_certificates: true +# Switching to iptables due to https://github.com/projectcalico/calico/issues/5011 +# Kubernetes v1.23.0 kube-proxy now uses ipset v7.x. Calico v3.20.x/v3.21.x Pods show the following error +# Bad return code from 'ipset list'. error=exit status 1 family="inet" stderr="ipset v7.1: Kernel and userspace incompatible: settype hash:ip,port with revision 6 not supported by userspace. 
+kube_proxy_mode: iptables + +# Docker specific settings: +container_manager: docker +etcd_deployment_type: docker diff --git a/kubespray/tests/files/packet_fedora36-docker-weave.yml b/kubespray/tests/files/packet_fedora36-docker-weave.yml new file mode 100644 index 0000000..f9ecc29 --- /dev/null +++ b/kubespray/tests/files/packet_fedora36-docker-weave.yml @@ -0,0 +1,12 @@ +--- +# Instance settings +cloud_image: fedora-36 +mode: default + +# Kubespray settings +kube_network_plugin: weave + +# Docker specific settings: +container_manager: docker +etcd_deployment_type: docker +resolvconf_mode: docker_dns diff --git a/kubespray/tests/files/packet_fedora36-kube-ovn.yml b/kubespray/tests/files/packet_fedora36-kube-ovn.yml new file mode 100644 index 0000000..7028aaa --- /dev/null +++ b/kubespray/tests/files/packet_fedora36-kube-ovn.yml @@ -0,0 +1,7 @@ +--- +# Instance settings +cloud_image: fedora-36 +mode: default + +# Kubespray settings +kube_network_plugin: kube-ovn diff --git a/kubespray/tests/files/packet_opensuse-canal.yml b/kubespray/tests/files/packet_opensuse-canal.yml new file mode 100644 index 0000000..67f3812 --- /dev/null +++ b/kubespray/tests/files/packet_opensuse-canal.yml @@ -0,0 +1,9 @@ +--- +# Instance settings +cloud_image: opensuse-leap-15 +mode: default + +# Kubespray settings +calico_datastore: etcd +kube_network_plugin: canal +auto_renew_certificates: true diff --git a/kubespray/tests/files/packet_opensuse-docker-cilium.yml b/kubespray/tests/files/packet_opensuse-docker-cilium.yml new file mode 100644 index 0000000..16ae393 --- /dev/null +++ b/kubespray/tests/files/packet_opensuse-docker-cilium.yml @@ -0,0 +1,11 @@ +--- +# Instance settings +cloud_image: opensuse-leap-15 +mode: default + +# Kubespray settings +kube_network_plugin: cilium + +# Docker specific settings: +container_manager: docker +etcd_deployment_type: docker diff --git a/kubespray/tests/files/packet_rockylinux8-calico.yml b/kubespray/tests/files/packet_rockylinux8-calico.yml new file mode 100644 index 0000000..b475112 --- /dev/null +++ b/kubespray/tests/files/packet_rockylinux8-calico.yml @@ -0,0 +1,11 @@ +--- +# Instance settings +cloud_image: rockylinux-8 +mode: default +vm_memory: 3072Mi + +# Kubespray settings +metrics_server_enabled: true +dashboard_namespace: "kube-dashboard" +dashboard_enabled: true +loadbalancer_apiserver_type: haproxy diff --git a/kubespray/tests/files/packet_rockylinux9-calico.yml b/kubespray/tests/files/packet_rockylinux9-calico.yml new file mode 100644 index 0000000..17e6ae5 --- /dev/null +++ b/kubespray/tests/files/packet_rockylinux9-calico.yml @@ -0,0 +1,11 @@ +--- +# Instance settings +cloud_image: rockylinux-9 +mode: default +vm_memory: 3072Mi + +# Kubespray settings +metrics_server_enabled: true +dashboard_namespace: "kube-dashboard" +dashboard_enabled: true +loadbalancer_apiserver_type: haproxy diff --git a/kubespray/tests/files/packet_ubuntu16-canal-ha.yml b/kubespray/tests/files/packet_ubuntu16-canal-ha.yml new file mode 100644 index 0000000..1a8eb2d --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu16-canal-ha.yml @@ -0,0 +1,8 @@ +--- +# Instance settings +cloud_image: ubuntu-1604 +mode: ha + +# Kubespray settings +calico_datastore: etcd +kube_network_plugin: canal diff --git a/kubespray/tests/files/packet_ubuntu16-canal-sep.yml b/kubespray/tests/files/packet_ubuntu16-canal-sep.yml new file mode 100644 index 0000000..e2e165e --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu16-canal-sep.yml @@ -0,0 +1,8 @@ +--- +# Instance settings +cloud_image: ubuntu-1604 
+mode: separate + +# Kubespray settings +calico_datastore: etcd +kube_network_plugin: canal diff --git a/kubespray/tests/files/packet_ubuntu16-docker-weave-sep.yml b/kubespray/tests/files/packet_ubuntu16-docker-weave-sep.yml new file mode 100644 index 0000000..9b268e7 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu16-docker-weave-sep.yml @@ -0,0 +1,16 @@ +--- +# Instance settings +cloud_image: ubuntu-1604 +mode: separate + +# Kubespray settings +kube_network_plugin: weave +auto_renew_certificates: true + +# Docker specific settings: +container_manager: docker +etcd_deployment_type: docker +resolvconf_mode: docker_dns + +# Ubuntu 16 - docker containerd package available stopped at 1.4.6 +docker_containerd_version: latest diff --git a/kubespray/tests/files/packet_ubuntu16-flannel-ha.yml b/kubespray/tests/files/packet_ubuntu16-flannel-ha.yml new file mode 100644 index 0000000..8df48e3 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu16-flannel-ha.yml @@ -0,0 +1,10 @@ +--- +# Instance settings +cloud_image: ubuntu-1604 +mode: ha + +# Kubespray settings +kube_network_plugin: flannel +etcd_deployment_type: kubeadm +kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085 +skip_non_kubeadm_warning: true diff --git a/kubespray/tests/files/packet_ubuntu18-aio-docker.yml b/kubespray/tests/files/packet_ubuntu18-aio-docker.yml new file mode 100644 index 0000000..3fd06f2 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu18-aio-docker.yml @@ -0,0 +1,9 @@ +--- +# Instance settings +cloud_image: ubuntu-1804 +mode: aio + +# Use docker +container_manager: docker +etcd_deployment_type: docker +resolvconf_mode: docker_dns diff --git a/kubespray/tests/files/packet_ubuntu18-calico-aio.yml b/kubespray/tests/files/packet_ubuntu18-calico-aio.yml new file mode 100644 index 0000000..df9e428 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu18-calico-aio.yml @@ -0,0 +1,4 @@ +--- +# Instance settings +cloud_image: ubuntu-1804 +mode: aio diff --git a/kubespray/tests/files/packet_ubuntu18-calico-ha-recover-noquorum.yml b/kubespray/tests/files/packet_ubuntu18-calico-ha-recover-noquorum.yml new file mode 100644 index 0000000..a064392 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu18-calico-ha-recover-noquorum.yml @@ -0,0 +1,4 @@ +--- +# Instance settings +cloud_image: ubuntu-1804 +mode: ha-recover-noquorum diff --git a/kubespray/tests/files/packet_ubuntu18-calico-ha-recover.yml b/kubespray/tests/files/packet_ubuntu18-calico-ha-recover.yml new file mode 100644 index 0000000..b1ef3b9 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu18-calico-ha-recover.yml @@ -0,0 +1,4 @@ +--- +# Instance settings +cloud_image: ubuntu-1804 +mode: ha-recover diff --git a/kubespray/tests/files/packet_ubuntu18-cilium-sep.yml b/kubespray/tests/files/packet_ubuntu18-cilium-sep.yml new file mode 100644 index 0000000..04ed4cc --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu18-cilium-sep.yml @@ -0,0 +1,9 @@ +--- +# Instance settings +cloud_image: ubuntu-1804 +mode: separate + +# Kubespray settings +kube_network_plugin: cilium +enable_network_policy: true +auto_renew_certificates: true diff --git a/kubespray/tests/files/packet_ubuntu18-crio.yml b/kubespray/tests/files/packet_ubuntu18-crio.yml new file mode 100644 index 0000000..f5b7e12 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu18-crio.yml @@ -0,0 +1,10 @@ +--- +# Instance settings +cloud_image: ubuntu-1804 +mode: default + +# Kubespray settings +container_manager: crio + +download_localhost: false 
+download_run_once: true diff --git a/kubespray/tests/files/packet_ubuntu18-flannel-ha-once.yml b/kubespray/tests/files/packet_ubuntu18-flannel-ha-once.yml new file mode 100644 index 0000000..fe67289 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu18-flannel-ha-once.yml @@ -0,0 +1,22 @@ +--- +# Instance settings +cloud_image: ubuntu-1804 +mode: ha + +# Kubespray settings +kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085 +kube_proxy_mode: iptables +kube_network_plugin: flannel +helm_enabled: true +krew_enabled: true +kubernetes_audit: true +etcd_events_cluster_enabled: true +local_volume_provisioner_enabled: true +kube_encrypt_secret_data: true +ingress_nginx_enabled: true +cert_manager_enabled: true +# Disable as health checks are still unstable and slow to respond. +metrics_server_enabled: false +metrics_server_kubelet_insecure_tls: true +kube_token_auth: true +enable_nodelocaldns: false diff --git a/kubespray/tests/files/packet_ubuntu18-flannel-ha.yml b/kubespray/tests/files/packet_ubuntu18-flannel-ha.yml new file mode 100644 index 0000000..cc513d0 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu18-flannel-ha.yml @@ -0,0 +1,24 @@ +--- +# Instance settings +cloud_image: ubuntu-1804 +mode: ha + +# Kubespray settings +kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085 +kube_proxy_mode: iptables +kube_network_plugin: flannel +helm_enabled: true +krew_enabled: true +kubernetes_audit: true +etcd_events_cluster_enabled: true +local_volume_provisioner_enabled: true +kube_encrypt_secret_data: true +ingress_nginx_enabled: true +ingress_nginx_webhook_enabled: true +ingress_nginx_webhook_job_ttl: 30 +cert_manager_enabled: true +# Disable as health checks are still unstable and slow to respond. 
+metrics_server_enabled: false +metrics_server_kubelet_insecure_tls: true +kube_token_auth: true +enable_nodelocaldns: false diff --git a/kubespray/tests/files/packet_ubuntu20-aio-docker.yml b/kubespray/tests/files/packet_ubuntu20-aio-docker.yml new file mode 100644 index 0000000..edc1220 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu20-aio-docker.yml @@ -0,0 +1,16 @@ +--- +# Instance settings +cloud_image: ubuntu-2004 +mode: aio + +# Kubespray settings +auto_renew_certificates: true + +# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko +kube_proxy_mode: iptables +enable_nodelocaldns: False + +# Use docker +container_manager: docker +etcd_deployment_type: docker +resolvconf_mode: docker_dns diff --git a/kubespray/tests/files/packet_ubuntu20-calico-aio-ansible-2_11.yml b/kubespray/tests/files/packet_ubuntu20-calico-aio-ansible-2_11.yml new file mode 100644 index 0000000..41d4a13 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu20-calico-aio-ansible-2_11.yml @@ -0,0 +1,11 @@ +--- +# Instance settings +cloud_image: ubuntu-2004 +mode: aio + +# Kubespray settings +auto_renew_certificates: true + +# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko +kube_proxy_mode: iptables +enable_nodelocaldns: False diff --git a/kubespray/tests/files/packet_ubuntu20-calico-aio-hardening.yml b/kubespray/tests/files/packet_ubuntu20-calico-aio-hardening.yml new file mode 100644 index 0000000..76340d8 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu20-calico-aio-hardening.yml @@ -0,0 +1,107 @@ +--- +# Instance settings +cloud_image: ubuntu-2004 +mode: aio + +# Kubespray settings +auto_renew_certificates: true + +# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko +kube_proxy_mode: iptables +enable_nodelocaldns: False + +# The followings are for hardening +## kube-apiserver +authorization_modes: ['Node', 'RBAC'] +# AppArmor-based OS +kube_apiserver_feature_gates: ['AppArmor=true'] +kube_apiserver_request_timeout: 120s +kube_apiserver_service_account_lookup: true + +# enable kubernetes audit +kubernetes_audit: true +audit_log_path: "/var/log/kube-apiserver-log.json" +audit_log_maxage: 30 +audit_log_maxbackups: 10 +audit_log_maxsize: 100 + +tls_min_version: VersionTLS12 +tls_cipher_suites: + - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + +# enable encryption at rest +kube_encrypt_secret_data: true +kube_encryption_resources: [secrets] +kube_encryption_algorithm: "secretbox" + +kube_apiserver_enable_admission_plugins: + - EventRateLimit + - AlwaysPullImages + - ServiceAccount + - NamespaceLifecycle + - NodeRestriction + - LimitRanger + - ResourceQuota + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - PodNodeSelector + - PodSecurity +kube_apiserver_admission_control_config_file: true +# EventRateLimit plugin configuration +kube_apiserver_admission_event_rate_limits: + limit_1: + type: Namespace + qps: 50 + burst: 100 + cache_size: 2000 + limit_2: + type: User + qps: 50 + burst: 100 +kube_profiling: false + +## kube-controller-manager +kube_controller_manager_bind_address: 127.0.0.1 +kube_controller_terminated_pod_gc_threshold: 50 +# AppArmor-based OS +kube_controller_feature_gates: 
["RotateKubeletServerCertificate=true", "AppArmor=true"] + +## kube-scheduler +kube_scheduler_bind_address: 127.0.0.1 +kube_kubeadm_scheduler_extra_args: + profiling: false +# AppArmor-based OS +kube_scheduler_feature_gates: ["AppArmor=true"] + +## etcd +etcd_deployment_type: kubeadm + +## kubelet +kubelet_authentication_token_webhook: true +kube_read_only_port: 0 +kubelet_rotate_server_certificates: true +kubelet_protect_kernel_defaults: true +kubelet_event_record_qps: 1 +kubelet_rotate_certificates: true +kubelet_streaming_connection_idle_timeout: "5m" +kubelet_make_iptables_util_chains: true +kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"] +kubelet_seccomp_default: true +kubelet_systemd_hardening: true +# In case you have multiple interfaces in your +# control plane nodes and you want to specify the right +# IP addresses, kubelet_secure_addresses allows you +# to specify the IP from which the kubelet +# will receive the packets. +# kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112" + +# additional configurations +kube_owner: root +kube_cert_group: root + +# create a default Pod Security Configuration and deny running of insecure pods +# kube-system namespace is exempted by default +kube_pod_security_use_default: true +kube_pod_security_default_enforce: restricted diff --git a/kubespray/tests/files/packet_ubuntu20-calico-aio.yml b/kubespray/tests/files/packet_ubuntu20-calico-aio.yml new file mode 100644 index 0000000..41d4a13 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu20-calico-aio.yml @@ -0,0 +1,11 @@ +--- +# Instance settings +cloud_image: ubuntu-2004 +mode: aio + +# Kubespray settings +auto_renew_certificates: true + +# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko +kube_proxy_mode: iptables +enable_nodelocaldns: False diff --git a/kubespray/tests/files/packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha.yml b/kubespray/tests/files/packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha.yml new file mode 100644 index 0000000..57187a8 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha.yml @@ -0,0 +1,24 @@ +--- +# Instance settings +cloud_image: ubuntu-2004 +mode: ha + +# use the kubeadm etcd setting to test the upgrade +etcd_deployment_type: kubeadm + +upgrade_cluster_setup: true + +# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko +kube_proxy_mode: iptables +enable_nodelocaldns: False + +# Pin disabling ipip mode to ensure proper upgrade +ipip: false +calico_vxlan_mode: Always +calico_network_backend: bird + +# Needed to bypass deprecation check +ignore_assert_errors: true +### FIXME FLORYUT Needed for upgrade job, will be removed when releasing kubespray 2.20 +calico_pool_blocksize: 24 +### /FIXME diff --git a/kubespray/tests/files/packet_ubuntu20-calico-etcd-kubeadm.yml b/kubespray/tests/files/packet_ubuntu20-calico-etcd-kubeadm.yml new file mode 100644 index 0000000..99f7365 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu20-calico-etcd-kubeadm.yml @@ -0,0 +1,11 @@ +--- +# Instance settings +cloud_image: ubuntu-2004 +mode: default + +# use the kubeadm etcd setting to test the upgrade +etcd_deployment_type: kubeadm + +# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko 
+kube_proxy_mode: iptables +enable_nodelocaldns: False diff --git a/kubespray/tests/files/packet_ubuntu20-calico-ha-wireguard.yml b/kubespray/tests/files/packet_ubuntu20-calico-ha-wireguard.yml new file mode 100644 index 0000000..f2e2f57 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu20-calico-ha-wireguard.yml @@ -0,0 +1,13 @@ +--- +# Instance settings +cloud_image: ubuntu-2004 +mode: ha + +# Kubespray settings +calico_wireguard_enabled: true +auto_renew_certificates: true + +# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko +kube_proxy_mode: iptables +# KVM kernel used by packet instances is missing the dummy.ko kernel module so it cannot enable nodelocaldns +enable_nodelocaldns: false diff --git a/kubespray/tests/files/packet_ubuntu22-aio-docker.yml b/kubespray/tests/files/packet_ubuntu22-aio-docker.yml new file mode 100644 index 0000000..b78c6b0 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu22-aio-docker.yml @@ -0,0 +1,17 @@ +--- +# Instance settings +cloud_image: ubuntu-2204 +mode: aio +vm_memory: 1600Mi + +# Kubespray settings +auto_renew_certificates: true + +# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko +kube_proxy_mode: iptables +enable_nodelocaldns: False + +# Use docker +container_manager: docker +etcd_deployment_type: docker +resolvconf_mode: docker_dns diff --git a/kubespray/tests/files/packet_ubuntu22-calico-aio.yml b/kubespray/tests/files/packet_ubuntu22-calico-aio.yml new file mode 100644 index 0000000..558dc76 --- /dev/null +++ b/kubespray/tests/files/packet_ubuntu22-calico-aio.yml @@ -0,0 +1,12 @@ +--- +# Instance settings +cloud_image: ubuntu-2204 +mode: aio +vm_memory: 1600Mi + +# Kubespray settings +auto_renew_certificates: true + +# Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko +kube_proxy_mode: iptables +enable_nodelocaldns: False diff --git a/kubespray/tests/files/tf-elastx_ubuntu18-calico.yml b/kubespray/tests/files/tf-elastx_ubuntu18-calico.yml new file mode 100644 index 0000000..b8dbaaa --- /dev/null +++ b/kubespray/tests/files/tf-elastx_ubuntu18-calico.yml @@ -0,0 +1,5 @@ +--- +sonobuoy_enabled: true + +# Ignore ping errors +ignore_assert_errors: true diff --git a/kubespray/tests/files/tf-ovh_ubuntu18-calico.yml b/kubespray/tests/files/tf-ovh_ubuntu18-calico.yml new file mode 100644 index 0000000..d6fb9de --- /dev/null +++ b/kubespray/tests/files/tf-ovh_ubuntu18-calico.yml @@ -0,0 +1,7 @@ +--- +sonobuoy_enabled: true +pkg_install_retries: 25 +retry_stagger: 10 + +# Ignore ping errors +ignore_assert_errors: true diff --git a/kubespray/tests/files/vagrant_centos7-kube-router.rb b/kubespray/tests/files/vagrant_centos7-kube-router.rb new file mode 100644 index 0000000..620df71 --- /dev/null +++ b/kubespray/tests/files/vagrant_centos7-kube-router.rb @@ -0,0 +1,15 @@ +$num_instances = 2 +$vm_memory ||= 2048 +$os = "centos" + +$kube_master_instances = 1 +$etcd_instances = 1 + +# For CI we are not worried about data persistence across reboot +$libvirt_volume_cache = "unsafe" + +# Checking for box update can trigger API rate limiting +# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html +$box_check_update = false + +$network_plugin = "kube-router" diff --git a/kubespray/tests/files/vagrant_centos7-kube-router.yml 
b/kubespray/tests/files/vagrant_centos7-kube-router.yml new file mode 100644 index 0000000..e9e4161 --- /dev/null +++ b/kubespray/tests/files/vagrant_centos7-kube-router.yml @@ -0,0 +1,8 @@ +--- +# Instance settings +cloud_image: centos-7 +mode: default + +# Kubespray settings +kube_network_plugin: kube-router +enable_network_policy: true diff --git a/kubespray/tests/files/vagrant_fedora35-kube-router.rb b/kubespray/tests/files/vagrant_fedora35-kube-router.rb new file mode 100644 index 0000000..752ac7f --- /dev/null +++ b/kubespray/tests/files/vagrant_fedora35-kube-router.rb @@ -0,0 +1,15 @@ +$num_instances = 2 +$vm_memory ||= 2048 +$os = "fedora35" + +$kube_master_instances = 1 +$etcd_instances = 1 + +# For CI we are not worried about data persistence across reboot +$libvirt_volume_cache = "unsafe" + +# Checking for box update can trigger API rate limiting +# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html +$box_check_update = false + +$network_plugin = "kube-router" diff --git a/kubespray/tests/files/vagrant_fedora35-kube-router.yml b/kubespray/tests/files/vagrant_fedora35-kube-router.yml new file mode 100644 index 0000000..2584994 --- /dev/null +++ b/kubespray/tests/files/vagrant_fedora35-kube-router.yml @@ -0,0 +1,7 @@ +--- +# Instance settings +cloud_image: fedora-35 +mode: default + +# Kubespray settings +kube_network_plugin: kube-router diff --git a/kubespray/tests/files/vagrant_ubuntu16-kube-router-sep.rb b/kubespray/tests/files/vagrant_ubuntu16-kube-router-sep.rb new file mode 100644 index 0000000..c100934 --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu16-kube-router-sep.rb @@ -0,0 +1,15 @@ +$num_instances = 2 +$vm_memory ||= 2048 +$os = "ubuntu1604" + +$kube_master_instances = 1 +$etcd_instances = 1 + +# For CI we are not worried about data persistence across reboot +$libvirt_volume_cache = "unsafe" + +# Checking for box update can trigger API rate limiting +# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html +$box_check_update = false + +$network_plugin = "kube-router" diff --git a/kubespray/tests/files/vagrant_ubuntu16-kube-router-sep.yml b/kubespray/tests/files/vagrant_ubuntu16-kube-router-sep.yml new file mode 100644 index 0000000..e2c60b3 --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu16-kube-router-sep.yml @@ -0,0 +1,8 @@ +--- +# Instance settings +cloud_image: ubuntu-1604 +mode: separate + +# Kubespray settings +bootstrap_os: ubuntu +kube_network_plugin: kube-router diff --git a/kubespray/tests/files/vagrant_ubuntu16-kube-router-svc-proxy.rb b/kubespray/tests/files/vagrant_ubuntu16-kube-router-svc-proxy.rb new file mode 100644 index 0000000..51fd024 --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu16-kube-router-svc-proxy.rb @@ -0,0 +1,10 @@ +$os = "ubuntu1604" + +# For CI we are not worried about data persistence across reboot +$libvirt_volume_cache = "unsafe" + +# Checking for box update can trigger API rate limiting +# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html +$box_check_update = false + +$network_plugin = "kube-router" diff --git a/kubespray/tests/files/vagrant_ubuntu16-kube-router-svc-proxy.yml b/kubespray/tests/files/vagrant_ubuntu16-kube-router-svc-proxy.yml new file mode 100644 index 0000000..465d42d --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu16-kube-router-svc-proxy.yml @@ -0,0 +1,10 @@ +--- +# Instance settings +cloud_image: ubuntu-1604 +mode: separate + +# Kubespray settings +bootstrap_os: ubuntu +kube_network_plugin: kube-router + +kube_router_run_service_proxy: true 
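Each vagrant_* test case above comes as a pair: the .rb file tunes the Vagrant topology ($num_instances, $os, $network_plugin, libvirt cache mode) while the matching .yml file carries the kubespray extra vars for that job. A rough sketch of exercising one pair locally, assuming the Kubespray Vagrantfile reads user overrides from vagrant/config.rb (that path is an assumption; adjust to how your checkout resolves it):

    # drop the topology overrides where the Vagrantfile is assumed to look for them
    mkdir -p vagrant
    cp tests/files/vagrant_ubuntu16-kube-router-sep.rb vagrant/config.rb
    # bring the boxes up; the matching .yml is what CI feeds to the playbook as extra vars
    vagrant up --provider=libvirt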
diff --git a/kubespray/tests/files/vagrant_ubuntu18-calico-dual-stack.rb b/kubespray/tests/files/vagrant_ubuntu18-calico-dual-stack.rb new file mode 100644 index 0000000..f7d7765 --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu18-calico-dual-stack.rb @@ -0,0 +1,7 @@ +# For CI we are not worried about data persistence across reboot +$libvirt_volume_cache = "unsafe" + +# Checking for box update can trigger API rate limiting +# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html +$box_check_update = false +$network_plugin = "calico" diff --git a/kubespray/tests/files/vagrant_ubuntu18-calico-dual-stack.yml b/kubespray/tests/files/vagrant_ubuntu18-calico-dual-stack.yml new file mode 100644 index 0000000..3a45bdc --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu18-calico-dual-stack.yml @@ -0,0 +1,3 @@ +--- +# Kubespray settings +enable_dual_stack_networks: true diff --git a/kubespray/tests/files/vagrant_ubuntu18-flannel.rb b/kubespray/tests/files/vagrant_ubuntu18-flannel.rb new file mode 100644 index 0000000..25b8690 --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu18-flannel.rb @@ -0,0 +1,7 @@ +# For CI we are not worries about data persistence across reboot +$libvirt_volume_cache = "unsafe" + +# Checking for box update can trigger API rate limiting +# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html +$box_check_update = false +$vm_cpus = 2 \ No newline at end of file diff --git a/kubespray/tests/files/vagrant_ubuntu18-flannel.yml b/kubespray/tests/files/vagrant_ubuntu18-flannel.yml new file mode 100644 index 0000000..6f8916f --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu18-flannel.yml @@ -0,0 +1,3 @@ +--- +# Kubespray settings +kube_network_plugin: flannel diff --git a/kubespray/tests/files/vagrant_ubuntu18-weave-medium.rb b/kubespray/tests/files/vagrant_ubuntu18-weave-medium.rb new file mode 100644 index 0000000..be537f6 --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu18-weave-medium.rb @@ -0,0 +1,7 @@ +$num_instances = 16 +$vm_memory ||= 2048 +$os = "ubuntu1804" +$network_plugin = "weave" +$kube_master_instances = 1 +$etcd_instances = 1 +$playbook = "tests/cloud_playbooks/wait-for-ssh.yml" diff --git a/kubespray/tests/files/vagrant_ubuntu18-weave-medium.yml b/kubespray/tests/files/vagrant_ubuntu18-weave-medium.yml new file mode 100644 index 0000000..bb5f974 --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu18-weave-medium.yml @@ -0,0 +1,3 @@ +--- +# Kubespray settings +kube_network_plugin: weave diff --git a/kubespray/tests/files/vagrant_ubuntu20-flannel.rb b/kubespray/tests/files/vagrant_ubuntu20-flannel.rb new file mode 100644 index 0000000..c739f58 --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu20-flannel.rb @@ -0,0 +1,9 @@ +$os = "ubuntu2004" + +# For CI we are not worries about data persistence across reboot +$libvirt_volume_cache = "unsafe" + +# Checking for box update can trigger API rate limiting +# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html +$box_check_update = false +$vm_cpus = 2 \ No newline at end of file diff --git a/kubespray/tests/files/vagrant_ubuntu20-flannel.yml b/kubespray/tests/files/vagrant_ubuntu20-flannel.yml new file mode 100644 index 0000000..6f8916f --- /dev/null +++ b/kubespray/tests/files/vagrant_ubuntu20-flannel.yml @@ -0,0 +1,3 @@ +--- +# Kubespray settings +kube_network_plugin: flannel diff --git a/kubespray/tests/local_inventory/host_vars/localhost b/kubespray/tests/local_inventory/host_vars/localhost new file mode 100644 index 0000000..695c0ec --- 
/dev/null +++ b/kubespray/tests/local_inventory/host_vars/localhost @@ -0,0 +1,12 @@ +aws: + key_name: "{{ key_name | default('ansibl8s') }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + region: "{{ aws_region | default('eu-west-1') }}" # default to eu-west-1 + group: "{{ aws_security_group | default ('default')}}" + instance_type: t2.micro + ami_id: "{{ aws_ami_id | default('ami-02724d1f') }}" # default to Debian Jessie + count: 3 + tags: + test_id: "{{ test_id }}" + network_plugin: "{{ kube_network_plugin }}" diff --git a/kubespray/tests/local_inventory/hosts.cfg b/kubespray/tests/local_inventory/hosts.cfg new file mode 100644 index 0000000..2302eda --- /dev/null +++ b/kubespray/tests/local_inventory/hosts.cfg @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/kubespray/tests/requirements-2.11.txt b/kubespray/tests/requirements-2.11.txt new file mode 100644 index 0000000..8db4395 --- /dev/null +++ b/kubespray/tests/requirements-2.11.txt @@ -0,0 +1,11 @@ +-r ../requirements-2.11.txt +yamllint==1.19.0 +apache-libcloud==2.2.1 +tox==3.11.1 +dopy==0.3.7 +ansible-lint==5.4.0 +molecule==3.0.6 +molecule-vagrant==0.3 +testinfra==5.2.2 +python-vagrant==0.5.15 +ara[server]==1.5.7 diff --git a/kubespray/tests/requirements-2.12.txt b/kubespray/tests/requirements-2.12.txt new file mode 100644 index 0000000..3a653d2 --- /dev/null +++ b/kubespray/tests/requirements-2.12.txt @@ -0,0 +1,11 @@ +-r ../requirements-2.12.txt +yamllint==1.19.0 +apache-libcloud==2.2.1 +tox==3.11.1 +dopy==0.3.7 +ansible-lint==5.4.0 +molecule==3.0.6 +molecule-vagrant==0.3 +testinfra==5.2.2 +python-vagrant==0.5.15 +ara[server]==1.5.7 diff --git a/kubespray/tests/requirements.txt b/kubespray/tests/requirements.txt new file mode 100644 index 0000000..3a653d2 --- /dev/null +++ b/kubespray/tests/requirements.txt @@ -0,0 +1,11 @@ +-r ../requirements-2.12.txt +yamllint==1.19.0 +apache-libcloud==2.2.1 +tox==3.11.1 +dopy==0.3.7 +ansible-lint==5.4.0 +molecule==3.0.6 +molecule-vagrant==0.3 +testinfra==5.2.2 +python-vagrant==0.5.15 +ara[server]==1.5.7 diff --git a/kubespray/tests/run-tests.sh b/kubespray/tests/run-tests.sh new file mode 100755 index 0000000..c20438e --- /dev/null +++ b/kubespray/tests/run-tests.sh @@ -0,0 +1,8 @@ +#! /bin/bash + +# curl -# -C - -o shebang-unit https://raw.github.com/arpinum-oss/shebang-unit/master/releases/shebang-unit +# chmod +x shebang-unit + +now=$(date +"%Y%m%d%H%M%S") +mkdir -p ${PWD}/tests-results +./shebang-unit --reporters=simple,junit --output-file=${PWD}/tests-results/junit_report-${now}.xml tests diff --git a/kubespray/tests/scripts/ansibl8s_test.sh b/kubespray/tests/scripts/ansibl8s_test.sh new file mode 100644 index 0000000..1f61f45 --- /dev/null +++ b/kubespray/tests/scripts/ansibl8s_test.sh @@ -0,0 +1,52 @@ +#! /bin/bash + +global_setup() { + git clone https://github.com/ansibl8s/setup-kubernetes.git setup-kubernetes + private_key="" + if [ ! 
-z ${PRIVATE_KEY_FILE} ] + then + private_key="--private-key=${PRIVATE_KEY_FILE}" + fi + ansible-playbook create.yml -i hosts -u admin -s \ + -e test_id=${TEST_ID} \ + -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} \ + -e aws_access_key=${AWS_ACCESS_KEY} \ + -e aws_secret_key=${AWS_SECRET_KEY} \ + -e aws_ami_id=${AWS_AMI_ID} \ + -e aws_security_group=${AWS_SECURITY_GROUP} \ + -e key_name=${AWS_KEY_PAIR_NAME} \ + -e inventory_path=${PWD}/inventory.ini \ + -e aws_region=${AWS_REGION} +} + +global_teardown() { + if [ -f inventory.ini ]; + then + ansible-playbook -i inventory.ini -u admin delete.yml + fi + rm -rf ${PWD}/setup-kubernetes +} + +should_deploy_cluster() { + ansible-playbook -i inventory.ini -s ${private_key} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml + + assertion__status_code_is_success $? +} + +should_api_server_respond() { + ansible-playbook -i inventory.ini ${private_key} testcases/010_check-apiserver.yml + + assertion__status_code_is_success $? +} + +should_pod_be_in_expected_subnet() { + ansible-playbook -i inventory.ini -s ${private_key} testcases/030_check-network.yml -vv + + assertion__status_code_is_success $? +} + +should_resolve_cluster_dns() { + ansible-playbook -i inventory.ini -s ${private_key} testcases/040_check-network-adv.yml -vv + + assertion__status_code_is_success $? +} diff --git a/kubespray/tests/scripts/check_readme_versions.sh b/kubespray/tests/scripts/check_readme_versions.sh new file mode 100755 index 0000000..a0fbd7b --- /dev/null +++ b/kubespray/tests/scripts/check_readme_versions.sh @@ -0,0 +1,33 @@ +#!/bin/bash +set -e + +TARGET_COMPONENTS="containerd calico cilium flannel kube-ovn kube-router weave cert-manager krew helm metallb registry cephfs-provisioner rbd-provisioner aws-ebs-csi-plugin azure-csi-plugin cinder-csi-plugin gcp-pd-csi-plugin local-path-provisioner local-volume-provisioner kube-vip ingress-nginx" + +# cd to the root directory of kubespray +cd $(dirname $0)/../../ + +echo checking kubernetes.. +version_from_default=$(grep "^kube_version:" ./roles/kubespray-defaults/defaults/main.yaml | awk '{print $2}' | sed s/\"//g) +version_from_readme=$(grep " \[kubernetes\]" ./README.md | awk '{print $3}') +if [ "${version_from_default}" != "${version_from_readme}" ]; then + echo "The version of kubernetes is different between main.yml(${version_from_default}) and README.md(${version_from_readme})." + echo "If the pull request updates kubernetes version, please update README.md also." + exit 1 +fi + +for component in $(echo ${TARGET_COMPONENTS}); do + echo checking ${component}.. + version_from_default=$(grep "^$(echo ${component} | sed s/"-"/"_"/g)_version:" ./roles/download/defaults/main.yml | awk '{print $2}' | sed s/\"//g | sed s/^v//) + if [ "${version_from_default}" = "" ]; then + version_from_default=$(grep "^$(echo ${component} | sed s/"-"/"_"/g)_version:" ./roles/kubernetes/node/defaults/main.yml | awk '{print $2}' | sed s/\"//g | sed s/^v//) + fi + version_from_readme=$(grep "\[${component}\]" ./README.md | grep "https" | awk '{print $3}' | sed s/^v//) + if [ "${version_from_default}" != "${version_from_readme}" ]; then + echo "The version of ${component} is different between main.yml(${version_from_default}) and README.md(${version_from_readme})." + echo "If the pull request updates ${component} version, please update README.md also." + exit 1 + fi +done + +echo "Succeeded to check all components." 
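run-tests.sh and ansibl8s_test.sh above follow the conventions of the shebang-unit framework bundled at the end of this diff: files matching *_test.sh are discovered, every public function in them is run as a test, and setup/teardown (plus global_setup/global_teardown) are reserved hook names. A minimal test file sketch in that style; the file name and its contents are purely illustrative:

    # tests/example_test.sh  (hypothetical shebang-unit test file)
    setup() {
      tmp_file=$(mktemp)
    }

    teardown() {
      rm -f "${tmp_file}"
    }

    should_round_trip_a_value() {
      echo "hello" > "${tmp_file}"
      assertion__equal "hello" "$(cat "${tmp_file}")"
    }

    should_succeed_with_status_code_zero() {
      true
      assertion__status_code_is_success $?
    }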
+exit 0 diff --git a/kubespray/tests/scripts/check_typo.sh b/kubespray/tests/scripts/check_typo.sh new file mode 100755 index 0000000..cdcf49b --- /dev/null +++ b/kubespray/tests/scripts/check_typo.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# cd to the root directory of kubespray +cd $(dirname $0)/../../ + +rm ./misspell* + +set -e +wget https://github.com/client9/misspell/releases/download/v0.3.4/misspell_0.3.4_linux_64bit.tar.gz +tar -zxvf ./misspell_0.3.4_linux_64bit.tar.gz +chmod 755 ./misspell +git ls-files | xargs ./misspell -error diff --git a/kubespray/tests/scripts/create-tf.sh b/kubespray/tests/scripts/create-tf.sh new file mode 100755 index 0000000..fbed302 --- /dev/null +++ b/kubespray/tests/scripts/create-tf.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -euxo pipefail + +cd .. +terraform -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1 diff --git a/kubespray/tests/scripts/delete-tf.sh b/kubespray/tests/scripts/delete-tf.sh new file mode 100755 index 0000000..57c35c8 --- /dev/null +++ b/kubespray/tests/scripts/delete-tf.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -euxo pipefail + +cd .. +terraform -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve diff --git a/kubespray/tests/scripts/md-table/main.py b/kubespray/tests/scripts/md-table/main.py new file mode 100755 index 0000000..9e00005 --- /dev/null +++ b/kubespray/tests/scripts/md-table/main.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +import argparse +import sys +import glob +from pathlib import Path +import yaml +from pydblite import Base +import re +import jinja2 +import sys + +from pprint import pprint + + +parser = argparse.ArgumentParser(description='Generate a Markdown table representing the CI test coverage') +parser.add_argument('--dir', default='tests/files/', help='folder with test yml files') + + +args = parser.parse_args() +p = Path(args.dir) + +env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=sys.path[0])) + +# Data represents CI coverage data matrix +class Data: + def __init__(self): + self.db = Base(':memory:') + self.db.create('container_manager', 'network_plugin', 'operating_system') + + + def set(self, container_manager, network_plugin, operating_system): + self.db.insert(container_manager=container_manager, network_plugin=network_plugin, operating_system=operating_system) + self.db.commit() + def exists(self, container_manager, network_plugin, operating_system): + return len((self.db("container_manager") == container_manager) & (self.db("network_plugin") == network_plugin) & (self.db("operating_system") == operating_system)) > 0 + + def jinja(self): + template = env.get_template('table.md.j2') + container_engines = list(self.db.get_unique_ids('container_manager')) + network_plugins = list(self.db.get_unique_ids("network_plugin")) + operating_systems = list(self.db.get_unique_ids("operating_system")) + + container_engines.sort() + network_plugins.sort() + operating_systems.sort() + + return template.render( + container_engines=container_engines, + network_plugins=network_plugins, + operating_systems=operating_systems, + exists=self.exists + ) + + def markdown(self): + out = '' + for container_manager in self.db.get_unique_ids('container_manager'): + # Prepare the headers + out += "# " + container_manager + "\n" + headers = '|OS / CNI| ' + underline = '|----|' + for network_plugin in self.db.get_unique_ids("network_plugin"): + headers += network_plugin + ' | ' + underline += '----|' + out += headers + "\n" + underline + "\n" + for operating_system in 
self.db.get_unique_ids("operating_system"): + out += '| ' + operating_system + ' | ' + for network_plugin in self.db.get_unique_ids("network_plugin"): + if self.exists(container_manager, network_plugin, operating_system): + emoji = ':white_check_mark:' + else: + emoji = ':x:' + out += emoji + ' | ' + out += "\n" + + pprint(self.db.get_unique_ids('operating_system')) + pprint(self.db.get_unique_ids('network_plugin')) + return out + + + +if not p.is_dir(): + print("Path is not a directory") + sys.exit(2) + +data = Data() +files = p.glob('*.yml') +for f in files: + y = yaml.load(f.open(), Loader=yaml.FullLoader) + + container_manager = y.get('container_manager', 'containerd') + network_plugin = y.get('kube_network_plugin', 'calico') + x = re.match(r"^[a-z-]+_([a-z0-9]+).*", f.name) + operating_system = x.group(1) + data.set(container_manager=container_manager, network_plugin=network_plugin, operating_system=operating_system) +#print(data.markdown()) +print(data.jinja()) diff --git a/kubespray/tests/scripts/md-table/requirements.txt b/kubespray/tests/scripts/md-table/requirements.txt new file mode 100644 index 0000000..ecf5aac --- /dev/null +++ b/kubespray/tests/scripts/md-table/requirements.txt @@ -0,0 +1,4 @@ +pyaml +jinja2 +pathlib ; python_version < '3.10' +pydblite diff --git a/kubespray/tests/scripts/md-table/table.md.j2 b/kubespray/tests/scripts/md-table/table.md.j2 new file mode 100644 index 0000000..7e8f4ca --- /dev/null +++ b/kubespray/tests/scripts/md-table/table.md.j2 @@ -0,0 +1,15 @@ +# CI test coverage + +To generate this Matrix run `./tests/scripts/md-table/main.py` + +{%- for container_engine in container_engines %} + +## {{ container_engine }} + +| OS / CNI |{% for cni in network_plugins %} {{ cni }} |{% endfor %} +|---|{% for cni in network_plugins %} --- |{% endfor %} +{%- for os in operating_systems %} +{{ os }} | {% for cni in network_plugins %} {{ ':white_check_mark:' if exists(container_engine, cni, os) else ':x:' }} |{% endfor %} +{%- endfor %} + +{%- endfor %} \ No newline at end of file diff --git a/kubespray/tests/scripts/md-table/test.sh b/kubespray/tests/scripts/md-table/test.sh new file mode 100755 index 0000000..3c2e581 --- /dev/null +++ b/kubespray/tests/scripts/md-table/test.sh @@ -0,0 +1,11 @@ +#!/bin/bash +set -euxo pipefail + +echo "Install requirements..." +pip install -r ./tests/scripts/md-table/requirements.txt + +echo "Generate current file..." +./tests/scripts/md-table/main.py > tmp.md + +echo "Compare docs/ci.md with actual tests in tests/files/*.yml ..." 
+cmp docs/ci.md tmp.md \ No newline at end of file diff --git a/kubespray/tests/scripts/molecule_logs.sh b/kubespray/tests/scripts/molecule_logs.sh new file mode 100755 index 0000000..4908d81 --- /dev/null +++ b/kubespray/tests/scripts/molecule_logs.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# Ensure a clean environent +rm -fr molecule_logs +mkdir -p molecule_logs + +# Collect and archive the logs +find ~/.cache/molecule/ -name \*.out -o -name \*.err -type f | xargs tar -uf molecule_logs/molecule.tar +gzip molecule_logs/molecule.tar diff --git a/kubespray/tests/scripts/molecule_run.sh b/kubespray/tests/scripts/molecule_run.sh new file mode 100755 index 0000000..9604238 --- /dev/null +++ b/kubespray/tests/scripts/molecule_run.sh @@ -0,0 +1,34 @@ +#!/bin/bash +set -euxo pipefail -o noglob + +export LC_ALL=C.UTF-8 +export LANG=C.UTF-8 + +_PATH='roles' +_EXCLUDE="" + +while [[ $# -gt 0 ]] ; do + case $1 in + -e|--exclude) + _EXCLUDE="${_EXCLUDE} -not -path ${_PATH}/$2/*" + shift + shift + ;; + -i|--include) + _PATH="${_PATH}/$2" + shift + shift + ;; + -h|--help) + echo "Usage: molecule_run.sh [-h|--help] [-e|--exclude] [-i|--include]" + exit 0 + ;; + esac +done + +for d in $(find ${_PATH} ${_EXCLUDE} -name molecule -type d) +do + pushd $(dirname $d) + molecule test --all + popd +done diff --git a/kubespray/tests/scripts/rebase.sh b/kubespray/tests/scripts/rebase.sh new file mode 100755 index 0000000..36cb7f6 --- /dev/null +++ b/kubespray/tests/scripts/rebase.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -euxo pipefail + +KUBESPRAY_NEXT_VERSION=2.$(( ${KUBESPRAY_VERSION:3:2} + 1 )) + +# Rebase PRs on master (or release branch) to get latest changes +if [[ $CI_COMMIT_REF_NAME == pr-* ]]; then + git config user.email "ci@kubespray.io" + git config user.name "CI" + if [[ -z "`git branch -a --list origin/release-$KUBESPRAY_NEXT_VERSION`" ]]; then + git pull --rebase origin master + else + git pull --rebase origin release-$KUBESPRAY_NEXT_VERSION + fi +fi diff --git a/kubespray/tests/scripts/terraform_install.sh b/kubespray/tests/scripts/terraform_install.sh new file mode 100755 index 0000000..4228bbd --- /dev/null +++ b/kubespray/tests/scripts/terraform_install.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -euxo pipefail + +apt-get install -y unzip +curl https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip > /tmp/terraform.zip +unzip /tmp/terraform.zip && mv ./terraform /usr/local/bin/ && terraform --version diff --git a/kubespray/tests/scripts/testcases_cleanup.sh b/kubespray/tests/scripts/testcases_cleanup.sh new file mode 100755 index 0000000..71b7fdc --- /dev/null +++ b/kubespray/tests/scripts/testcases_cleanup.sh @@ -0,0 +1,9 @@ +#!/bin/bash +set -euxo pipefail + +cd tests && make delete-${CI_PLATFORM} -s ; cd - + +if [ -d ~/.ara ] ; then + tar czvf ${CI_PROJECT_DIR}/cluster-dump/ara.tgz ~/.ara + rm -fr ~/.ara +fi diff --git a/kubespray/tests/scripts/testcases_prepare.sh b/kubespray/tests/scripts/testcases_prepare.sh new file mode 100755 index 0000000..de36f49 --- /dev/null +++ b/kubespray/tests/scripts/testcases_prepare.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -euxo pipefail + +: ${ANSIBLE_MAJOR_VERSION:=2.12} + +/usr/bin/python -m pip uninstall -y ansible ansible-base ansible-core +/usr/bin/python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt +mkdir -p /.ssh +mkdir -p cluster-dump +mkdir -p $HOME/.ssh +ansible-playbook --version + +# in some cases we may need to bring in collections or roles from ansible-galaxy +# to compensate for missing functionality in 
older ansible versions +if [ -f requirements-${ANSIBLE_MAJOR_VERSION}.yml ] ; then + ansible-galaxy role install -r requirements-${ANSIBLE_MAJOR_VERSION}.yml + ansible-galaxy collection install -r requirements-${ANSIBLE_MAJOR_VERSION}.yml +fi diff --git a/kubespray/tests/scripts/testcases_run.sh b/kubespray/tests/scripts/testcases_run.sh new file mode 100755 index 0000000..eac0afe --- /dev/null +++ b/kubespray/tests/scripts/testcases_run.sh @@ -0,0 +1,129 @@ +#!/bin/bash +set -euxo pipefail + +echo "CI_JOB_NAME is $CI_JOB_NAME" +CI_TEST_ADDITIONAL_VARS="" + +if [[ "$CI_JOB_NAME" =~ "upgrade" ]]; then + if [ "${UPGRADE_TEST}" == "false" ]; then + echo "Job name contains 'upgrade', but UPGRADE_TEST='false'" + exit 1 + fi +else + if [ "${UPGRADE_TEST}" != "false" ]; then + echo "UPGRADE_TEST!='false', but job names does not contain 'upgrade'" + exit 1 + fi +fi + +# needed for ara not to complain +export TZ=UTC + +export ANSIBLE_REMOTE_USER=$SSH_USER +export ANSIBLE_BECOME=true +export ANSIBLE_BECOME_USER=root +export ANSIBLE_CALLBACK_PLUGINS="$(python -m ara.setup.callback_plugins)" + +cd tests && make create-${CI_PLATFORM} -s ; cd - +ansible-playbook tests/cloud_playbooks/wait-for-ssh.yml + +# Flatcar Container Linux needs auto update disabled +if [[ "$CI_JOB_NAME" =~ "coreos" ]]; then + ansible all -m raw -a 'systemctl disable locksmithd' + ansible all -m raw -a 'systemctl stop locksmithd' + mkdir -p /opt/bin && ln -s /usr/bin/python /opt/bin/python +fi + +if [[ "$CI_JOB_NAME" =~ "opensuse" ]]; then + # OpenSUSE needs netconfig update to get correct resolv.conf + # See https://goinggnu.wordpress.com/2013/10/14/how-to-fix-the-dns-in-opensuse-13-1/ + ansible all -m raw -a 'netconfig update -f' + # Auto import repo keys + ansible all -m raw -a 'zypper --gpg-auto-import-keys refresh' +fi + +if [[ "$CI_JOB_NAME" =~ "ubuntu" ]]; then + # We need to tell ansible that ubuntu hosts are python3 only + CI_TEST_ADDITIONAL_VARS="-e ansible_python_interpreter=/usr/bin/python3" +fi + +ENABLE_040_TEST="true" +if [[ "$CI_JOB_NAME" =~ "hardening" ]]; then + # TODO: We need to remove this condition by finding alternative container + # image instead of netchecker which doesn't work at hardening environments. 
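testcases_prepare.sh above selects the Ansible release for a CI run purely through ANSIBLE_MAJOR_VERSION, which picks the matching tests/requirements-<version>.txt (and, if present, a requirements-<version>.yml of galaxy roles and collections). For example, forcing the 2.11 tool set locally would look like the sketch below; the default is 2.12:

    # install the CI tooling pinned for Ansible 2.11 instead of the 2.12 default
    ANSIBLE_MAJOR_VERSION=2.11 ./tests/scripts/testcases_prepare.sh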
+ ENABLE_040_TEST="false" +fi + +# Check out latest tag if testing upgrade +test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout "$KUBESPRAY_VERSION" +# Checkout the CI vars file so it is available +test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml +test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_REGISTRY_MIRROR} +test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_SETTING} + +# Create cluster +ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml + +# Repeat deployment if testing upgrade +if [ "${UPGRADE_TEST}" != "false" ]; then + test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml" + test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml" + git checkout "${CI_BUILD_REF}" + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" $PLAYBOOK +fi + +# Test control plane recovery +if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes reset.yml + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube_control_plane:!fake_hosts recover-control-plane.yml +fi + +# Tests Cases +## Test Master API +ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL + +## Test that all nodes are Ready +ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/015_check-nodes-ready.yml $ANSIBLE_LOG_LEVEL + +## Test that all pods are Running +ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/020_check-pods-running.yml $ANSIBLE_LOG_LEVEL + +## Test pod creation and ping between them +ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/030_check-network.yml $ANSIBLE_LOG_LEVEL + +## Advanced DNS checks +if [ "${ENABLE_040_TEST}" = "true" ]; then + ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL +fi + +## Kubernetes conformance tests +ansible-playbook -i ${ANSIBLE_INVENTORY} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/100_check-k8s-conformance.yml $ANSIBLE_LOG_LEVEL + +if [ "${IDEMPOT_CHECK}" = "true" ]; then + ## Idempotency checks 1/5 (repeat deployment) + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} ${CI_TEST_ADDITIONAL_VARS} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml + + ## Idempotency checks 2/5 (Advanced DNS checks) + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml + + if [ "${RESET_CHECK}" = "true" ]; then + ## Idempotency checks 3/5 (reset deployment) + 
ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml + + ## Idempotency checks 4/5 (redeploy after reset) + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml + + ## Idempotency checks 5/5 (Advanced DNS checks) + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml + fi +fi + +# Test node removal procedure +if [ "${REMOVE_NODE_CHECK}" = "true" ]; then + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME} --limit "all:!fake_hosts" remove-node.yml +fi + +# Clean up at the end, this is to allow stage1 tests to include cleanup test +if [ "${RESET_CHECK}" = "true" ]; then + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml +fi diff --git a/kubespray/tests/scripts/vagrant-validate.sh b/kubespray/tests/scripts/vagrant-validate.sh new file mode 100755 index 0000000..337782e --- /dev/null +++ b/kubespray/tests/scripts/vagrant-validate.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -euxo pipefail + +curl -sL "https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}_x86_64.deb" -o "/tmp/vagrant_${VAGRANT_VERSION}_x86_64.deb" +dpkg -i "/tmp/vagrant_${VAGRANT_VERSION}_x86_64.deb" +vagrant validate --ignore-provider diff --git a/kubespray/tests/scripts/vagrant_clean.sh b/kubespray/tests/scripts/vagrant_clean.sh new file mode 100755 index 0000000..b048818 --- /dev/null +++ b/kubespray/tests/scripts/vagrant_clean.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -euxo pipefail + +# Cleanup vagrant VMs to avoid name conflicts + +apt-get install -y libvirt-clients + +for i in $(virsh list --name) +do + virsh destroy "$i" + virsh undefine "$i" +done + + +# Cleanup domain volumes +for i in $(virsh vol-list default|grep \.img |grep -v VAGRANTSLASH | cut -f 2 -d ' ') +do + virsh vol-delete "$i" --pool default +done \ No newline at end of file diff --git a/kubespray/tests/shebang-unit b/kubespray/tests/shebang-unit new file mode 100755 index 0000000..6f9a2bc --- /dev/null +++ b/kubespray/tests/shebang-unit @@ -0,0 +1,1146 @@ +#!/usr/bin/env bash + +# Copyright (C) 2015, Arpinum +# +# shebang-unit is free software: you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, either version 3 of the License, or (at your option) any later +# version. +# +# shebang-unit is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# shebang-unit. If not, see http://www.gnu.org/licenses/lgpl.html. 
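testcases_run.sh above is the full per-job CI pipeline: create the platform, deploy cluster.yml, optionally repeat for upgrade, recovery, idempotency or node-removal checks, then run the numbered testcases. It is driven entirely by environment variables set by the CI job definitions, which are not part of this diff; the sketch below shows an illustrative and incomplete set for a plain non-upgrade run, and every value in it is an assumption:

    # illustrative environment for a single non-upgrade run of testcases_run.sh
    export CI_JOB_NAME=packet_ubuntu20-calico-aio
    export CI_PLATFORM=packet
    export SSH_USER=kubespray                               # assumed remote user
    export UPGRADE_TEST=false
    export RECOVER_CONTROL_PLANE_TEST=false
    export IDEMPOT_CHECK=false RESET_CHECK=false REMOVE_NODE_CHECK=false
    export ANSIBLE_LOG_LEVEL=-vv
    export CI_TEST_VARS=tests/files/${CI_JOB_NAME}.yml
    export CI_TEST_SETTING=tests/common_vars.yml            # assumed path to shared settings
    export CI_TEST_REGISTRY_MIRROR=tests/registry_mirror.yml # assumed path
    export ANSIBLE_INVENTORY=inventory/sample/hosts.ini      # assumed; used by the conformance step
    ./tests/scripts/testcases_run.sh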
+ +# shebang-unit all in one source file + + +configuration__load() { + # yes/no representation used with shebang-unit parameters to activate + # stuff like colors + SBU_YES="yes" + SBU_NO="no" + + # Colors for outputs + SBU_GREEN_COLOR_CODE="\\033[1;32m" + SBU_RED_COLOR_CODE="\\033[1;31m" + SBU_YELLOW_COLOR_CODE="\\033[1;33m" + SBU_DEFAULT_COLOR_CODE="\\e[0m" + + # Functions coding coventions + SBU_GLOBAL_SETUP_FUNCTION_NAME="global_setup" + SBU_GLOBAL_TEARDOWN_FUNCTION_NAME="global_teardown" + SBU_SETUP_FUNCTION_NAME="setup" + SBU_TEARDOWN_FUNCTION_NAME="teardown" + SBU_FUNCTION_DECLARATION_REGEX="^[ ]*\(function\)\{0,1\}[ ]*\([A-Za-z0-9_-]\{1,\}\)[ ]*\(([ ]*)\)\{0,1\}[ ]*{" + SBU_PRIVATE_FUNCTION_NAME_REGEX="^_.*" + + # Default configuration that can be modified with shebang-unit parameters + # For more information see shebang-unit usages + SBU_TEST_FILE_PATTERN="*_test.sh" + SBU_TEST_FUNCTION_PATTERN="*" + SBU_USE_COLORS="${SBU_YES}" + SBU_RANDOM_RUN="${SBU_NO}" + SBU_REPORTERS="simple" + SBU_JUNIT_REPORTER_OUTPUT_FILE="./junit_report.xml" + + # Internal constants + SBU_SUCCESS_STATUS_CODE=0 + SBU_FAILURE_STATUS_CODE=1 + SBU_VALUE_SEPARATOR="," + SBU_TEMP_DIR="/tmp/.shebang-unit" + SBU_LAST_ASSERTION_MSG_KEY="last_assertion_message" + SBU_NO_RUN="${SBU_NO}" + SBU_STANDARD_FD=42 + SBU_ERROR_FD=43 +} + + +assertion__equal() { + if [[ "$1" != "$2" ]]; then + _assertion__failed "Actual: <$2>, expected: <$1>." + fi +} + +assertion__different() { + if [[ "$1" == "$2" ]]; then + _assertion__failed "Both values are: <$1>." + fi +} + +assertion__string_contains() { + if ! system__string_contains "$1" "$2"; then + _assertion__failed "String: <$1> does not contain: <$2>." + fi +} + +assertion__string_does_not_contain() { + if system__string_contains "$1" "$2"; then + _assertion__failed "String: <$1> contains: <$2>." + fi +} + +assertion__string_empty() { + if [[ -n "$1" ]]; then + _assertion__failed "String: <$1> is not empty." + fi +} + +assertion__string_not_empty() { + if [[ -z "$1" ]]; then + _assertion__failed "The string is empty." + fi +} + +assertion__array_contains() { + local element=$1 + shift 1 + if ! array__contains "${element}" "$@"; then + local array_as_string="$(system__pretty_print_array "$@")" + _assertion__failed \ + "Array: <${array_as_string}> does not contain: <${element}>." + fi +} + +assertion__array_does_not_contain() { + local element=$1 + shift 1 + if array__contains "${element}" "$@"; then + local array_as_string="$(system__pretty_print_array "$@")" + _assertion__failed \ + "Array: <${array_as_string}> contains: <${element}>." + fi +} + +assertion__status_code_is_success() { + if (( $1 != ${SBU_SUCCESS_STATUS_CODE} )); then + _assertion__failed \ + "Status code is failure instead of success." "$2" + fi +} + +assertion__status_code_is_failure() { + if (( $1 == ${SBU_SUCCESS_STATUS_CODE} )); then + _assertion__failed \ + "Status code is success instead of failure." "$2" + fi +} + +assertion__successful() { + "$@" + if (( $? != ${SBU_SUCCESS_STATUS_CODE} )); then + _assertion__failed "Command is failing instead of successful." + fi +} + +assertion__failing() { + "$@" + if (( $? == ${SBU_SUCCESS_STATUS_CODE} )); then + _assertion__failed "Command is successful instead of failing." + fi +} + +_assertion__failed() { + local message_to_use="$(_assertion__get_assertion_message_to_use "$1" "$2")" + system__print_line "Assertion failed. 
${message_to_use}" + exit ${SBU_FAILURE_STATUS_CODE} +} + +_assertion__get_assertion_message_to_use() { + local message=$1 + local custom_messsage=$2 + if [[ -n "${custom_messsage}" ]]; then + system__print "${message} ${custom_messsage}" + else + system__print "${message}" + fi +} + + +mock__make_function_do_nothing() { + mock__make_function_call "$1" ":" +} + +mock__make_function_prints() { + local function=$1 + local text=$2 + eval "${function}() { printf "${text}"; }" +} + +mock__make_function_call() { + local function_to_mock=$1 + local function_to_call=$2 + shift 2 + eval "${function_to_mock}() { ${function_to_call} \"\$@\"; }" +} + + +runner__run_all_test_files() { + SBU_BASE_TEST_DIRECTORY=$1 + reporter__test_files_start_running + timer__store_current_time "global_time" + results__test_files_start_running + _runner__run_all_test_files_with_pattern_in_directory "$1" + reporter__test_files_end_running "$(timer__get_time_elapsed "global_time")" + runner__tests_are_successful +} + +_runner__run_all_test_files_with_pattern_in_directory() { + local file + local files + array__from_lines files <<< "$(_runner__get_test_files_in_directory "$1")" + for file in "${files[@]}"; do + file_runner__run_test_file "${file}" + done +} + +_runner__get_test_files_in_directory() { + local files + array__from_lines files <<< "$(find "$1" -name "${SBU_TEST_FILE_PATTERN}" | sort)" + if [[ "${SBU_RANDOM_RUN}" == "${SBU_YES}" ]]; then + array__from_lines files <<< "$(system__randomize_array "${files[@]}")" + fi + array__print "${files[@]}" +} + +runner__tests_are_successful() { + (( $(results__get_failing_tests_count) == 0 \ + && $(results__get_skipped_tests_count) == 0 )) +} + + +file_runner__run_test_file() { + local file=$1 + local public_functions=($(parser__get_public_functions_in_file "${file}")) + local test_functions=($(_file_runner__get_test_functions)) + reporter__test_file_starts_running "${file}" "${#test_functions[@]}" + ( source "${file}" + _file_runner__run_global_setup_if_exists \ + && _file_runner__call_all_tests + _file_runner__run_global_teardown_if_exists ) + _file_runner__check_if_global_setup_has_exited + reporter__test_file_ends_running +} + +_file_runner__run_all_tests_if_global_setup_is_successful() { + _file_runner__call_all_tests +} + +_file_runner__call_all_tests() { + local i + for (( i=0; i < ${#test_functions[@]}; i++ )); do + test_runner__run_test "${test_functions[${i}]}" "${public_functions[@]}" + done +} + +_file_runner__skip_all_tests() { + local i + for (( i=0; i < ${#test_functions[@]}; i++ )); do + test_runner__skip_test "${test_functions[${i}]}" "${public_functions[@]}" + done +} + +_file_runner__get_test_functions() { + local result=() + local test_function + for test_function in "${public_functions[@]}"; do + if _file_runner__function_is_a_test "${test_function}"\ + && [[ "${test_function}" == ${SBU_TEST_FUNCTION_PATTERN} ]]; then + result+=("${test_function}") + fi + done + _file_runner__get_randomized_test_functions_if_needed "${result[@]}" +} + +_file_runner__get_randomized_test_functions_if_needed() { + if [[ "${SBU_RANDOM_RUN}" == "${SBU_YES}" ]]; then + system__randomize_array "$@" + else + array__print "$@" + fi +} + +_file_runner__run_global_setup_if_exists() { + database__put "sbu_current_global_setup_has_failed" "${SBU_YES}" + _file_runner__call_function_if_exists "${SBU_GLOBAL_SETUP_FUNCTION_NAME}" \ + && database__put "sbu_current_global_setup_has_failed" "${SBU_NO}" +} + +_file_runner__run_global_teardown_if_exists() { + 
_file_runner__call_function_if_exists "${SBU_GLOBAL_TEARDOWN_FUNCTION_NAME}" +} + +_file_runner__function_is_a_test() { + ! array__contains "$1" \ + "${SBU_GLOBAL_SETUP_FUNCTION_NAME}" \ + "${SBU_GLOBAL_TEARDOWN_FUNCTION_NAME}" \ + "${SBU_SETUP_FUNCTION_NAME}" \ + "${SBU_TEARDOWN_FUNCTION_NAME}" +} + +_file_runner__call_function_if_exists() { + local function=$1 + shift 1 + if array__contains "${function}" "${public_functions[@]}"; then + "${function}" + fi +} + +_file_runner__check_if_global_setup_has_exited() { + local has_exited="$(database__get "sbu_current_global_setup_has_failed")" + if [[ "${has_exited}" == "${SBU_YES}" ]]; then + _file_runner__handle_failure_in_global_setup + fi +} + +_file_runner__handle_failure_in_global_setup() { + reporter__global_setup_has_failed + _file_runner__skip_all_tests +} + + +parser__get_public_functions_in_file() { + _parser__find_functions_in_file "$1" \ + | _parser__filter_private_functions \ + | awk '{ print $1 }' +} + +_parser__find_functions_in_file() { + grep -o "${SBU_FUNCTION_DECLARATION_REGEX}" "$1" \ + | _parser__get_function_name_from_declaration +} + +_parser__filter_private_functions() { + grep -v "${SBU_PRIVATE_FUNCTION_NAME_REGEX}" +} + +_parser__get_function_name_from_declaration() { + sed "s/${SBU_FUNCTION_DECLARATION_REGEX}/\2/" +} + + +timer__store_current_time() { + local id=$1 + database__put "sbu_beginning_date_$1" "$(system__get_date_in_seconds)" +} + +timer__get_time_elapsed() { + local id=$1 + local beginning_date="$(database__get "sbu_beginning_date_$1")" + local ending_date="$(system__get_date_in_seconds)" + + [[ -n "${beginning_date}" ]] \ + && system__print "$(( ending_date - beginning_date ))" \ + || system__print "0" +} + + +results__test_files_start_running() { + database__put "sbu_successful_tests_count" "0" + database__put "sbu_failing_tests_count" "0" + database__put "sbu_skipped_tests_count" "0" +} + +results__get_successful_tests_count() { + _results__get_tests_count_of_type "successful" +} + +results__increment_successful_tests() { + _results__increment_tests_of_type "successful" +} + +results__get_failing_tests_count() { + _results__get_tests_count_of_type "failing" +} + +results__increment_failing_tests() { + _results__increment_tests_of_type "failing" +} + +results__get_skipped_tests_count() { + _results__get_tests_count_of_type "skipped" +} + +results__increment_skipped_tests() { + _results__increment_tests_of_type "skipped" +} + +results__get_total_tests_count() { + local successes="$(results__get_successful_tests_count)" + local failures="$(results__get_failing_tests_count)" + local skipped="$(results__get_skipped_tests_count)" + system__print "$(( successes + failures + skipped ))" +} + +_results__get_tests_count_of_type() { + local type=$1 + database__get "sbu_${type}_tests_count" +} + +_results__increment_tests_of_type() { + local type=$1 + local count="$(results__get_${type}_tests_count)" + database__put "sbu_${type}_tests_count" "$(( count + 1 ))" +} + + +test_runner__run_test() { + local test_function=$1 + shift 1 + reporter__test_starts_running "${test_function}" + timer__store_current_time "test_time" + ( + _test_runner__call_setup_if_exists "$@" \ + && _test_runner__call_test_fonction "${test_function}" + local setup_and_test_code=$? + _test_runner__call_teardown_if_exists "$@" + (( $? == ${SBU_SUCCESS_STATUS_CODE} \ + && ${setup_and_test_code} == ${SBU_SUCCESS_STATUS_CODE} )) + ) + _test_runner__parse_test_function_result $? 
+ reporter__test_ends_running "$(timer__get_time_elapsed "test_time")" +} + +_test_runner__call_test_fonction() { + ( "$1" >&${SBU_STANDARD_FD} 2>&${SBU_ERROR_FD} ) +} + +_test_runner__call_setup_if_exists() { + _test_runner__call_function_if_exits "${SBU_SETUP_FUNCTION_NAME}" "$@" +} + +_test_runner__call_teardown_if_exists() { + _test_runner__call_function_if_exits "${SBU_TEARDOWN_FUNCTION_NAME}" "$@" +} + +_test_runner__parse_test_function_result() { + if (( $1 == ${SBU_SUCCESS_STATUS_CODE} )); then + results__increment_successful_tests + reporter__test_has_succeeded + else + results__increment_failing_tests + reporter__test_has_failed + fi +} + +_test_runner__call_function_if_exits() { + local function=$1 + shift 1 + if array__contains "${function}" "$@"; then + "${function}" + fi +} + +test_runner__skip_test() { + local test_function=$1 + reporter__test_starts_running "${test_function}" + results__increment_skipped_tests + reporter__test_is_skipped "${test_function}" + reporter__test_ends_running 0 +} + + +reporter__test_files_start_running() { + _reporter__initialise_file_descriptors + reporter__for_each_reporter \ + _reporter__call_function "test_files_start_running" "$@" +} + +_reporter__initialise_file_descriptors() { + eval "exec ${SBU_STANDARD_FD}>&1" + eval "exec ${SBU_ERROR_FD}>&2" +} + +reporter__global_setup_has_failed() { + reporter__for_each_reporter \ + _reporter__call_function "global_setup_has_failed" "$@" +} + +reporter__test_file_starts_running() { + reporter__for_each_reporter \ + _reporter__call_function "test_file_starts_running" "$@" +} + +reporter__test_starts_running() { + reporter__for_each_reporter \ + _reporter__call_function "test_starts_running" "$@" +} + +reporter__test_has_succeeded() { + reporter__for_each_reporter \ + _reporter__call_function "test_has_succeeded" "$@" +} + +reporter__test_has_failed() { + reporter__for_each_reporter \ + _reporter__call_function "test_has_failed" "$@" +} + +reporter__test_is_skipped() { + reporter__for_each_reporter \ + _reporter__call_function "test_is_skipped" "$@" +} + +reporter__test_ends_running() { + reporter__for_each_reporter \ + _reporter__call_function "test_ends_running" "$@" +} + +reporter__test_file_ends_running() { + reporter__for_each_reporter \ + _reporter__call_function "test_file_ends_running" "$@" +} + +reporter__test_files_end_running() { + reporter__for_each_reporter \ + _reporter__call_function "test_files_end_running" "$@" + _reporter__release_file_descriptors +} + +_reporter__release_file_descriptors() { + eval "exec 1>&${SBU_STANDARD_FD} ${SBU_STANDARD_FD}>&-" + eval "exec 2>&${SBU_ERROR_FD} ${SBU_ERROR_FD}>&-" +} + +_reporter__call_function() { + local function=$1 + shift 1 + "${reporter}_reporter__${function}" "$@" +} + +reporter__for_each_reporter() { + local reporter + for reporter in ${SBU_REPORTERS//${SBU_VALUE_SEPARATOR}/ }; do + "$@" + done +} + +reporter__print_with_color() { + system__print_with_color "$@" >&${SBU_STANDARD_FD} +} + +reporter__print_line() { + system__print_line "$@" >&${SBU_STANDARD_FD} +} + +reporter__print_line_with_color() { + system__print_line_with_color "$@" >&${SBU_STANDARD_FD} +} + +reporter__print_new_line() { + system__print_new_line >&${SBU_STANDARD_FD} +} + +reporter__get_color_code_for_tests_result() { + local color_code=${SBU_GREEN_COLOR_CODE} + if ! 
runner__tests_are_successful; then + color_code=${SBU_RED_COLOR_CODE} + fi + system__print "${color_code}" +} + +reporter__get_test_file_relative_name() { + system__print "${1#${SBU_BASE_TEST_DIRECTORY}\/}" +} + + +simple_reporter__test_files_start_running() { + : +} + +simple_reporter__test_file_starts_running() { + local relative_name="$(reporter__get_test_file_relative_name "$1")" + reporter__print_line "[File] ${relative_name}" +} + +simple_reporter__global_setup_has_failed() { + reporter__print_line_with_color \ + "Global setup has failed" ${SBU_YELLOW_COLOR_CODE} +} + +simple_reporter__test_starts_running() { + reporter__print_line "[Test] $1" +} + +simple_reporter__test_has_succeeded() { + reporter__print_line_with_color "OK" ${SBU_GREEN_COLOR_CODE} +} + +simple_reporter__test_has_failed() { + reporter__print_line_with_color "KO" ${SBU_RED_COLOR_CODE} +} + +simple_reporter__test_is_skipped() { + reporter__print_line_with_color "Skipped" ${SBU_YELLOW_COLOR_CODE} +} + +simple_reporter__test_ends_running() { + : +} + +simple_reporter__test_file_ends_running() { + reporter__print_new_line +} + +simple_reporter__test_files_end_running() { + local time="in $1s" + reporter__print_line "[Results]" + local color="$(reporter__get_color_code_for_tests_result)" + local total_count="$(_simple_reporter__get_total_count_message)" + local failures_count="$(_simple_reporter__get_failures_count_message)" + local skipped_count="$(results__get_skipped_tests_count) skipped" + local message="${total_count}, ${failures_count}, ${skipped_count} ${time}" + reporter__print_line_with_color "${message}" "${color}" +} + +_simple_reporter__get_total_count_message() { + local count="$(results__get_total_tests_count)" + system__print "${count} test$(_simple_reporter__get_agreement ${count})" +} + +_simple_reporter__get_failures_count_message() { + local count="$(results__get_failing_tests_count)" + system__print "${count} failure$(_simple_reporter__get_agreement ${count})" +} + +_simple_reporter__get_agreement() { + (( $1 > 1 )) \ + && system__print "s" \ + || system__print "" +} + + +dots_reporter__test_files_start_running() { + exec 1>/dev/null + exec 2>/dev/null +} + +dots_reporter__test_file_starts_running() { + : +} + +dots_reporter__global_setup_has_failed() { + : +} + +dots_reporter__test_starts_running() { + : +} + +dots_reporter__test_has_succeeded() { + reporter__print_with_color "." 
${SBU_GREEN_COLOR_CODE} +} + +dots_reporter__test_has_failed() { + reporter__print_with_color "F" ${SBU_RED_COLOR_CODE} +} + +dots_reporter__test_is_skipped() { + reporter__print_with_color "S" ${SBU_YELLOW_COLOR_CODE} +} + +dots_reporter__test_ends_running() { + : +} + +dots_reporter__test_file_ends_running() { + : +} + +dots_reporter__test_files_end_running() { + local color="$(reporter__get_color_code_for_tests_result)" + local texte="$(runner__tests_are_successful \ + && system__print "OK" \ + || system__print "KO")" + reporter__print_line_with_color "${texte}" "${color}" +} + + +junit_reporter__test_files_start_running() { + _junit_reporter__initialise_report_with \ + "" + _junit_reporter__write_line_to_report "" +} + +junit_reporter__test_file_starts_running() { + local file_name=$1 + local test_count=$2 + local suite_name="$(_junit_reporter__get_suite_name "${file_name}")" + database__put "sbu_current_suite_name" "${suite_name}" + _junit_reporter__write_line_to_report \ + " " + _junit_reporter__delete_all_outputs_lines "suite" + _junit_reporter__redirect_outputs_to_database "suite" +} + +junit_reporter__global_setup_has_failed() { + : +} + +junit_reporter__test_starts_running() { + local suite_name="$(database__get "sbu_current_suite_name")" + local test_name="$(xml__encode_text "$1")" + _junit_reporter__write_line_to_report \ + " " + _junit_reporter__delete_all_outputs_lines "test" + _junit_reporter__redirect_outputs_to_database "test" +} + +junit_reporter__test_has_succeeded() { + : +} + +junit_reporter__test_has_failed() { + _junit_reporter__write_line_to_report " " + _junit_reporter__write_line_to_report " " +} + +junit_reporter__test_is_skipped() { + _junit_reporter__write_line_to_report " " + _junit_reporter__write_line_to_report " " +} + +junit_reporter__test_ends_running() { + _junit_reporter__redirect_outputs_to_database "suite" + _junit_reporter__write_time_in_current_test_case_tag_in_report "$1" + _junit_reporter__flush_all_outputs_to_report_if_any "test" + _junit_reporter__write_line_to_report " " +} + +_junit_reporter__write_time_in_current_test_case_tag_in_report() { + local test_time=$1 + local report_content=$(cat "${SBU_JUNIT_REPORTER_OUTPUT_FILE}") + local content_with_time="$(system__substitute_variable \ + "${report_content}" "sbu_current_test_time" "${test_time}")" + system__print_line \ + "${content_with_time}" > "${SBU_JUNIT_REPORTER_OUTPUT_FILE}" +} + +junit_reporter__test_file_ends_running() { + _junit_reporter__flush_all_outputs_to_report_if_any "suite" + _junit_reporter__write_line_to_report " " + database__put "sbu_current_suite_name" "" +} + +junit_reporter__test_files_end_running() { + _junit_reporter__write_line_to_report "" +} + +_junit_reporter__get_suite_name() { + local relative_name="$(reporter__get_test_file_relative_name "$1")" + local dots_replaced_by_underscores="${relative_name//./_}" + local slashes_replaced_by_dots="${dots_replaced_by_underscores//\//.}" + xml__encode_text "${slashes_replaced_by_dots}" +} + +_junit_reporter__initialise_report_with() { + system__print_line "$1" > "${SBU_JUNIT_REPORTER_OUTPUT_FILE}" +} + +_junit_reporter__write_line_to_report() { + system__print_line "$1" >> "${SBU_JUNIT_REPORTER_OUTPUT_FILE}" +} + +_junit_reporter__redirect_outputs_to_database() { + local scope=$1 + exec 1>>\ + "$(database__get_descriptor "sbu_current_${scope}_standard_ouputs_lines")" + exec 2>>\ + "$(database__get_descriptor "sbu_current_${scope}_error_ouputs_lines")" +} + +_junit_reporter__delete_all_outputs_lines() { + database__put 
"sbu_current_$1_standard_ouputs_lines" + database__put "sbu_current_$1_error_ouputs_lines" +} + +_junit_reporter__flush_all_outputs_to_report_if_any() { + _junit_reporter__flush_outputs_to_report_if_any "$1" "standard" + _junit_reporter__flush_outputs_to_report_if_any "$1" "error" +} + +_junit_reporter__flush_outputs_to_report_if_any() { + local scope=$1 + local outputs_type=$2 + local key="sbu_current_${scope}_${outputs_type}_ouputs_lines" + local outputs="$(database__get "${key}")" + if [[ -n "${outputs}" ]]; then + _junit_reporter__write_outputs_to_report \ + "${scope}" "${outputs_type}" "${outputs}" + database__put "${key}" "" + fi +} + +_junit_reporter__write_outputs_to_report() { + local scope=$1 + local outputs_type=$2 + local outputs=$3 + local tag="$(_junit_reporter__get_tag_for_outputs_type "${outputs_type}")" + local indentation="$(_junit_reporter__get_indentation_for_scope "${scope}")" + _junit_reporter__write_line_to_report "${indentation}<${tag}>" + _junit_reporter__write_line_to_report "$(xml__encode_text "${outputs}")" + _junit_reporter__write_line_to_report "${indentation}" +} + +_junit_reporter__get_tag_for_outputs_type() { + [[ "$1" == "standard" ]] \ + && system__print "system-out" \ + || system__print "system-err" +} + +_junit_reporter__get_indentation_for_scope() { + [[ "$1" == "suite" ]] \ + && system__print " " \ + || system__print " " +} + + +xml__encode_text() { + local encoded=${1//\&/\&\;} + encoded=${encoded//\/\>\;} + encoded=${encoded//\"/\"\;} + encoded=${encoded//\'/\&apos\;} + system__print "${encoded}" +} + + +database__initialise() { + _SBU_DB_TOKEN="$(system__random)" + _database__ensure_directory_exists +} + +database__release() { + rm -rf "$(_database__get_dir)" +} + +database__put() { + _database__ensure_directory_exists + system__print "$2" > "$(_database__get_dir)/$1" +} + +database__post() { + _database__ensure_directory_exists + system__print "$2" >> "$(_database__get_dir)/$1" +} + +database__post_line() { + _database__ensure_directory_exists + system__print_line "$2" >> "$(_database__get_dir)/$1" +} + +database__put_variable() { + _database__ensure_directory_exists + database__put "$1" "${!1}" +} + +database__get() { + [[ -e "$(_database__get_dir)/$1" ]] && cat "$(_database__get_dir)/$1" +} + +database__get_descriptor() { + system__print "$(_database__get_dir)/$1" +} + +_database__ensure_directory_exists() { + mkdir -p "$(_database__get_dir)" +} + +_database__get_dir() { + system__print "${SBU_TEMP_DIR}/database/${_SBU_DB_TOKEN}" +} + + +system__get_string_or_default() { + [[ -n "$1" ]] \ + && system__print "$1" \ + || system__print "$2" +} + +system__get_date_in_seconds() { + date +%s +} + +system__print_line_with_color() { + system__print_with_color "$@" + system__print_new_line +} + +system__print_with_color() { + if [[ "${SBU_USE_COLORS}" == "${SBU_YES}" ]]; then + printf "$2$1${SBU_DEFAULT_COLOR_CODE}" + else + system__print "$1" + fi +} + +system__print_line() { + system__print "$1" + system__print_new_line +} + +system__print() { + printf "%s" "$1" +} + +system__print_new_line() { + printf "\n" +} + +array__contains() { + local value=$1 + shift 1 + local i + for (( i=1; i <= $#; i++ )); do + if [[ "${!i}" == "${value}" ]]; then + return ${SBU_SUCCESS_STATUS_CODE} + fi + done + return ${SBU_FAILURE_STATUS_CODE} +} + +array__from_lines() { + local IFS=$'\n' + eval "$1=(\$( 0 )); do + local random_index=$(( $(system__random) % ${#copy[@]} )) + system__print_line "${copy[${random_index}]}" + unset copy[${random_index}] + copy=("${copy[@]}") + 
+
+system__random() {
+    system__print "${RANDOM}"
+}
+
+system__substitute_variable() {
+    local string=$1
+    local key="\$\{$2\}"
+    local value=$3
+    printf "%s" "${string//${key}/${value}}"
+}
+
+
+main__main() {
+    configuration__load
+    _main__initialise
+    local parsed_arguments=0
+    _main__parse_arguments "$@"
+    shift ${parsed_arguments}
+    _main__assert_only_one_argument_left $#
+    _main__assert_reporters_are_known
+    SBU_BASE_TEST_DIRECTORY=$1
+
+    if [[ "${SBU_NO_RUN}" != "${SBU_YES}" ]]; then
+        runner__run_all_test_files "$1"
+        return $?
+    fi
+}
+
+_main__initialise() {
+    database__initialise
+    trap _main__release EXIT
+}
+
+_main__release() {
+    database__release
+}
+
+_main__parse_arguments() {
+    local argument
+    for argument in "$@"; do
+        case "${argument}" in
+            -a|--api-cheat-sheet)
+                _main__print_api_cheat_sheet_and_exit
+                ;;
+            -c=*|--colors=*)
+                SBU_USE_COLORS="${argument#*=}"
+                (( parsed_arguments++ ))
+                ;;
+            -d=*|--random-run=*)
+                SBU_RANDOM_RUN="${argument#*=}"
+                (( parsed_arguments++ ))
+                ;;
+            -h|--help)
+                _main__print_full_usage
+                exit ${SBU_SUCCESS_STATUS_CODE}
+                ;;
+            -f=*|--file-pattern=*)
+                SBU_TEST_FILE_PATTERN="${argument#*=}"
+                (( parsed_arguments++ ))
+                ;;
+            --no-run)
+                SBU_NO_RUN="${SBU_YES}"
+                (( parsed_arguments++ ))
+                ;;
+            -o=*|--output-file=*)
+                SBU_JUNIT_REPORTER_OUTPUT_FILE="${argument#*=}"
+                (( parsed_arguments++ ))
+                ;;
+            -t=*|--test-pattern=*)
+                SBU_TEST_FUNCTION_PATTERN="${argument#*=}"
+                (( parsed_arguments++ ))
+                ;;
+            -r=*|--reporters=*)
+                SBU_REPORTERS="${argument#*=}"
+                (( parsed_arguments++ ))
+                ;;
+            -*|--*)
+                _main__print_illegal_option "${argument}"
+                _main__print_usage_and_exit_with_code ${SBU_FAILURE_STATUS_CODE}
+                ;;
+        esac
+    done
+}
+
+_main__assert_reporters_are_known() {
+    reporter__for_each_reporter _main__fail_if_reporter_unknown
+}
+
+_main__fail_if_reporter_unknown() {
+    if ! array__contains "${reporter}" "simple" "dots" "junit"; then
+        system__print_line \
+            "$(_main__get_script_name): unknown reporter <${reporter}>"
+        exit ${SBU_FAILURE_STATUS_CODE}
+    fi
+}
+
+_main__print_illegal_option() {
+    local option="${1%=*}"
+    option="${option#-}"
+    option="${option#-}"
+    system__print_line "$(_main__get_script_name): illegal option -- ${option}"
+}
+
+_main__assert_only_one_argument_left() {
+    if (( $1 > 1 )); then
+        system__print_line "$(_main__get_script_name): only one path is allowed"
+        _main__print_usage_and_exit_with_code ${SBU_FAILURE_STATUS_CODE}
+    fi
+}
+
+_main__get_script_name() {
+    basename "${BASH_SOURCE[0]}"
+}
+
+_main__print_usage_and_exit_with_code() {
+    _main__print_usage
+    exit $1
+}
+
+_main__print_full_usage() {
+    _main__print_usage
+    local script="$(_main__get_script_name)"
+    system__print_new_line
+    system__print_line "\
+[options]
+  -a, --api-cheat-sheet
+      print the API cheat sheet (assertions, special functions, mocks)
+  -c, --colors=${SBU_YES} or ${SBU_NO}
+      print test output with colors or not
+  -d, --random-run=${SBU_YES} or ${SBU_NO}
+      run test files and functions in random order or not
+  -f, --file-pattern=<pattern>
+      pattern to filter test files
+  -h, --help
+      print usage
+  --no-run
+      parse options and load configuration without running any test
+  -o, --output-file=<file>
+      output file for the JUnit reporter
+  -r, --reporters=<reporters>
+      comma-separated reporters (simple, dots or junit)
+  -t, --test-pattern=<pattern>
+      pattern to filter test functions in files
+
+[examples]
+  ${script} .
+      run all tests in current directory
+  ${script} -f=*test.sh sources/test
+      run all test files ending with test.sh in sources/test"
+}
+
+_main__print_usage() {
+    system__print_line "\
+usage: $(_main__get_script_name) [options] path
+  run all tests in path"
+}
+
+_main__print_api_cheat_sheet_and_exit() {
+    system__print_line "\
+[assertions]
+  assertion__equal (value, other)
+      -> assert that <value> is equal to <other>
+  assertion__different (value, other)
+      -> assert that <value> is different from <other>
+  assertion__string_contains (string, substring)
+      -> assert that <string> contains <substring>
+  assertion__string_does_not_contain (string, substring)
+      -> assert that <string> does not contain <substring>
+  assertion__string_empty (string)
+      -> assert that <string> is empty
+  assertion__string_not_empty (string)
+      -> assert that <string> is not empty
+  assertion__array_contains (element, array[0], array[1], ...)
+      -> assert that the <array> contains the <element>
+  assertion__array_does_not_contain (element, array elements...)
+      -> assert that the <array> does not contain the <element>
+  assertion__successful (command)
+      -> assert that the <command> is successful
+  assertion__failing (command)
+      -> assert that the <command> is failing
+  assertion__status_code_is_success (code)
+      -> assert that the status <code> is 0
+  assertion__status_code_is_failure (code)
+      -> assert that the status <code> is not 0
+
+[special functions]
+  ${SBU_GLOBAL_SETUP_FUNCTION_NAME}
+      -> Executed before all tests in a file
+  ${SBU_GLOBAL_TEARDOWN_FUNCTION_NAME}
+      -> Executed after all tests in a file
+  ${SBU_SETUP_FUNCTION_NAME}
+      -> Executed before each test in a file
+  ${SBU_TEARDOWN_FUNCTION_NAME}
+      -> Executed after each test in a file
+
+[mocks]
+  mock__make_function_do_nothing (function_to_mock)
+      -> make a function do nothing
+  mock__make_function_prints (function_to_mock, message)
+      -> make a function print a message
+  mock__make_function_call (function_to_mock, function_to_call)
+      -> make a function call another function"
+    exit ${SBU_SUCCESS_STATUS_CODE}
+}
+
+
+main__main "$@"
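Putting the cheat sheet and the option parsing together, a test file for this runner could look like the sketch below. The file name, the test function prefix, and the ./bashunit invocation are assumptions for illustration only; the hunk above defines the assertion and option names but not the test-discovery convention.

    # tests/example_test.sh (hypothetical)
    test__addition_works() {
        assertion__equal 4 $(( 2 + 2 ))
    }

    test__greeting_contains_name() {
        assertion__string_contains "hello, world" "world"
    }

It would then be run with something like:

    ./bashunit -r=simple tests
    ./bashunit -f=*_test.sh -t=*greeting* -r=junit -o=report.xml tests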
diff --git a/kubespray/tests/support/aws.groovy b/kubespray/tests/support/aws.groovy
new file mode 100644
index 0000000..bc13b51
--- /dev/null
+++ b/kubespray/tests/support/aws.groovy
@@ -0,0 +1,94 @@
+def run(username, credentialsId, ami, network_plugin, aws_access, aws_secret) {
+    def inventory_path = pwd() + "/inventory/sample/${env.CI_JOB_NAME}-${env.BUILD_NUMBER}.ini"
+    dir('tests') {
+        wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) {
+            try {
+                create_vm("${env.CI_JOB_NAME}-${env.BUILD_NUMBER}", inventory_path, ami, username, network_plugin, aws_access, aws_secret)
+                install_cluster(inventory_path, credentialsId, network_plugin)
+
+                test_apiserver(inventory_path, credentialsId)
+                test_create_pod(inventory_path, credentialsId)
+                test_network(inventory_path, credentialsId)
+            } finally {
+                delete_vm(inventory_path, credentialsId, aws_access, aws_secret)
+            }
+        }
+    }
+}
+
+def create_vm(run_id, inventory_path, ami, username, network_plugin, aws_access, aws_secret) {
+    ansiblePlaybook(
+        inventory: 'local_inventory/hosts.cfg',
+        playbook: 'cloud_playbooks/create-aws.yml',
+        extraVars: [
+            test_id: run_id,
+            kube_network_plugin: network_plugin,
+            aws_access_key: [value: aws_access, hidden: true],
+            aws_secret_key: [value: aws_secret, hidden: true],
+            aws_ami_id: ami,
+            aws_security_group: [value: 'sg-cb0327a2', hidden: true],
+            key_name: 'travis-ci',
+            inventory_path: inventory_path,
+            aws_region: 'eu-central-1',
+            ssh_user: username
+        ],
+        colorized: true
+    )
+}
+
+def delete_vm(inventory_path, credentialsId, aws_access, aws_secret) {
+    ansiblePlaybook(
+        inventory:
inventory_path, + playbook: 'cloud_playbooks/delete-aws.yml', + credentialsId: credentialsId, + extraVars: [ + aws_access_key: [value: aws_access, hidden: true], + aws_secret_key: [value: aws_secret, hidden: true] + ], + colorized: true + ) +} + +def install_cluster(inventory_path, credentialsId, network_plugin) { + ansiblePlaybook( + inventory: inventory_path, + playbook: '../cluster.yml', + sudo: true, + credentialsId: credentialsId, + extraVars: [ + kube_network_plugin: network_plugin + ], + extras: "-e cloud_provider=aws", + colorized: true + ) +} + +def test_apiserver(inventory_path, credentialsId) { + ansiblePlaybook( + inventory: inventory_path, + playbook: 'testcases/010_check-apiserver.yml', + credentialsId: credentialsId, + colorized: true + ) +} + +def test_create_pod(inventory_path, credentialsId) { + ansiblePlaybook( + inventory: inventory_path, + playbook: 'testcases/020_check-create-pod.yml', + sudo: true, + credentialsId: credentialsId, + colorized: true + ) +} + +def test_network(inventory_path, credentialsId) { + ansiblePlaybook( + inventory: inventory_path, + playbook: 'testcases/030_check-network.yml', + sudo: true, + credentialsId: credentialsId, + colorized: true + ) +} +return this; diff --git a/kubespray/tests/templates/fake_hosts.yml.j2 b/kubespray/tests/templates/fake_hosts.yml.j2 new file mode 100644 index 0000000..6731092 --- /dev/null +++ b/kubespray/tests/templates/fake_hosts.yml.j2 @@ -0,0 +1,3 @@ +ansible_default_ipv4: + address: 255.255.255.255 +ansible_hostname: "{{ '{{' }}inventory_hostname}}" diff --git a/kubespray/tests/templates/inventory-aws.j2 b/kubespray/tests/templates/inventory-aws.j2 new file mode 100644 index 0000000..e3c5373 --- /dev/null +++ b/kubespray/tests/templates/inventory-aws.j2 @@ -0,0 +1,29 @@ +node1 ansible_ssh_host={{ec2.instances[0].public_ip}} ansible_ssh_user={{ssh_user}} +node2 ansible_ssh_host={{ec2.instances[1].public_ip}} ansible_ssh_user={{ssh_user}} +node3 ansible_ssh_host={{ec2.instances[2].public_ip}} ansible_ssh_user={{ssh_user}} + +[kube_control_plane] +node1 +node2 + +[kube_node] +node1 +node2 +node3 + +[etcd] +node1 +node2 + +[k8s_cluster:children] +kube_node +kube_control_plane +calico_rr + +[calico_rr] + +[broken_kube_control_plane] +node2 + +[broken_etcd] +node2 diff --git a/kubespray/tests/templates/inventory-do.j2 b/kubespray/tests/templates/inventory-do.j2 new file mode 100644 index 0000000..fb54361 --- /dev/null +++ b/kubespray/tests/templates/inventory-do.j2 @@ -0,0 +1,47 @@ +{% for instance in droplets.results %} +{{instance.droplet.name}} ansible_ssh_host={{instance.droplet.ip_address}} +{% endfor %} + +{% if mode is defined and mode == "separate" %} +[kube_control_plane] +{{droplets.results[0].droplet.name}} + +[kube_node] +{{droplets.results[1].droplet.name}} + +[etcd] +{{droplets.results[2].droplet.name}} +{% elif mode is defined and mode == "ha" %} +[kube_control_plane] +{{droplets.results[0].droplet.name}} +{{droplets.results[1].droplet.name}} + +[kube_node] +{{droplets.results[2].droplet.name}} + +[etcd] +{{droplets.results[1].droplet.name}} +{{droplets.results[2].droplet.name}} + +[broken_kube_control_plane] +{{droplets.results[1].droplet.name}} + +[broken_etcd] +{{droplets.results[2].droplet.name}} +{% else %} +[kube_control_plane] +{{droplets.results[0].droplet.name}} + +[kube_node] +{{droplets.results[1].droplet.name}} + +[etcd] +{{droplets.results[0].droplet.name}} +{% endif %} + +[calico_rr] + +[k8s_cluster:children] +kube_node +kube_control_plane +calico_rr diff --git 
a/kubespray/tests/templates/inventory-gce.j2 b/kubespray/tests/templates/inventory-gce.j2
new file mode 100644
index 0000000..33e9bbc
--- /dev/null
+++ b/kubespray/tests/templates/inventory-gce.j2
@@ -0,0 +1,73 @@
+{% set node1 = gce.instance_data[0].name %}
+{{node1}} ansible_ssh_host={{gce.instance_data[0].public_ip}}
+{% if mode != "aio" %}
+{% set node2 = gce.instance_data[1].name %}
+{{node2}} ansible_ssh_host={{gce.instance_data[1].public_ip}}
+{% endif %}
+{% if mode is defined and mode in ["ha", "ha-scale", "separate", "separate-scale"] %}
+{% set node3 = gce.instance_data[2].name %}
+{{node3}} ansible_ssh_host={{gce.instance_data[2].public_ip}}
+{% endif %}
+{% if mode is defined and mode in ["separate", "separate-scale"] %}
+[kube_control_plane]
+{{node1}}
+
+[kube_node]
+{{node2}}
+
+[etcd]
+{{node3}}
+
+{% elif mode is defined and mode in ["ha", "ha-scale"] %}
+[kube_control_plane]
+{{node1}}
+{{node2}}
+
+[kube_node]
+{{node3}}
+
+[etcd]
+{{node1}}
+{{node2}}
+{{node3}}
+
+[broken_kube_control_plane]
+{{node2}}
+
+[broken_etcd]
+{{node2}}
+{{node3}}
+{% elif mode == "default" %}
+[kube_control_plane]
+{{node1}}
+
+[kube_node]
+{{node2}}
+
+[etcd]
+{{node1}}
+{% elif mode == "aio" %}
+[kube_control_plane]
+{{node1}}
+
+[kube_node]
+{{node1}}
+
+[etcd]
+{{node1}}
+{% endif %}
+
+[k8s_cluster:children]
+kube_node
+kube_control_plane
+calico_rr
+
+[calico_rr]
+
+{% if mode is defined and mode in ["scale", "separate-scale", "ha-scale"] %}
+[fake_hosts]
+fake_scale_host[1:200]
+
+[kube_node:children]
+fake_hosts
+{% endif %}
diff --git a/kubespray/tests/testcases/010_check-apiserver.yml b/kubespray/tests/testcases/010_check-apiserver.yml
new file mode 100644
index 0000000..a0a09a4
--- /dev/null
+++ b/kubespray/tests/testcases/010_check-apiserver.yml
@@ -0,0 +1,23 @@
+---
+- hosts: kube_control_plane
+
+  tasks:
+    - name: Check the API servers are responding
+      uri:
+        url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port | default(6443) }}/version"
+        validate_certs: no
+        status_code: 200
+      register: apiserver_response
+      retries: 12
+      delay: 5
+      until: apiserver_response is success
+
+    - debug: # noqa unnamed-task
+        msg: "{{ apiserver_response.json }}"
+
+    - name: Check API servers version
+      assert:
+        that:
+          - apiserver_response.json.gitVersion == kube_version
+        fail_msg: "apiserver version different than expected {{ kube_version }}"
+      when: kube_version is defined
diff --git a/kubespray/tests/testcases/015_check-nodes-ready.yml b/kubespray/tests/testcases/015_check-nodes-ready.yml
new file mode 100644
index 0000000..1c3b977
--- /dev/null
+++ b/kubespray/tests/testcases/015_check-nodes-ready.yml
@@ -0,0 +1,35 @@
+---
+- hosts: kube_control_plane[0]
+  tasks:
+
+    - name: Force binaries directory for Flatcar Container Linux by Kinvolk
+      set_fact:
+        bin_dir: "/opt/bin"
+      when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
+
+    - name: Force binaries directory for other hosts
+      set_fact:
+        bin_dir: "/usr/local/bin"
+      when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
+
+    - import_role: # noqa unnamed-task
+        name: cluster-dump
+
+    - name: Check kubectl output
+      command: "{{ bin_dir }}/kubectl get nodes"
+      changed_when: false
+      register: get_nodes
+      no_log: true
+
+    - debug: # noqa unnamed-task
+        msg: "{{ get_nodes.stdout.split('\n') }}"
+
+    - name: Check that all nodes are running and ready
+      command: "{{ bin_dir }}/kubectl get nodes --no-headers -o yaml"
+      changed_when: false
+      register: get_nodes_yaml
until: + # Check that all nodes are Status=Ready + - '(get_nodes_yaml.stdout | from_yaml)["items"] | map(attribute = "status.conditions") | map("items2dict", key_name="type", value_name="status") | map(attribute="Ready") | list | min' + retries: 30 + delay: 10 diff --git a/kubespray/tests/testcases/020_check-pods-running.yml b/kubespray/tests/testcases/020_check-pods-running.yml new file mode 100644 index 0000000..46392d1 --- /dev/null +++ b/kubespray/tests/testcases/020_check-pods-running.yml @@ -0,0 +1,49 @@ +--- +- hosts: kube_control_plane[0] + tasks: + + - name: Force binaries directory for Flatcar Container Linux by Kinvolk + set_fact: + bin_dir: "/opt/bin" + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + + - name: Force binaries directory for other hosts + set_fact: + bin_dir: "/usr/local/bin" + when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + + - import_role: # noqa unnamed-task + name: cluster-dump + + - name: Check kubectl output + command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide" + changed_when: false + register: get_pods + no_log: true + + - debug: # noqa unnamed-task + msg: "{{ get_pods.stdout.split('\n') }}" + + - name: Check that all pods are running and ready + command: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml" + changed_when: false + register: run_pods_log + until: + # Check that all pods are running + - '(run_pods_log.stdout | from_yaml)["items"] | map(attribute = "status.phase") | unique | list == ["Running"]' + # Check that all pods are ready + - '(run_pods_log.stdout | from_yaml)["items"] | map(attribute = "status.containerStatuses") | map("map", attribute = "ready") | map("min") | min' + retries: 30 + delay: 10 + failed_when: false + no_log: true + + - name: Check kubectl output + command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide" + changed_when: false + register: get_pods + no_log: true + + - debug: # noqa unnamed-task + msg: "{{ get_pods.stdout.split('\n') }}" + failed_when: not run_pods_log is success diff --git a/kubespray/tests/testcases/030_check-network.yml b/kubespray/tests/testcases/030_check-network.yml new file mode 100644 index 0000000..499064d --- /dev/null +++ b/kubespray/tests/testcases/030_check-network.yml @@ -0,0 +1,171 @@ +--- +- hosts: kube_control_plane[0] + vars: + test_image_repo: registry.k8s.io/e2e-test-images/agnhost + test_image_tag: "2.40" + + tasks: + - name: Force binaries directory for Flatcar Container Linux by Kinvolk + set_fact: + bin_dir: "/opt/bin" + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + + - name: Force binaries directory for other hosts + set_fact: + bin_dir: "/usr/local/bin" + when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + + - name: Approve kubelet serving certificates + block: + + - name: Get certificate signing requests + command: "{{ bin_dir }}/kubectl get csr -o name" + register: get_csr + changed_when: false + + - name: Check there are csrs + assert: + that: get_csr.stdout_lines | length > 0 + fail_msg: kubelet_rotate_server_certificates is {{ kubelet_rotate_server_certificates }} but no csr's found + + - name: Approve certificates + command: "{{ bin_dir }}/kubectl certificate approve {{ get_csr.stdout_lines | join(' ') }}" + register: certificate_approve + when: get_csr.stdout_lines | length > 0 + changed_when: certificate_approve.stdout + + - debug: # noqa unnamed-task + msg: "{{ certificate_approve.stdout.split('\n') }}" + 
+ when: kubelet_rotate_server_certificates | default(false) + + - name: Create test namespace + command: "{{ bin_dir }}/kubectl create namespace test" + changed_when: false + + - name: Wait for API token of test namespace + shell: "set -o pipefail && {{ bin_dir }}/kubectl describe serviceaccounts default --namespace test | grep Tokens | awk '{print $2}'" + args: + executable: /bin/bash + changed_when: false + register: default_token + until: default_token.stdout | length > 0 + retries: 5 + delay: 5 + + - name: Run 2 agnhost pods in test ns + shell: + cmd: | + cat <= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2 + retries: 3 + delay: 10 + failed_when: false + when: inventory_hostname == groups['kube_control_plane'][0] + + - name: Get netchecker pods + command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}" + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + no_log: false + with_items: + - netchecker-agent + - netchecker-agent-hostnet + when: not nca_pod is success + + - debug: # noqa unnamed-task + var: nca_pod.stdout_lines + when: inventory_hostname == groups['kube_control_plane'][0] + + - name: Get netchecker agents + uri: + url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/" + return_content: yes + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + register: agents + retries: 18 + delay: "{{ agent_report_interval }}" + until: agents.content|length > 0 and + agents.content[0] == '{' and + agents.content|from_json|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2 + failed_when: false + no_log: false + + - name: Check netchecker status + uri: + url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check" + status_code: 200 + return_content: yes + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + register: connectivity_check + retries: 3 + delay: "{{ agent_report_interval }}" + until: connectivity_check.content|length > 0 and + connectivity_check.content[0] == '{' + no_log: false + failed_when: false + when: + - agents.content != '{}' + + - debug: # noqa unnamed-task + var: ncs_pod + run_once: true + + - name: Get kube-proxy logs + command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy" + no_log: false + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not connectivity_check is success + + - name: Get logs from other apps + command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not connectivity_check is success + no_log: false + with_items: + - kube-router + - flannel + - canal-node + - calico-node + - cilium + + - name: Parse agents list + set_fact: + agents_check_result: "{{ agents.content | from_json }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + when: + - agents is success + - agents.content is defined + - agents.content[0] == '{' + + - debug: # noqa unnamed-task + var: agents_check_result + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + when: + - agents_check_result is defined + + - name: Parse connectivity check + set_fact: + connectivity_check_result: "{{ connectivity_check.content | from_json }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + when: + - connectivity_check is success + - connectivity_check.content is defined + - 
connectivity_check.content[0] == '{' + + - debug: # noqa unnamed-task + var: connectivity_check_result + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + when: + - connectivity_check_result is defined + + - name: Check connectivity with all netchecker agents + assert: + that: + - agents_check_result is defined + - connectivity_check_result is defined + - agents_check_result.keys() | length > 0 + - not connectivity_check_result.Absent + - not connectivity_check_result.Outdated + msg: "Connectivity check to netchecker agents failed" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + run_once: true + + - name: Create macvlan network conf + # We cannot use only shell: below because Ansible will render the text + # with leading spaces, which means the shell will never find the string + # EOF at the beginning of a line. We can avoid Ansible's unhelpful + # heuristics by using the cmd parameter like this: + shell: + cmd: | + cat <
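The hunk is cut off in the middle of the macvlan heredoc above, but the pattern the task comment describes is the ordinary shell idiom of piping an unindented heredoc into kubectl so that the EOF terminator starts at column 0 (the sketch below is shown unindented for exactly that reason). The manifest here is deliberately generic; the real task's macvlan network conf and its {{ bin_dir }}/kubectl path are not reproduced.

# illustrative only; not the playbook's actual manifest
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-conf
  namespace: test
EOF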