havelight-ee
2023-05-30 14:44:26 +09:00
parent 9a3174deef
commit 4c32a7239d
2598 changed files with 164595 additions and 487 deletions

37
ansible/inventory.ini Normal file

@@ -0,0 +1,37 @@
[host]
10.10.43.111
10.10.43.112
10.10.43.113
10.10.43.114
10.10.43.115
10.10.43.116
10.10.43.117
10.10.43.118
10.10.43.119
10.10.43.120
10.10.43.121
10.10.43.122
10.10.43.123
10.10.43.124
10.10.43.125
10.10.43.126
10.10.43.127
10.10.43.128
10.10.43.129
10.10.43.130
10.10.43.131
10.10.43.132
10.10.43.133
10.10.43.134
10.10.43.135
10.10.43.137
10.10.43.138
10.10.43.140
10.10.43.141
10.10.43.142
10.10.43.143
10.10.43.144
10.10.43.145
10.10.43.146
10.10.43.147

7
ansible/node.yaml Executable file

@@ -0,0 +1,7 @@
---
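# Example invocation (an assumption; assumes running from the ansible/ directory): ansible-playbook -i inventory.ini node.yaml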
- name: apply node role
hosts: all
become: true
roles:
- node


@@ -0,0 +1,12 @@
---
- name: echo test message
command: echo "Not Valid Ruby Version"
- name: Update apt repo and cache on all Debian/Ubuntu boxes
apt: update_cache=yes cache_valid_time=3600
- name: Install cifs-utils
apt: name=cifs-utils state=latest update_cache=yes
- name: Install nfs-common
apt: name=nfs-common state=latest update_cache=yes


@@ -0,0 +1,43 @@
# Password aging settings
os_auth_pw_max_age: 90
os_auth_pw_min_age: 10
os_auth_pw_warn_age: 7
passhistory: 2
# Inactivity and Failed attempts lockout settings
fail_deny: 5
fail_unlock: 0
inactive_lock: 0
shell_timeout: 300
# tally settings
onerr: 'fail'
deny: 5
unlock_time: 300
# Password complexity settings
pwquality_minlen: 9
pwquality_maxrepeat: 3
pwquality_lcredit: -1
pwquality_ucredit: -1
pwquality_dcredit: -1
pwquality_ocredit: -1
# SSH settings
sshrootlogin: 'forced-commands-only'
sshmainport: 22
ssh_service_name: sshd
# Crictl setup
crictl_app: crictl
crictl_version: 1.25.0
crictl_os: linux
crictl_arch: amd64
crictl_dl_url: https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{ crictl_version }}/{{ crictl_app }}-v{{ crictl_version }}-{{ crictl_os }}-{{ crictl_arch }}.tar.gz
crictl_bin_path: /usr/local/bin
crictl_file_owner: root
crictl_file_group: root
# temp
username:
password:


@@ -0,0 +1,39 @@
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0
[grpc]
max_recv_message_size = 16777216
max_send_message_size = 16777216
[debug]
level = "info"
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.k8s.io/pause:3.7"
max_container_log_line_size = -1
enable_unprivileged_ports = false
enable_unprivileged_icmp = false
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
runtime_engine = ""
runtime_root = ""
base_runtime_spec = "/etc/containerd/cri-base.json"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://registry-1.docker.io"]


@@ -0,0 +1,60 @@
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0
[grpc]
max_recv_message_size = 16777216
max_send_message_size = 16777216
[debug]
level = "info"
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.k8s.io/pause:3.7"
max_container_log_line_size = -1
enable_unprivileged_ports = false
enable_unprivileged_icmp = false
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
runtime_engine = ""
runtime_root = ""
base_runtime_spec = "/etc/containerd/cri-base.json"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://registry-1.docker.io"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."10.10.31.243:5000"]
endpoint = ["http://10.10.31.243:5000"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."10.10.43.240:30500"]
endpoint = ["http://10.10.43.240:30500"]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.31.243:5000".tls]
insecure_skip_verify = true
[plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.31.243:5000".auth]
username = "core"
password = "coreadmin1234"
[plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.43.240:30500".tls]
insecure_skip_verify = true
[plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.43.240:30500".auth]
username = "dsk"
password = "dskadmin1234"
[plugins."io.containerd.grpc.v1.cri".registry.configs."docker.io".auth]
username = "datasaker"
password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc"


@@ -0,0 +1,20 @@
#!/bin/sh
printf '''
|-----------------------------------------------------------------|
| This system is for the use of authorized users only. |
| Individuals using this computer system without authority, or in |
| excess of their authority, are subject to having all of their |
| activities on this system monitored and recorded by system |
| personnel. |
| |
| In the course of monitoring individuals improperly using this |
| system, or in the course of system maintenance, the activities |
| of authorized users may also be monitored. |
| |
| Anyone using this system expressly consents to such monitoring |
| and is advised that if such monitoring reveals possible |
| evidence of criminal activity, system personnel may provide the |
| evidence of such monitoring to law enforcement officials. |
|-----------------------------------------------------------------|
'''


@@ -0,0 +1,3 @@
#[Manager]
#DefaultLimitNOFILE=65535:65535
#DefaultLimitNPROC=65536:65536


@@ -0,0 +1,6 @@
---
- name: restart sshd
service:
name: "{{ ssh_service_name }}"
state: restarted
enabled: true


@@ -0,0 +1,14 @@
---
- name: Add authorized SSH key for ubuntu user
authorized_key:
user: ubuntu
state: present
key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
manage_dir: False
- name: Set user password
user:
name: "{{ username }}"
password: "{{ password | password_hash('sha512') }}"
state: present


@@ -0,0 +1,29 @@
---
- name: Create a tar.gz archive of the update-motd.d files
archive:
path: /etc/update-motd.d/*
dest: /etc/update-motd.d/motd.tar.gz
format: gz
force_archive: true
- name: Remove motd.d files
file:
path: /etc/update-motd.d/{{ item }}
state: absent
with_items:
- 10-help-text
- 85-fwupd
- 90-updates-available
- 91-release-upgrade
- 95-hwe-eol
- 98-fsck-at-reboot
- 50-motd-news
- 88-esm-announce
- name: Create login banner
copy:
src: login_banner
dest: /etc/update-motd.d/00-header
owner: root
group: root
mode: 0755


@@ -0,0 +1,47 @@
---
#- name: Downloading and extracting {{ crictl_app }} {{ crictl_version }}
# unarchive:
# src: "{{ crictl_dl_url }}"
# dest: "{{ crictl_bin_path }}"
# owner: "{{ crictl_file_owner }}"
# group: "{{ crictl_file_group }}"
# extra_opts:
# - crictl
# remote_src: yes
- name: Change containerd config
copy:
src: containerd_dsk_config.toml
dest: /etc/containerd/config.toml
owner: root
group: root
mode: 0640
- name: Restart service containerd
ansible.builtin.systemd:
state: restarted
daemon_reload: yes
name: containerd
- name: remove all cronjobs for user root
command: crontab -r -u root
ignore_errors: true
- name: Crictl container prune crontab setting
ansible.builtin.cron:
name: "container prune"
minute: "0"
hour: "3"
user: root
job: "for id in `crictl ps -a | grep -i exited | awk '{print $1}'`; do crictl rm $id ; done"
- name: Crictl image prune crontab setting
ansible.builtin.cron:
name: "container image prune"
minute: "10"
hour: "3"
user: root
job: "/usr/local/bin/crictl rmi --prune"


@@ -0,0 +1,48 @@
---
- name: Set pass max days
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_MAX_DAYS.*$'
line: "PASS_MAX_DAYS\t{{os_auth_pw_max_age}}"
backrefs: yes
- name: Set pass min days
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_MIN_DAYS.*$'
line: "PASS_MIN_DAYS\t{{os_auth_pw_min_age}}"
backrefs: yes
- name: Set pass min length
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_MIN_LEN.*$'
line: "PASS_MIN_LEN\t{{pwquality_minlen}}"
backrefs: yes
- name: Set pass warn days
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_WARN_AGE.*$'
line: "PASS_WARN_AGE\t{{os_auth_pw_warn_age}}"
backrefs: yes
- name: Set password encryption to SHA512
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^ENCRYPT_METHOD\s.*$'
line: "ENCRYPT_METHOD\tSHA512"
backrefs: yes
- name: Disable MD5 crypt explicitly
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^MD5_CRYPT_ENAB.*$'
line: "MD5_CRYPT_ENAB NO"
backrefs: yes


@@ -0,0 +1,21 @@
---
- include: login_defs.yml
tags: login_defs
- include: pam.yml
tags: pam
- include: sshd_config.yml
tags: sshd_config
- include: profile.yml
tags: profile
- include: banner.yml
tags: banner
- include: crictl.yml
tags: crictl
#- include: admin_set.yml
# tags: admin_set


@@ -0,0 +1,82 @@
---
- name: Deploy common-auth with pam_tally2.so
template:
src: common-auth.j2
dest: /etc/pam.d/common-auth
owner: root
group: root
mode: 0644
- name: Create pwquality.conf password complexity configuration
block:
- apt:
name: libpam-pwquality
state: present
install_recommends: false
- template:
src: pwquality.conf.j2
dest: /etc/security/pwquality.conf
owner: root
group: root
mode: 0644
- name: Add pam_tally2.so to common-account
block:
- lineinfile:
dest: /etc/pam.d/common-account
regexp: '^account\srequisite'
line: "account requisite pam_deny.so"
- lineinfile:
dest: /etc/pam.d/common-account
regexp: '^account\srequired'
line: "account required pam_tally2.so"
- name: Limit password reuse
lineinfile:
dest: /etc/pam.d/common-password
line: "password required pam_pwhistory.so remember=5"
- name: Set password hashing algorithm to SHA-512
lineinfile:
dest: /etc/pam.d/common-password
regexp: '^password\s+\[success'
line: "password [success=1 default=ignore] pam_unix.so sha512"
#- name: configure system settings, file descriptors and number of threads
# pam_limits:
# domain: '*'
# limit_type: "{{item.limit_type}}"
# limit_item: "{{item.limit_item}}"
# value: "{{item.value}}"
# with_items:
# - { limit_type: '-', limit_item: 'nofile', value: 65536 }
# - { limit_type: '-', limit_item: 'nproc', value: 65536 }
## - { limit_type: 'soft', limit_item: 'memlock', value: unlimited }
## - { limit_type: 'hard', limit_item: 'memlock', value: unlimited }
#- name: reload settings from all system configuration files
# shell: sysctl --system
#- name: Creates directory systemd config
# file:
# path: /etc/systemd/system.conf.d
# state: directory
# owner: root
# group: root
# mode: 0775
#- name: Create systemd limits
# copy:
# src: systemd_limit.conf
# dest: /etc/systemd/system.conf.d/limits.conf
# owner: root
# group: root
# mode: 644


@@ -0,0 +1,24 @@
---
- name: Set session timeout
lineinfile:
dest: /etc/profile
regexp: '^TMOUT=.*'
insertbefore: '^readonly TMOUT'
line: 'TMOUT={{shell_timeout}}'
state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}"
- name: Set TMOUT readonly
lineinfile:
dest: /etc/profile
regexp: '^readonly TMOUT'
insertafter: 'TMOUT={{shell_timeout}}'
line: 'readonly TMOUT'
state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}"
- name: Set export TMOUT
lineinfile:
dest: /etc/profile
regexp: '^export TMOUT.*'
insertafter: 'readonly TMOUT'
line: 'export TMOUT'
state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}"


@@ -0,0 +1,23 @@
---
- name: Configure ssh root login to {{sshrootlogin}}
lineinfile:
dest: /etc/ssh/sshd_config
regexp: '^(#)?PermitRootLogin.*'
line: 'PermitRootLogin {{sshrootlogin}}'
insertbefore: '^Match.*'
state: present
owner: root
group: root
mode: 0640
notify: restart sshd
- name: SSH Listen on Main Port
lineinfile:
dest: /etc/ssh/sshd_config
insertbefore: '^#*AddressFamily'
line: 'Port {{sshmainport}}'
state: present
owner: root
group: root
mode: 0640
notify: restart sshd


@@ -0,0 +1,27 @@
#
# /etc/pam.d/common-auth - authentication settings common to all services
#
# This file is included from other service-specific PAM config files,
# and should contain a list of the authentication modules that define
# the central authentication scheme for use on the system
# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
# traditional Unix authentication mechanisms.
#
# As of pam 1.0.1-6, this file is managed by pam-auth-update by default.
# To take advantage of this, it is recommended that you configure any
# local modules either before or after the default block, and use
# pam-auth-update to manage selection of other modules. See
# pam-auth-update(8) for details.
auth required pam_tally2.so onerr={{onerr}} even_deny_root deny={{deny}} unlock_time={{unlock_time}}
# here are the per-package modules (the "Primary" block)
auth [success=1 default=ignore] pam_unix.so nullok
# here's the fallback if no module succeeds
auth requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
# since the modules above will each just jump around
auth required pam_permit.so
# and here are more per-package modules (the "Additional" block)
auth optional pam_cap.so
# end of pam-auth-update config


@@ -0,0 +1,50 @@
# Configuration for systemwide password quality limits
# Defaults:
#
# Number of characters in the new password that must not be present in the
# old password.
# difok = 5
#
# Minimum acceptable size for the new password (plus one if
# credits are not disabled which is the default). (See pam_cracklib manual.)
# Cannot be set to lower value than 6.
minlen = {{pwquality_minlen}}
#
# The maximum credit for having digits in the new password. If less than 0
# it is the minimum number of digits in the new password.
dcredit = {{pwquality_dcredit}}
#
# The maximum credit for having uppercase characters in the new password.
# If less than 0 it is the minimum number of uppercase characters in the new
# password.
ucredit = {{pwquality_ucredit}}
#
# The maximum credit for having lowercase characters in the new password.
# If less than 0 it is the minimum number of lowercase characters in the new
# password.
lcredit = {{pwquality_lcredit}}
#
# The maximum credit for having other characters in the new password.
# If less than 0 it is the minimum number of other characters in the new
# password.
ocredit = {{pwquality_ocredit}}
#
# The minimum number of required classes of characters for the new
# password (digits, uppercase, lowercase, others).
# minclass = 0
#
# The maximum number of allowed consecutive same characters in the new password.
# The check is disabled if the value is 0.
maxrepeat = {{pwquality_maxrepeat}}
#
# The maximum number of allowed consecutive characters of the same class in the
# new password.
# The check is disabled if the value is 0.
# maxclassrepeat = 0
#
# Whether to check for the words from the passwd entry GECOS string of the user.
# The check is enabled if the value is not 0.
# gecoscheck = 0
#
# Path to the cracklib dictionaries. Default is to use the cracklib default.
# dictpath =

9
ansible/rsa_key/key.sh Executable file

@@ -0,0 +1,9 @@
#!/usr/bin/expect -f
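# Usage: ./key.sh <password> <host>
# Copies the local SSH public key to ubuntu@<host>, answering the password prompt automatically.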
set password [lindex $argv 0]
set host [lindex $argv 1]
spawn ssh-copy-id -o StrictHostKeyChecking=no ubuntu@$host
expect "password:"
send "$password\n"
expect eof

13
ansible/rsa_key/test.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/bash
if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi
# Usage: ./test.sh <password>  (reads target IPs from ./ip_list)
if [ -z "$1" ]; then
exit 1
fi
passwd=$1
while read -r ip
do
echo "${ip}"
#./key.sh "${passwd}" "${ip}"
done < ip_list

9
ansible/security.yaml Executable file

@@ -0,0 +1,9 @@
---
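# Example invocation (an assumption; assumes running from the ansible/ directory): ansible-playbook -i inventory.ini security.yaml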
- name: apply security settings
hosts: all
become: true
roles:
- security-settings
vars:
sshrootlogin: 'no'


@@ -0,0 +1,25 @@
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-druid-large
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m5a.4xlarge
manager: CloudGroup
maxSize: 2
minSize: 2
nodeLabels:
datasaker/druid-size: large
datasaker/group: data-druid
kops.k8s.io/instancegroup: k8s-prod-data-druid-large
role: Node
subnets:
- ap-northeast-2c
taints:
- prod/data-druid:NoSchedule


@@ -0,0 +1,22 @@
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-tmp
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m5a.2xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: tmp
kops.k8s.io/instancegroup: k8s-prod-tmp
role: Node
subnets:
- ap-northeast-2c

30
kubespray/.ansible-lint Normal file

@@ -0,0 +1,30 @@
---
parseable: true
skip_list:
# see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
# DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary
# These rules are intentionally skipped:
#
# [E204]: "Lines should be no longer than 160 chars"
# This could be re-enabled with a major rewrite in the future.
# For now, there's not enough value gain from strictly limiting line length.
# (Disabled in May 2019)
- '204'
# [E701]: "meta/main.yml should contain relevant info"
# Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
# While it can be useful to have these metadata available, they are also available in the existing documentation.
# (Disabled in May 2019)
- '701'
# [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
# Meta roles in Kubespray don't need proper names
# (Disabled in June 2021)
- 'role-name'
# [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
# In Kubespray we use variables that use camelCase to match their k8s counterparts
# (Disabled in June 2021)
- 'var-naming'

15
kubespray/.editorconfig Normal file

@@ -0,0 +1,15 @@
root = true
[*.{yaml,yml,yml.j2,yaml.j2}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8
[{Dockerfile}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8


@@ -0,0 +1,44 @@
---
name: Bug Report
about: Report a bug encountered while operating Kubernetes
labels: kind/bug
---
<!--
Please, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->
**Environment**:
- **Cloud provider or hardware configuration:**
- **OS (`printf "$(uname -srm)\n$(cat /etc/os-release)\n"`):**
- **Version of Ansible** (`ansible --version`):
- **Version of Python** (`python --version`):
**Kubespray version (commit) (`git rev-parse --short HEAD`):**
**Network plugin used**:
**Full inventory with variables (`ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"`):**
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->
**Command used to invoke ansible**:
**Output of ansible run**:
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->
**Anything else we need to know**:
<!-- Running scripts/collect-info.yaml can gather a lot of useful information.
The script can be started with:
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
(If you are using CoreOS, remember to add '-e ansible_python_interpreter=/opt/bin/python'.)
After running this command you can find the logs in `pwd`/logs.tar.gz. You can even upload the entire file somewhere and paste the link here.-->


@@ -0,0 +1,11 @@
---
name: Enhancement Request
about: Suggest an enhancement to the Kubespray project
labels: kind/feature
---
<!-- Please only use this template for submitting enhancement requests -->
**What would you like to be added**:
**Why is this needed**:


@@ -0,0 +1,20 @@
---
name: Failing Test
about: Report test failures in Kubespray CI jobs
labels: kind/failing-test
---
<!-- Please only use this template for submitting reports about failing tests in Kubespray CI jobs -->
**Which jobs are failing**:
**Which test(s) are failing**:
**Since when has it been failing**:
**Testgrid link**:
**Reason for failure**:
**Anything else we need to know**:


@@ -0,0 +1,18 @@
---
name: Support Request
about: Support request or question relating to Kubespray
labels: kind/support
---
<!--
STOP -- PLEASE READ!
GitHub is not the right place for support requests.
If you're looking for help, check [Stack Overflow](https://stackoverflow.com/questions/tagged/kubespray) and the [troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).
You can also post your question on the [Kubernetes Slack](http://slack.k8s.io/) or the [Discuss Kubernetes](https://discuss.kubernetes.io/) forum.
If the matter is security related, please disclose it privately via https://kubernetes.io/security/.
-->


@@ -0,0 +1,44 @@
<!-- Thanks for sending a pull request! Here are some tips for you:
1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide/first-contribution.md and developer guide https://git.k8s.io/community/contributors/devel/development.md
2. Please label this pull request according to what type of issue you are addressing, especially if this is a release targeted pull request. For reference on required PR/issue labels, read here:
https://git.k8s.io/community/contributors/devel/sig-release/release.md#issuepr-kind-label
3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/sig-testing/testing.md
4. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
5. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/guide/release-notes.md
6. If the PR is unfinished, see how to mark it: https://git.k8s.io/community/contributors/guide/pull-requests.md#marking-unfinished-pull-requests
-->
**What type of PR is this?**
> Uncomment only one `/kind <>` line, hit enter to put it on a new line, and remove the leading whitespace from that line:
>
> /kind api-change
> /kind bug
> /kind cleanup
> /kind design
> /kind documentation
> /kind failing-test
> /kind feature
> /kind flake
**What this PR does / why we need it**:
**Which issue(s) this PR fixes**:
<!--
*Automatically closes linked issue when PR is merged.
Usage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`.
_If PR is about `failing-tests or flakes`, please post the related issues/tests in a comment and do not use `Fixes`_*
-->
Fixes #
**Special notes for your reviewer**:
**Does this PR introduce a user-facing change?**:
<!--
If no, just write "NONE" in the release-note block below.
If yes, a release note is required:
Enter your extended release note in the block below. If the PR requires additional action from users switching to the new release, include the string "action required".
-->
```release-note
```

115
kubespray/.gitignore vendored Normal file

@@ -0,0 +1,115 @@
.vagrant
*.retry
**/vagrant_ansible_inventory
*.iml
temp
contrib/offline/offline-files
contrib/offline/offline-files.tar.gz
.idea
.vscode
.tox
.cache
*.bak
*.tfstate
*.tfstate.backup
.terraform/
contrib/terraform/aws/credentials.tfvars
.terraform.lock.hcl
/ssh-bastion.conf
**/*.sw[pon]
*~
vagrant/
plugins/mitogen
# Ansible inventory
inventory/*
!inventory/local
!inventory/sample
inventory/*/artifacts/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# Distribution / packaging
.Python
env/
build/
credentials/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
# dotenv
.env
# virtualenv
venv/
ENV/
# molecule
roles/**/molecule/**/__pycache__/
# macOS
.DS_Store
# Temp location used by our scripts
scripts/tmp/
tmp.md

84
kubespray/.gitlab-ci.yml Normal file

@@ -0,0 +1,84 @@
---
stages:
- unit-tests
- deploy-part1
- moderator
- deploy-part2
- deploy-part3
- deploy-special
variables:
KUBESPRAY_VERSION: v2.20.0
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
ANSIBLE_FORCE_COLOR: "true"
MAGIC: "ci check this"
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
GS_ACCESS_KEY_ID: $GS_KEY
GS_SECRET_ACCESS_KEY: $GS_SECRET
CONTAINER_ENGINE: docker
SSH_USER: root
GCE_PREEMPTIBLE: "false"
ANSIBLE_KEEP_REMOTE_FILES: "1"
ANSIBLE_CONFIG: ./tests/ansible.cfg
ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
REMOVE_NODE_CHECK: "false"
UPGRADE_TEST: "false"
MITOGEN_ENABLE: "false"
ANSIBLE_LOG_LEVEL: "-vv"
RECOVER_CONTROL_PLANE_TEST: "false"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
TERRAFORM_VERSION: 1.0.8
ANSIBLE_MAJOR_VERSION: "2.11"
before_script:
- ./tests/scripts/rebase.sh
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- python -m pip uninstall -y ansible ansible-base ansible-core
- python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
- mkdir -p /.ssh
.job: &job
tags:
- packet
image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
artifacts:
when: always
paths:
- cluster-dump/
.testcases: &testcases
<<: *job
retry: 1
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh
script:
- ./tests/scripts/testcases_run.sh
after_script:
- chronic ./tests/scripts/testcases_cleanup.sh
# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions
ci-authorized:
extends: .job
stage: moderator
script:
- /bin/sh scripts/premoderator.sh
except: ['triggers', 'master']
# Disable ci moderator
only: []
include:
- .gitlab-ci/lint.yml
- .gitlab-ci/shellcheck.yml
- .gitlab-ci/terraform.yml
- .gitlab-ci/packet.yml
- .gitlab-ci/vagrant.yml
- .gitlab-ci/molecule.yml


@@ -0,0 +1,90 @@
---
yamllint:
extends: .job
stage: unit-tests
tags: [light]
variables:
LANG: C.UTF-8
script:
- yamllint --strict .
except: ['triggers', 'master']
vagrant-validate:
extends: .job
stage: unit-tests
tags: [light]
variables:
VAGRANT_VERSION: 2.2.19
script:
- ./tests/scripts/vagrant-validate.sh
except: ['triggers', 'master']
ansible-lint:
extends: .job
stage: unit-tests
tags: [light]
script:
- ansible-lint -v
except: ['triggers', 'master']
syntax-check:
extends: .job
stage: unit-tests
tags: [light]
variables:
ANSIBLE_INVENTORY: inventory/local-tests.cfg
ANSIBLE_REMOTE_USER: root
ANSIBLE_BECOME: "true"
ANSIBLE_BECOME_USER: root
ANSIBLE_VERBOSITY: "3"
script:
- ansible-playbook --syntax-check cluster.yml
- ansible-playbook --syntax-check upgrade-cluster.yml
- ansible-playbook --syntax-check reset.yml
- ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
except: ['triggers', 'master']
tox-inventory-builder:
stage: unit-tests
tags: [light]
extends: .job
before_script:
- ./tests/scripts/rebase.sh
- apt-get update && apt-get install -y python3-pip
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
- python -m pip uninstall -y ansible ansible-base ansible-core
- python -m pip install -r tests/requirements.txt
script:
- pip3 install tox
- cd contrib/inventory_builder && tox
except: ['triggers', 'master']
markdownlint:
stage: unit-tests
tags: [light]
image: node
before_script:
- npm install -g markdownlint-cli@0.22.0
script:
- markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md
check-readme-versions:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_readme_versions.sh
check-typo:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_typo.sh
ci-matrix:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/md-table/test.sh


@@ -0,0 +1,86 @@
---
.molecule:
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
services: []
stage: deploy-part1
before_script:
- tests/scripts/rebase.sh
- apt-get update && apt-get install -y python3-pip
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
- python -m pip uninstall -y ansible ansible-base ansible-core
- python -m pip install -r tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/molecule_run.sh
after_script:
- chronic ./tests/scripts/molecule_logs.sh
artifacts:
when: always
paths:
- molecule_logs/
# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.molecule_periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .molecule
molecule_full:
extends: .molecule_periodic
molecule_no_container_engines:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -e container-engine
when: on_success
molecule_docker:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
when: on_success
molecule_containerd:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
when: on_success
molecule_cri-o:
extends: .molecule
stage: deploy-part2
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
when: on_success
# Stage 3 container engines don't get as much attention so allow them to fail
molecule_kata:
extends: .molecule
stage: deploy-part3
allow_failure: true
script:
- ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
when: on_success
molecule_gvisor:
extends: .molecule
stage: deploy-part3
allow_failure: true
script:
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
when: on_success
molecule_youki:
extends: .molecule
stage: deploy-part3
allow_failure: true
script:
- ./tests/scripts/molecule_run.sh -i container-engine/youki
when: on_success


@@ -0,0 +1,328 @@
---
.packet:
extends: .testcases
variables:
ANSIBLE_TIMEOUT: "120"
CI_PLATFORM: packet
SSH_USER: kubespray
tags:
- packet
except: [triggers]
# CI template for PRs
.packet_pr:
only: [/^pr-.*$/]
extends: .packet
# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .packet
# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-aio:
stage: deploy-part1
extends: .packet_pr
when: on_success
variables:
RESET_CHECK: "true"
packet_ubuntu20-calico-aio-ansible-2_11:
stage: deploy-part1
extends: .packet_periodic
when: on_success
variables:
ANSIBLE_MAJOR_VERSION: "2.11"
RESET_CHECK: "true"
# ### PR JOBS PART2
packet_ubuntu18-aio-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu20-aio-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu20-calico-aio-hardening:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu18-calico-aio:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-aio-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-calico-aio:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_centos7-flannel-addons-ha:
extends: .packet_pr
stage: deploy-part2
when: on_success
packet_almalinux8-crio:
extends: .packet_pr
stage: deploy-part2
when: on_success
packet_ubuntu18-crio:
extends: .packet_pr
stage: deploy-part2
when: manual
packet_fedora35-crio:
extends: .packet_pr
stage: deploy-part2
when: manual
packet_ubuntu16-canal-ha:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_ubuntu16-canal-sep:
stage: deploy-special
extends: .packet_pr
when: manual
packet_ubuntu16-flannel-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian10-cilium-svc-proxy:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_debian10-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian10-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian11-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian11-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_centos7-calico-ha-once-localhost:
stage: deploy-part2
extends: .packet_pr
when: on_success
variables:
# This will instruct Docker not to start over TLS.
DOCKER_TLS_CERTDIR: ""
services:
- docker:19.03.9-dind
packet_almalinux8-kube-ovn:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_almalinux8-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_rockylinux8-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_rockylinux9-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_almalinux8-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_fedora36-docker-weave:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_opensuse-canal:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_opensuse-docker-cilium:
stage: deploy-part2
extends: .packet_pr
when: manual
# ### MANUAL JOBS
packet_ubuntu16-docker-weave-sep:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_ubuntu18-cilium-sep:
stage: deploy-special
extends: .packet_pr
when: manual
packet_ubuntu18-flannel-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_ubuntu18-flannel-ha-once:
stage: deploy-part2
extends: .packet_pr
when: manual
# Calico HA eBPF
packet_almalinux8-calico-ha-ebpf:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian9-macvlan:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_centos7-calico-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_centos7-multus-calico:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_centos7-canal-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_fedora36-docker-calico:
stage: deploy-part2
extends: .packet_periodic
when: on_success
variables:
RESET_CHECK: "true"
packet_fedora35-calico-selinux:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_fedora35-calico-swap-selinux:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_amazon-linux-2-aio:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_almalinux8-calico-nodelocaldns-secondary:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_fedora36-kube-ovn:
stage: deploy-part2
extends: .packet_periodic
when: on_success
# ### PR JOBS PART3
# Long jobs (45min+)
packet_centos7-weave-upgrade-ha:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: basic
packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: basic
# Calico HA Wireguard
packet_ubuntu20-calico-ha-wireguard:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-calico-upgrade:
stage: deploy-part3
extends: .packet_pr
when: on_success
variables:
UPGRADE_TEST: graceful
packet_almalinux8-calico-remove-node:
stage: deploy-part3
extends: .packet_pr
when: on_success
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"
packet_ubuntu20-calico-etcd-kubeadm:
stage: deploy-part3
extends: .packet_pr
when: on_success
packet_debian11-calico-upgrade-once:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: graceful
packet_ubuntu18-calico-ha-recover:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
packet_ubuntu18-calico-ha-recover-noquorum:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"


@@ -0,0 +1,16 @@
---
shellcheck:
extends: .job
stage: unit-tests
tags: [light]
variables:
SHELLCHECK_VERSION: v0.7.1
before_script:
- ./tests/scripts/rebase.sh
- curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
- cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
- shellcheck --version
script:
# Run shellcheck for all *.sh
- find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
except: ['triggers', 'master']


@@ -0,0 +1,235 @@
---
# Tests for contrib/terraform/
.terraform_install:
extends: .job
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh
- ./tests/scripts/terraform_install.sh
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
# Prepare inventory
- cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
- ln -s contrib/terraform/$PROVIDER/hosts
- terraform -chdir="contrib/terraform/$PROVIDER" init
# Copy SSH keypair
- mkdir -p ~/.ssh
- echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
- chmod 400 ~/.ssh/id_rsa
- echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
- mkdir -p contrib/terraform/$PROVIDER/group_vars
# Random subnet to avoid routing conflicts
- export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"
.terraform_validate:
extends: .terraform_install
stage: unit-tests
tags: [light]
only: ['master', /^pr-.*$/]
script:
- terraform -chdir="contrib/terraform/$PROVIDER" validate
- terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
.terraform_apply:
extends: .terraform_install
tags: [light]
stage: deploy-part3
when: manual
only: [/^pr-.*$/]
artifacts:
when: always
paths:
- cluster-dump/
variables:
ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
ANSIBLE_INVENTORY: hosts
CI_PLATFORM: tf
TF_VAR_ssh_user: $SSH_USER
TF_VAR_cluster_name: $CI_JOB_ID
script:
- tests/scripts/testcases_run.sh
after_script:
# Cleanup regardless of exit code
- chronic ./tests/scripts/testcases_cleanup.sh
tf-validate-openstack:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-metal:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: metal
CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-aws:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: aws
CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-exoscale:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: exoscale
tf-validate-vsphere:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: vsphere
CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-upcloud:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: upcloud
CLUSTER: $CI_COMMIT_REF_NAME
# tf-packet-ubuntu16-default:
# extends: .terraform_apply
# variables:
# TF_VERSION: $TERRAFORM_VERSION
# PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1"
# TF_VAR_number_of_k8s_nodes: "1"
# TF_VAR_plan_k8s_masters: t1.small.x86
# TF_VAR_plan_k8s_nodes: t1.small.x86
# TF_VAR_facility: ewr1
# TF_VAR_public_key_path: ""
# TF_VAR_operating_system: ubuntu_16_04
#
# tf-packet-ubuntu18-default:
# extends: .terraform_apply
# variables:
# TF_VERSION: $TERRAFORM_VERSION
# PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1"
# TF_VAR_number_of_k8s_nodes: "1"
# TF_VAR_plan_k8s_masters: t1.small.x86
# TF_VAR_plan_k8s_nodes: t1.small.x86
# TF_VAR_facility: ams1
# TF_VAR_public_key_path: ""
# TF_VAR_operating_system: ubuntu_18_04
.ovh_variables: &ovh_variables
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
OS_PROJECT_NAME: "9361447987648822"
OS_USER_DOMAIN_NAME: Default
OS_PROJECT_DOMAIN_ID: default
OS_USERNAME: 8XuhBMfkKVrk
OS_REGION_NAME: UK1
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
# Elastx is generously donating resources for Kubespray on Openstack CI
# Contacts: @gix @bl0m1
.elastx_variables: &elastx_variables
OS_AUTH_URL: https://ops.elastx.cloud:5000
OS_PROJECT_ID: 564c6b461c6b44b1bb19cdb9c2d928e4
OS_PROJECT_NAME: kubespray_ci
OS_USER_DOMAIN_NAME: Default
OS_PROJECT_DOMAIN_ID: default
OS_USERNAME: kubespray@root314.com
OS_REGION_NAME: se-sto
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
tf-elastx_cleanup:
stage: unit-tests
tags: [light]
image: python
variables:
<<: *elastx_variables
before_script:
- pip install -r scripts/openstack-cleanup/requirements.txt
script:
- ./scripts/openstack-cleanup/main.py
tf-elastx_ubuntu18-calico:
extends: .terraform_apply
stage: deploy-part3
when: on_success
allow_failure: true
variables:
<<: *elastx_variables
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_number_of_k8s_masters: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
TF_VAR_number_of_etcd: "0"
TF_VAR_number_of_k8s_nodes: "1"
TF_VAR_number_of_k8s_nodes_no_floating_ip: "0"
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
TF_VAR_number_of_bastions: "0"
TF_VAR_number_of_k8s_masters_no_etcd: "0"
TF_VAR_floatingip_pool: "elx-public1"
TF_VAR_dns_nameservers: '["1.1.1.1", "8.8.8.8", "8.8.4.4"]'
TF_VAR_use_access_ip: "0"
TF_VAR_external_net: "600b8501-78cb-4155-9c9f-23dfcba88828"
TF_VAR_network_name: "ci-$CI_JOB_ID"
TF_VAR_az_list: '["sto1"]'
TF_VAR_az_list_node: '["sto1"]'
TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_image: ubuntu-18.04-server-latest
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
# OVH voucher expired, commenting job until things are sorted out
# tf-ovh_cleanup:
# stage: unit-tests
# tags: [light]
# image: python
# environment: ovh
# variables:
# <<: *ovh_variables
# before_script:
# - pip install -r scripts/openstack-cleanup/requirements.txt
# script:
# - ./scripts/openstack-cleanup/main.py
# tf-ovh_ubuntu18-calico:
# extends: .terraform_apply
# when: on_success
# environment: ovh
# variables:
# <<: *ovh_variables
# TF_VERSION: $TERRAFORM_VERSION
# PROVIDER: openstack
# CLUSTER: $CI_COMMIT_REF_NAME
# ANSIBLE_TIMEOUT: "60"
# SSH_USER: ubuntu
# TF_VAR_number_of_k8s_masters: "0"
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
# TF_VAR_number_of_etcd: "0"
# TF_VAR_number_of_k8s_nodes: "0"
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
# TF_VAR_number_of_bastions: "0"
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
# TF_VAR_use_neutron: "0"
# TF_VAR_floatingip_pool: "Ext-Net"
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
# TF_VAR_network_name: "Ext-Net"
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_image: "Ubuntu 18.04"
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'


@@ -0,0 +1,67 @@
---
.vagrant:
extends: .testcases
variables:
CI_PLATFORM: "vagrant"
SSH_USER: "vagrant"
VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
services: []
before_script:
- apt-get update && apt-get install -y python3-pip
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
- python -m pip uninstall -y ansible ansible-base ansible-core
- python -m pip install -r tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/testcases_run.sh
after_script:
- chronic ./tests/scripts/testcases_cleanup.sh
allow_failure: true
vagrant_ubuntu18-calico-dual-stack:
stage: deploy-part2
extends: .vagrant
when: on_success
vagrant_ubuntu18-flannel:
stage: deploy-part2
extends: .vagrant
when: on_success
vagrant_ubuntu18-weave-medium:
stage: deploy-part2
extends: .vagrant
when: manual
vagrant_ubuntu20-flannel:
stage: deploy-part2
extends: .vagrant
when: on_success
allow_failure: false
vagrant_ubuntu16-kube-router-sep:
stage: deploy-part2
extends: .vagrant
when: manual
# Service proxy test fails connectivity testing
vagrant_ubuntu16-kube-router-svc-proxy:
stage: deploy-part2
extends: .vagrant
when: manual
vagrant_fedora35-kube-router:
stage: deploy-part2
extends: .vagrant
when: on_success
vagrant_centos7-kube-router:
stage: deploy-part2
extends: .vagrant
when: manual

0
kubespray/.gitmodules vendored Normal file

@@ -0,0 +1,3 @@
---
MD013: false
MD029: false

0
kubespray/.nojekyll Normal file

@@ -0,0 +1,48 @@
---
repos:
- repo: https://github.com/adrienverge/yamllint.git
rev: v1.27.1
hooks:
- id: yamllint
args: [--strict]
- repo: https://github.com/markdownlint/markdownlint
rev: v0.11.0
hooks:
- id: markdownlint
args: [ -r, "~MD013,~MD029" ]
exclude: "^.git"
- repo: local
hooks:
- id: ansible-lint
name: ansible-lint
entry: ansible-lint -v
language: python
pass_filenames: false
additional_dependencies:
- .[community]
- id: ansible-syntax-check
name: ansible-syntax-check
entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
language: python
files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
- id: tox-inventory-builder
name: tox-inventory-builder
entry: bash -c "cd contrib/inventory_builder && tox"
language: python
pass_filenames: false
- id: check-readme-versions
name: check-readme-versions
entry: tests/scripts/check_readme_versions.sh
language: script
pass_filenames: false
- id: ci-matrix
name: ci-matrix
entry: tests/scripts/md-table/test.sh
language: script
pass_filenames: false

19
kubespray/.yamllint Normal file

@@ -0,0 +1,19 @@
---
extends: default
ignore: |
.git/
rules:
braces:
min-spaces-inside: 0
max-spaces-inside: 1
brackets:
min-spaces-inside: 0
max-spaces-inside: 1
indentation:
spaces: 2
indent-sequences: consistent
line-length: disable
new-line-at-end-of-file: disable
truthy: disable

1
kubespray/CNAME Normal file

@@ -0,0 +1 @@
kubespray.io

46
kubespray/CONTRIBUTING.md Normal file

@@ -0,0 +1,46 @@
# Contributing guidelines
## How to become a contributor and submit your own code
### Environment setup
It is recommended to use filters to manage GitHub email notifications, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)
To install the development dependencies you can set up a Python virtual env:
```ShellSession
virtualenv venv
source venv/bin/activate
pip install -r tests/requirements.txt
```
#### Linting
Kubespray uses a [pre-commit](https://pre-commit.com) hook configuration to run several linters. Please install this tool and use it to run validation checks before submitting a PR.
```ShellSession
pre-commit install
pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
```
#### Molecule
[molecule](https://github.com/ansible-community/molecule) is designed to help with the development and testing of Ansible roles. In Kubespray you can run it for all roles with `./tests/scripts/molecule_run.sh`, or for a specific role (the one you are working on) with `molecule test` from the role directory (`cd roles/my-role`).
When developing or debugging a role it can be useful to run `molecule create` and `molecule converge` separately. Then you can use `molecule login` to SSH into the test environment.
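A typical iteration might look like this minimal sketch (the role path is illustrative):
```ShellSession
cd roles/container-engine/containerd   # any role directory with a molecule/ config
molecule create     # provision the test instance(s)
molecule converge   # apply the role to them
molecule login      # SSH into an instance for inspection
molecule destroy    # tear everything down when finished
```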
#### Vagrant
Vagrant with the VirtualBox or libvirt driver helps you quickly spin up test clusters to test things end to end. See [README.md#vagrant](README.md)
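A minimal sketch, assuming the Vagrantfile at the repository root and a working libvirt or VirtualBox setup (node names depend on the Vagrantfile):
```ShellSession
vagrant up --provider=libvirt   # or --provider=virtualbox
vagrant status                  # list the spawned nodes
vagrant ssh k8s-1               # hypothetical node name
vagrant destroy -f              # clean up
```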
### Contributing A Patch
1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
5. Address any pre-commit validation failures.
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
7. Submit a pull request.
8. Work with the reviewers on their suggestions.
9. Ensure you rebase onto the HEAD of your target branch and squash unnecessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before the final merge of your contribution; a sketch of this flow follows the list.
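A sketch of the rebase-and-squash flow from step 9 (remote and branch names are examples):
```ShellSession
git fetch upstream
git rebase upstream/master        # replay your work on the latest target branch
git rebase -i upstream/master     # mark fixup commits as "squash"
git push --force-with-lease origin my-feature
```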

37
kubespray/Dockerfile Normal file

@@ -0,0 +1,37 @@
# Use immutable image tags rather than mutable tags (like ubuntu:20.04)
FROM ubuntu:focal-20220531
ARG ARCH=amd64
ARG TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN apt update -y \
&& apt install -y \
libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
&& rm -rf /var/lib/apt/lists/*
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
&& add-apt-repository \
"deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable" \
&& apt update -y && apt-get install --no-install-recommends -y docker-ce \
&& rm -rf /var/lib/apt/lists/*
# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219
ENV LANG=C.UTF-8
WORKDIR /kubespray
COPY . .
RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
&& /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
&& python3 -m pip install --no-cache-dir -r requirements.txt \
&& update-alternatives --install /usr/bin/python python /usr/bin/python3 1
RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
&& curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
&& chmod a+x kubectl \
&& mv kubectl /usr/local/bin/kubectl

201
kubespray/LICENSE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Kubespray
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

7
kubespray/Makefile Normal file
@@ -0,0 +1,7 @@
mitogen:
@echo Mitogen support is deprecated.
@echo Please run the following command manually:
@echo ansible-playbook -c local mitogen.yml -vv
clean:
rm -rf dist/
rm *.retry

8
kubespray/OWNERS Normal file
@@ -0,0 +1,8 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- kubespray-approvers
reviewers:
- kubespray-reviewers
emeritus_approvers:
- kubespray-emeritus_approvers

26
kubespray/OWNERS_ALIASES Normal file
@@ -0,0 +1,26 @@
aliases:
kubespray-approvers:
- mattymo
- chadswen
- mirwan
- miouge1
- luckysb
- floryut
- oomichi
- cristicalin
- liupeng0518
- yankay
kubespray-reviewers:
- holmsten
- bozzo
- eppo
- oomichi
- jayonlau
- cristicalin
- liupeng0518
- yankay
kubespray-emeritus_approvers:
- riverzhang
- atoms
- ant31
- woopstar

261
kubespray/README.md Normal file
@@ -0,0 +1,261 @@
# Deploy a Production Ready Kubernetes Cluster
![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-sigs/kubespray/master/docs/img/kubernetes-logo.png)
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/)
- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Equinix Metal](docs/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions**
- **Continuous integration tests**
## Quick Start
To deploy the cluster you can use:
### Ansible
#### Usage
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
then run the following steps:
```ShellSession
# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster
# Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```
Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
As a consequence, the `ansible-playbook` command will fail with:
```raw
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
```
probably pointing to a task that depends on a module present in requirements.txt.
One way of solving this would be to uninstall the system Ansible package and reinstall it via pip, but that is not always possible.
A workaround is to set the `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables to the `ansible/modules` and `ansible/module_utils` subdirectories of the pip installation location (shown in the Location field of the output of `pip show [package]`) before executing `ansible-playbook`.
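For example (the paths below are illustrative and depend on your Python version and pip configuration):
```ShellSession
# Find where pip installed Ansible, then point the env vars at it
pip show ansible | grep Location
export ANSIBLE_LIBRARY=/usr/local/lib/python3.8/dist-packages/ansible/modules
export ANSIBLE_MODULE_UTILS=/usr/local/lib/python3.8/dist-packages/ansible/module_utils
ansible-playbook -i inventory/mycluster/hosts.yaml --become cluster.yml
```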
A simple way to ensure you get the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
```ShellSession
git checkout v2.20.0
docker pull quay.io/kubespray/kubespray:v2.20.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
quay.io/kubespray/kubespray:v2.20.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
### Vagrant
For Vagrant we need to install python dependencies for provisioning tasks.
Check if Python and pip are installed:
```ShellSession
python -V && pip -V
```
If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
then run the following step:
```ShellSession
vagrant up
```
## Documents
- [Requirements](#requirements)
- [Kubespray vs ...](docs/comparisons.md)
- [Getting started](docs/getting-started.md)
- [Setting up your first cluster](docs/setting-up-your-first-cluster.md)
- [Ansible inventory and tags](docs/ansible.md)
- [Integration with existing ansible repo](docs/integration.md)
- [Deployment data variables](docs/vars.md)
- [DNS stack](docs/dns-stack.md)
- [HA mode](docs/ha-mode.md)
- [Network plugins](#network-plugins)
- [Vagrant install](docs/vagrant.md)
- [Flatcar Container Linux bootstrap](docs/flatcar.md)
- [Fedora CoreOS bootstrap](docs/fcos.md)
- [Debian Jessie setup](docs/debian.md)
- [openSUSE setup](docs/opensuse.md)
- [Downloaded artifacts](docs/downloads.md)
- [Cloud providers](docs/cloud.md)
- [OpenStack](docs/openstack.md)
- [AWS](docs/aws.md)
- [Azure](docs/azure.md)
- [vSphere](docs/vsphere.md)
- [Equinix Metal](docs/equinix-metal.md)
- [Large deployments](docs/large-deployments.md)
- [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md)
- [Air-Gap installation](docs/offline-environment.md)
- [NTP](docs/ntp.md)
- [Hardening](docs/hardening.md)
- [Mirror](docs/mirror.md)
- [Roadmap](docs/roadmap.md)
## Supported Linux Distributions
- **Flatcar Container Linux by Kinvolk**
- **Debian** Bullseye, Buster, Jessie, Stretch
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
- **Fedora** 35, 36
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
Note: Upstart/SysV init based OS types are not supported.
## Supported Components
- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.5
- [etcd](https://github.com/etcd-io/etcd) v3.5.6
- [docker](https://www.docker.com/) v20.10 (see note)
- [containerd](https://containerd.io/) v1.6.14
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
- [calico](https://github.com/projectcalico/calico) v3.24.5
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.12.1
- [flannel](https://github.com/flannel-io/flannel) v0.19.2
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
- [multus](https://github.com/intel/multus-cni) v3.8
- [weave](https://github.com/weaveworks/weave) v2.8.1
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5
- Application
- [cert-manager](https://github.com/jetstack/cert-manager) v1.10.1
- [coredns](https://github.com/coredns/coredns) v1.9.3
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
- [argocd](https://argoproj.github.io/) v2.5.5
- [helm](https://helm.sh/) v3.10.3
- [metallb](https://metallb.universe.tf/) v0.12.1
- [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
## Container Runtime Notes
- The list of available docker versions is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning (see the sketch below).
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
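A minimal pinning sketch, assuming the Docker CE packages come from the upstream repositories (package names vary with your install method):
```ShellSession
# Debian/Ubuntu: hold the runtime packages at their current version
sudo apt-mark hold docker-ce docker-ce-cli containerd.io
# RHEL/CentOS: lock them with the yum versionlock plugin
sudo yum versionlock add docker-ce docker-ce-cli containerd.io
```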
## Requirements
- **Minimum required version of Kubernetes is v1.23**
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
- The **firewalls are not managed**: you'll need to implement your own rules the way you used to.
In order to avoid any issue during deployment you should disable your firewall.
- If kubespray is run from a non-root user account, a correct privilege escalation method
should be configured on the target servers. Then the `ansible_become` flag
or the command parameters `--become` or `-b` should be specified (see the example below).
Hardware:
These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
- Master
- Memory: 1500 MB
- Node
- Memory: 1024 MB
## Network Plugins
You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
designed to give you the most efficient networking across a range of situations, including non-overlay
and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
pods, and (if using Istio and Envoy) applications at the service mesh layer.
- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
(Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
iptables for network policies, and BGP for pods L3 networking (with optional BGP peering with out-of-cluster BGP peers).
It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.
- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods get their own unique MAC and IP addresses and are connected directly to the physical (layer 2) network.
- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).
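For example, to select Cilium in the sample inventory from the Quick Start (path and value are illustrative):
```ShellSession
sed -i 's/^kube_network_plugin:.*/kube_network_plugin: cilium/' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
```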
## Ingress Plugins
- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
## Community docs and resources
- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/)
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=CJ5G4GpqDy0)
## Tools and projects on top of Kubespray
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
- [Kubean](https://github.com/kubean-io/kubean)
## CI Tests
[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)
CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
See the [test matrix](docs/test_cases.md) for details.

83
kubespray/RELEASE.md Normal file
@@ -0,0 +1,83 @@
# Release Process
The Kubespray Project is released on an as-needed basis. The process is as follows:
1. An issue proposes a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
3. The `kube_version_min_required` variable is set to `n-1`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
7. An approver creates a release branch in the form `release-X.Y` (see the sketch after this list)
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
10. The release issue is closed
11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
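A sketch of step 7 (the version number is an example; the `vX.Y.Z` tag itself is created with the GitHub release in step 6):
```shell
git checkout master
git checkout -b release-2.21
git push origin release-2.21
```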
## Major/minor releases and milestones
* For major releases (vX.Y) Kubespray maintains one branch (`release-X.Y`). Minor releases (vX.Y.Z) are available only as tags.
* Security patches and bugs might be backported.
* Fixes for major releases (vX.Y) and minor releases (vX.Y.Z) are delivered
via maintenance releases (vX.Y.Z) and assigned to the corresponding open
[GitHub milestone](https://github.com/kubernetes-sigs/kubespray/milestones).
That milestone remains open for the major/minor releases support lifetime,
which ends once the milestone is closed. Then only a next major or minor release
can be done.
* Kubespray major and minor releases are bound to the given `kube_version` major/minor
version numbers and other components' arbitrary versions, like etcd or network plugins.
Older or newer component versions are not supported and not tested for the given
release (even if included in the checksum variables, like `kubeadm_checksums`).
* There are no unstable releases and no APIs, thus Kubespray doesn't follow
[semver](https://semver.org/). Every version describes only a stable release.
Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
playbooks, shall be described in the release notes. Other breaking changes, if any in
the contributed addons or bound versions of Kubernetes and other components, are
considered out of Kubespray scope and are up to the components' teams to deal with and
document.
* Minor releases can change components' versions, but not the major `kube_version`.
Greater `kube_version` requires a new major or minor release. For example, if Kubespray v2.0.0
is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: v3.0.6`,
then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.
## Release note creation
You can create a release note with:
```shell
export GITHUB_TOKEN=<your-github-token>
export ORG=kubernetes-sigs
export REPO=kubespray
release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
```
If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note.
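If the `release-notes` binary is not installed yet, it can typically be fetched with Go (module path assumed from the generator repository linked above):
```shell
go install k8s.io/release/cmd/release-notes@latest
```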
## Container image creation
The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from Dockerfile of the kubespray root directory:
```shell
cd kubespray/
nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z .
nerdctl push quay.io/kubespray/kubespray:vX.Y.Z
```
The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/:
```shell
cd kubespray/test-infra/vagrant-docker/
./build vX.Y.Z
```
Please note that the above operation requires the permission to push container images into quay.io/kubespray/.
If you don't have the permission, please ask for it on the #kubespray-dev channel.

15
kubespray/SECURITY_CONTACTS Normal file
@@ -0,0 +1,15 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Committee to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
mattymo
floryut
oomichi
cristicalin

275
kubespray/Vagrantfile vendored Normal file
@@ -0,0 +1,275 @@
# -*- mode: ruby -*-
# # vi: set ft=ruby :
# For help on using kubespray with vagrant, check out docs/vagrant.md
require 'fileutils'
Vagrant.require_version ">= 2.0.0"
CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')
FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"
# Uniq disk UUID for libvirt
DISK_UUID = Time.now.utc.to_i
SUPPORTED_OS = {
"flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]},
"flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
"flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
"flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
"ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
"centos" => {box: "centos/7", user: "vagrant"},
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
"centos8" => {box: "centos/8", user: "vagrant"},
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
"rhel7" => {box: "generic/rhel7", user: "vagrant"},
"rhel8" => {box: "generic/rhel8", user: "vagrant"},
}
if File.exist?(CONFIG)
require CONFIG
end
# Defaults for config options defined in CONFIG
$num_instances ||= 3
$instance_name_prefix ||= "k8s"
$vm_gui ||= false
$vm_memory ||= 2048
$vm_cpus ||= 2
$shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "False"
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
$kube_master_instances ||= $num_instances == 1 ? $num_instances : ($num_instances - 1)
# All nodes are kube nodes
$kube_node_instances ||= $num_instances
# The following only works when using the libvirt provider
$kube_node_instances_with_disks ||= false
$kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2
$override_disk_size ||= false
$disk_size ||= "20GB"
$local_path_provisioner_enabled ||= "False"
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false
# boolean or string (e.g. "-vvv")
$ansible_verbosity ||= false
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""
$playbook ||= "cluster.yml"
host_vars = {}
$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = "inventory/sample" if ! $inventory
$inventory = File.absolute_path($inventory, File.dirname(__FILE__))
# if $inventory has a hosts.ini file use it, otherwise copy over
# vars etc to where vagrant expects dynamic inventory to be
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
$vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
$vagrant_inventory = File.join($vagrant_ansible,"inventory")
FileUtils.rm_f($vagrant_inventory)
FileUtils.ln_s($inventory, $vagrant_inventory)
end
if Vagrant.has_plugin?("vagrant-proxyconf")
$no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
(1..$num_instances).each do |i|
$no_proxy += ",#{$subnet}.#{i+100}"
end
end
Vagrant.configure("2") do |config|
config.vm.box = $box
if SUPPORTED_OS[$os].has_key? :box_url
config.vm.box_url = SUPPORTED_OS[$os][:box_url]
end
config.ssh.username = SUPPORTED_OS[$os][:user]
# plugin conflict
if Vagrant.has_plugin?("vagrant-vbguest") then
config.vbguest.auto_update = false
end
# always use Vagrant's insecure key
config.ssh.insert_key = false
if ($override_disk_size)
unless Vagrant.has_plugin?("vagrant-disksize")
system "vagrant plugin install vagrant-disksize"
end
config.disksize.size = $disk_size
end
(1..$num_instances).each do |i|
config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node|
node.vm.hostname = vm_name
if Vagrant.has_plugin?("vagrant-proxyconf")
node.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
node.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
node.proxy.no_proxy = $no_proxy
end
["vmware_fusion", "vmware_workstation"].each do |vmware|
node.vm.provider vmware do |v|
v.vmx['memsize'] = $vm_memory
v.vmx['numvcpus'] = $vm_cpus
end
end
node.vm.provider :virtualbox do |vb|
vb.memory = $vm_memory
vb.cpus = $vm_cpus
vb.gui = $vm_gui
vb.linked_clone = true
vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
vb.customize ["modifyvm", :id, "--audio", "none"]
end
node.vm.provider :libvirt do |lv|
lv.nested = $libvirt_nested
lv.cpu_mode = "host-model"
lv.memory = $vm_memory
lv.cpus = $vm_cpus
lv.default_prefix = 'kubespray'
# Fix kernel panic on fedora 28
if $os == "fedora"
lv.cpu_mode = "host-passthrough"
end
end
if $kube_node_instances_with_disks
# Libvirt
driverletters = ('a'..'z').to_a
node.vm.provider :libvirt do |lv|
# always make /dev/sd{a/b/c} so that CI can ensure that
# virtualbox and libvirt will have the same devices to use for OSDs
(1..$kube_node_instances_with_disks_number).each do |d|
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
end
end
end
if $expose_docker_tcp
node.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
end
$forwarded_ports.each do |guest, host|
node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
end
if ["rhel7","rhel8"].include? $os
# Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
# be installed until the host is registered with a valid Red Hat support subscription
node.vm.synced_folder ".", "/vagrant", disabled: false
$shared_folders.each do |src, dst|
node.vm.synced_folder src, dst
end
else
node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
$shared_folders.each do |src, dst|
node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
end
end
ip = "#{$subnet}.#{i+100}"
node.vm.network :private_network, ip: ip,
:libvirt__guest_ipv6 => 'yes',
:libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
:libvirt__ipv6_prefix => "64",
:libvirt__forward_mode => "none",
:libvirt__dhcp_enabled => false
# Disable swap for each vm
node.vm.provision "shell", inline: "swapoff -a"
# ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
if ["ubuntu1804", "ubuntu2004"].include? $os
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
end
# Disable firewalld on oraclelinux/redhat vms
if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
end
host_vars[vm_name] = {
"ip": ip,
"flannel_interface": "eth1",
"kube_network_plugin": $network_plugin,
"kube_network_plugin_multus": $multi_networking,
"download_run_once": $download_run_once,
"download_localhost": "False",
"download_cache_dir": ENV['HOME'] + "/kubespray_cache",
# Make kubespray cache even when download_run_once is false
"download_force_cache": $download_force_cache,
# Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
"download_keep_remote_cache": "False",
"docker_rpm_keepcache": "1",
# These two settings will put kubectl and admin.config in $inventory/artifacts
"kubeconfig_localhost": "True",
"kubectl_localhost": "True",
"local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
"local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
"ansible_ssh_user": SUPPORTED_OS[$os][:user]
}
# Only execute the Ansible provisioner once, when all the machines are up and ready.
# And limit the action to gathering facts, the full playbook is going to be ran by testcases_run.sh
if i == $num_instances
node.vm.provision "ansible" do |ansible|
ansible.playbook = $playbook
ansible.verbose = $ansible_verbosity
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
if File.exist?($ansible_inventory_path)
ansible.inventory_path = $ansible_inventory_path
end
ansible.become = true
ansible.limit = "all,localhost"
ansible.host_key_checking = false
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
ansible.host_vars = host_vars
if $ansible_tags != ""
ansible.tags = [$ansible_tags]
end
ansible.groups = {
"etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
"kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
"kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
"k8s_cluster:children" => ["kube_control_plane", "kube_node"],
}
end
end
end
end
end

2
kubespray/_config.yml Normal file
@@ -0,0 +1,2 @@
---
theme: jekyll-theme-slate

22
kubespray/ansible.cfg Normal file
@@ -0,0 +1,22 @@
[ssh_connection]
pipelining=True
ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
force_valid_group_names = ignore
host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
fact_caching_timeout = 86400
stdout_callback = default
display_skipped_hosts = no
library = ./library
callbacks_enabled = profile_tasks,ara_default
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
[inventory]
ignore_patterns = artifacts, credentials

33
kubespray/ansible_version.yml Normal file
@@ -0,0 +1,33 @@
---
- hosts: localhost
gather_facts: false
become: no
vars:
minimal_ansible_version: 2.11.0
maximal_ansible_version: 2.13.0
ansible_connection: local
tags: always
tasks:
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
assert:
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive"
that:
- ansible_version.string is version(minimal_ansible_version, ">=")
- ansible_version.string is version(maximal_ansible_version, "<")
tags:
- check
- name: "Check that python netaddr is installed"
assert:
msg: "Python netaddr is not present"
that: "'127.0.0.1' | ipaddr"
tags:
- check
# CentOS 7 provides too old jinja version
- name: "Check that jinja is not too old (install via pip)"
assert:
msg: "Your Jinja version is too old, install via pip"
that: "{% set test %}It works{% endset %}{{ test == 'It works' }}"
tags:
- check

131
kubespray/cluster.yml Normal file
@@ -0,0 +1,131 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
- hosts: k8s_cluster:etcd
strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts
tags: always
import_playbook: facts.yml
- hosts: k8s_cluster:etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
- { role: download, tags: download, when: "not skip_downloads" }
- hosts: etcd:kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: true
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
when: etcd_deployment_type != "kubeadm"
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when:
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/node, tags: node }
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/control-plane, tags: master }
- { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/kubeadm, tags: kubeadm}
- { role: kubernetes/node-label, tags: node-label }
- { role: network_plugin, tags: network }
- hosts: calico_rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
- hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
- { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- { role: kubernetes-apps, tags: apps }
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }

3
kubespray/code-of-conduct.md Normal file
@@ -0,0 +1,3 @@
# Kubernetes Community Code of Conduct
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)

@@ -0,0 +1,27 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["elasticloadbalancing:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
}
]
}

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

@@ -0,0 +1,45 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
},
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": "*"
}
]
}

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

@@ -0,0 +1,76 @@
#!/usr/bin/env python
from __future__ import print_function
import boto3
import os
import argparse
import json
class SearchEC2Tags(object):
def __init__(self):
self.parse_args()
if self.args.list:
self.search_tags()
if self.args.host:
data = {}
print(json.dumps(data, indent=2))
def parse_args(self):
##Check if VPC_VISIBILITY is set, if not default to private
if "VPC_VISIBILITY" in os.environ:
self.vpc_visibility = os.environ['VPC_VISIBILITY']
else:
self.vpc_visibility = "private"
##Support --list and --host flags. We largely ignore the host one.
parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true', default=False, help='List instances')
parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def search_tags(self):
hosts = {}
hosts['_meta'] = { 'hostvars': {} }
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
for group in ["kube_control_plane", "kube_node", "etcd"]:
hosts[group] = []
tag_key = "kubespray-role"
tag_value = ["*"+group+"*"]
region = os.environ['REGION']
ec2 = boto3.resource('ec2', region)
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
cluster_name = os.getenv('CLUSTER_NAME')
if cluster_name:
filters.append({'Name': 'tag-key', 'Values': ['kubernetes.io/cluster/'+cluster_name]})
instances = ec2.instances.filter(Filters=filters)
for instance in instances:
##Suppose default vpc_visibility is private
dns_name = instance.private_dns_name
ansible_host = {
'ansible_ssh_host': instance.private_ip_address
}
##Override when vpc_visibility actually is public
if self.vpc_visibility == "public":
dns_name = instance.public_dns_name
ansible_host = {
'ansible_ssh_host': instance.public_ip_address
}
##Set when instance actually has node_labels
node_labels_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-labels', instance.tags))
if node_labels_tag:
ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])
hosts[group].append(dns_name)
hosts['_meta']['hostvars'][dns_name] = ansible_host
hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
print(json.dumps(hosts, sort_keys=True, indent=2))
SearchEC2Tags()
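A hypothetical invocation of the dynamic inventory script above (the file name is an assumption; `REGION` is required, `VPC_VISIBILITY` and `CLUSTER_NAME` are optional):
```shell
export REGION=us-east-1
VPC_VISIBILITY=public CLUSTER_NAME=mycluster ./kubespray-aws-inventory.py --list
```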

@@ -0,0 +1 @@
boto3 # Apache-2.0

2
kubespray/contrib/azurerm/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
.generated
/inventory

@@ -0,0 +1,67 @@
# Kubernetes on Azure with Azure Resource Group Templates
Provision the base infrastructure for a Kubernetes cluster by using [Azure Resource Group Templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-authoring-templates)
## Status
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
Resource Group. It will not install Kubernetes itself; that has to be done in a later step by yourself (using kubespray, of course).
## Requirements
- [Install azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)
- [Login with azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest)
- Dedicated Resource Group created in the Azure Portal or through azure-cli
## Configuration through group_vars/all
You have to modify at least two variables in group_vars/all. The first is the **cluster_name** variable: it must be globally
unique due to some restrictions in Azure. The second is the **ssh_public_keys** variable: it must be set to your ssh public
key to access your azure virtual machines. Most other variables should be self-explanatory if you have some basic Kubernetes
experience.
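For example (the name below is only illustrative; edit `ssh_public_keys` by hand to contain your own key):
```shell
sed -i 's/^cluster_name: example/cluster_name: k8s-demo-0017/' group_vars/all
```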
## Bastion host
You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated
templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option
also removes all public IPs from all other VMs.
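For example:
```shell
sed -i 's/^use_bastion: false/use_bastion: true/' group_vars/all
```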
## Generating and applying
To generate and apply the templates, call:
```shell
./apply-rg.sh <resource_group_name>
```
If you change something in the configuration (e.g. the number of nodes) later, you can call this again and Azure will
take care of creating/modifying whatever is needed.
## Clearing a resource group
If you need to delete all resources from a resource group, simply call:
```shell
./clear-rg.sh <resource_group_name>
```
**WARNING**: this really deletes everything from your resource group, including anything that you created there later!
## Installing Ansible and the dependencies
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
## Generating an inventory for kubespray
After you have applied the templates, you can generate an inventory with this call:
```shell
./generate-inventory.sh <resource_group_name>
```
It will create the file ./inventory which can then be used with kubespray, e.g.:
```shell
cd kubespray-root-dir
ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
```

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
# check if azure cli 2.0 exists else use azure cli 1.0
if az &>/dev/null; then
ansible-playbook generate-inventory_2.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
elif azure &>/dev/null; then
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
else
echo "Azure cli not found"
fi

@@ -0,0 +1,5 @@
---
- hosts: localhost
gather_facts: False
roles:
- generate-inventory

@@ -0,0 +1,5 @@
---
- hosts: localhost
gather_facts: False
roles:
- generate-inventory_2

@@ -0,0 +1,5 @@
---
- hosts: localhost
gather_facts: False
roles:
- generate-templates

@@ -0,0 +1,51 @@
# Due to some Azure limitations (ex:- Storage Account's name must be unique),
# this name must be globally unique - it will be used as a prefix for azure components
cluster_name: example
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
# node that can be used to access the masters and minions
use_bastion: false
# Set this to a preferred name that will be used as the first part of the dns name for your bastion host. For example: k8s-bastion.<azureregion>.cloudapp.azure.com.
# This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host.
# bastion_domain_prefix: k8s-bastion
number_of_k8s_masters: 3
number_of_k8s_nodes: 3
masters_vm_size: Standard_A2
masters_os_disk_size: 1000
minions_vm_size: Standard_A2
minions_os_disk_size: 1000
admin_username: devops
admin_password: changeme
# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
ssh_public_keys:
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
# Disable ssh password authentication. Change it to false to allow connecting over ssh with a password
disablePasswordAuthentication: true
# Azure CIDRs
azure_vnet_cidr: 10.0.0.0/8
azure_admin_cidr: 10.241.2.0/24
azure_masters_cidr: 10.0.4.0/24
azure_minions_cidr: 10.240.0.0/16
# Azure loadbalancer port to use to access your cluster
kube_apiserver_port: 6443
# Azure Networking and storage naming to use with inventory/all.yml
#azure_virtual_network_name: KubeVNET
#azure_subnet_admin_name: ad-subnet
#azure_subnet_masters_name: master-subnet
#azure_subnet_minions_name: minion-subnet
#azure_route_table_name: routetable
#azure_security_group_name: secgroup
# Storage types available are: "Standard_LRS","Premium_LRS"
#azure_storage_account_type: Standard_LRS

@@ -0,0 +1,15 @@
---
- name: Query Azure VMs # noqa 301
command: azure vm list-ip-address --json {{ azure_resource_group }}
register: vm_list_cmd
- name: Set vm_list
set_fact:
vm_list: "{{ vm_list_cmd.stdout }}"
- name: Generate inventory
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
mode: 0644

@@ -0,0 +1,33 @@
{% for vm in vm_list %}
{% if not use_bastion or vm.name == 'bastion' %}
{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].publicIPAddress.expanded.ipAddress }} ip={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }}
{% else %}
{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }}
{% endif %}
{% endfor %}
[kube_control_plane]
{% for vm in vm_list %}
{% if 'kube_control_plane' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[etcd]
{% for vm in vm_list %}
{% if 'etcd' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[kube_node]
{% for vm in vm_list %}
{% if 'kube_node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[k8s_cluster:children]
kube_node
kube_control_plane

@@ -0,0 +1,31 @@
---
- name: Query Azure VMs IPs # noqa 301
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
register: vm_ip_list_cmd
- name: Query Azure VMs Roles # noqa 301
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd
- name: Query Azure Load Balancer Public IP # noqa 301
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
register: lb_pubip_cmd
- name: Set VM IP, roles lists and load balancer public IP
set_fact:
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
vm_roles_list: "{{ vm_list_cmd.stdout }}"
lb_pubip: "{{ lb_pubip_cmd.stdout }}"
- name: Generate inventory
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
mode: 0644
- name: Generate Load Balancer variables
template:
src: loadbalancer_vars.j2
dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
mode: 0644

@@ -0,0 +1,34 @@
{% for vm in vm_ip_list %}
{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
{% else %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
{% endif %}
{% endfor %}
[kube_control_plane]
{% for vm in vm_roles_list %}
{% if 'kube_control_plane' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[etcd]
{% for vm in vm_roles_list %}
{% if 'etcd' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[kube_node]
{% for vm in vm_roles_list %}
{% if 'kube_node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[k8s_cluster:children]
kube_node
kube_control_plane

@@ -0,0 +1,8 @@
## External LB example config
apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }}
loadbalancer_apiserver:
address: {{ lb_pubip.ipAddress }}
port: 6443
## Internal loadbalancers for apiservers
loadbalancer_apiserver_localhost: false

@@ -0,0 +1,37 @@
---
apiVersion: "2015-06-15"
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}"
subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}"
subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}"
routeTableName: "{{ azure_route_table_name | default('routetable') }}"
securityGroupName: "{{ azure_security_group_name | default('secgroup') }}"
nameSuffix: "{{ cluster_name }}"
availabilitySetMasters: "master-avs"
availabilitySetMinions: "minion-avs"
faultDomainCount: 3
updateDomainCount: 10
bastionVmSize: Standard_A0
bastionVMName: bastion
bastionIPAddressName: bastion-pubip
disablePasswordAuthentication: true
sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
imageReference:
publisher: "OpenLogic"
offer: "CentOS"
sku: "7.5"
version: "latest"
imageReferenceJson: "{{imageReference|to_json}}"
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"

View File

@@ -0,0 +1,25 @@
---
- name: Set base_dir
set_fact:
base_dir: "{{ playbook_dir }}/.generated/"
- name: Create base_dir
file:
path: "{{ base_dir }}"
state: directory
recurse: true
mode: 0755
- name: Store json files in base_dir
template:
src: "{{ item }}"
dest: "{{ base_dir }}/{{ item }}"
mode: 0644
with_items:
- network.json
- storage.json
- availability-sets.json
- bastion.json
- masters.json
- minions.json
- clear-rg.json
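Once rendered into `.generated/`, these templates can be deployed with the Azure CLI; a minimal sketch, assuming `az` is logged in (resource group name and location are illustrative, and ordering matters since later templates reference the network and storage resources):

```shell
az group create --name my-kube-rg --location westeurope
for tpl in network storage availability-sets bastion masters minions; do
  az deployment group create --resource-group my-kube-rg \
    --template-file ".generated/${tpl}.json"
done
```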

View File

@@ -0,0 +1,30 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
},
"resources": [
{
"type": "Microsoft.Compute/availabilitySets",
"name": "{{availabilitySetMasters}}",
"apiVersion": "{{apiVersion}}",
"location": "[resourceGroup().location]",
"properties": {
"PlatformFaultDomainCount": "{{faultDomainCount}}",
"PlatformUpdateDomainCount": "{{updateDomainCount}}"
}
},
{
"type": "Microsoft.Compute/availabilitySets",
"name": "{{availabilitySetMinions}}",
"apiVersion": "{{apiVersion}}",
"location": "[resourceGroup().location]",
"properties": {
"PlatformFaultDomainCount": "{{faultDomainCount}}",
"PlatformUpdateDomainCount": "{{updateDomainCount}}"
}
}
]
}

View File

@@ -0,0 +1,106 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
"subnetAdminRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetAdminName}}')]"
},
"resources": [
{% if use_bastion %}
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/publicIPAddresses",
"name": "{{bastionIPAddressName}}",
"location": "[resourceGroup().location]",
"properties": {
"publicIPAllocationMethod": "Static",
"dnsSettings": {
{% if bastion_domain_prefix %}
"domainNameLabel": "{{ bastion_domain_prefix }}"
{% endif %}
}
}
},
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/networkInterfaces",
"name": "{{bastionVMName}}-nic",
"location": "[resourceGroup().location]",
"dependsOn": [
"[concat('Microsoft.Network/publicIPAddresses/', '{{bastionIPAddressName}}')]"
],
"properties": {
"ipConfigurations": [
{
"name": "BastionIpConfig",
"properties": {
"privateIPAllocationMethod": "Dynamic",
"publicIPAddress": {
"id": "[resourceId('Microsoft.Network/publicIPAddresses', '{{bastionIPAddressName}}')]"
},
"subnet": {
"id": "[variables('subnetAdminRef')]"
}
}
}
]
}
},
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Compute/virtualMachines",
"name": "{{bastionVMName}}",
"location": "[resourceGroup().location]",
"dependsOn": [
"[concat('Microsoft.Network/networkInterfaces/', '{{bastionVMName}}-nic')]"
],
"tags": {
"roles": "bastion"
},
"properties": {
"hardwareProfile": {
"vmSize": "{{bastionVmSize}}"
},
"osProfile": {
"computerName": "{{bastionVMName}}",
"adminUsername": "{{admin_username}}",
"adminPassword": "{{admin_password}}",
"linuxConfiguration": {
"disablePasswordAuthentication": "true",
"ssh": {
"publicKeys": [
{% for key in ssh_public_keys %}
{
"path": "{{sshKeyPath}}",
"keyData": "{{key}}"
}{% if loop.index < ssh_public_keys | length %},{% endif %}
{% endfor %}
]
}
}
},
"storageProfile": {
"imageReference": {{imageReferenceJson}},
"osDisk": {
"name": "osdisk",
"vhd": {
"uri": "[concat('http://', '{{storageAccountName}}', '.blob.core.windows.net/vhds/', '{{bastionVMName}}', '-osdisk.vhd')]"
},
"caching": "ReadWrite",
"createOption": "FromImage"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces', '{{bastionVMName}}-nic')]"
}
]
}
}
}
{% endif %}
]
}

View File

@@ -0,0 +1,8 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [],
"outputs": {}
}

View File

@@ -0,0 +1,198 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
"lbDomainName": "{{nameSuffix}}-api",
"lbPublicIPAddressName": "kubernetes-api-pubip",
"lbPublicIPAddressType": "Static",
"lbPublicIPAddressID": "[resourceId('Microsoft.Network/publicIPAddresses',variables('lbPublicIPAddressName'))]",
"lbName": "kubernetes-api",
"lbID": "[resourceId('Microsoft.Network/loadBalancers',variables('lbName'))]",
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
"kubeMastersSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMastersName}}')]"
},
"resources": [
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/publicIPAddresses",
"name": "[variables('lbPublicIPAddressName')]",
"location": "[resourceGroup().location]",
"properties": {
"publicIPAllocationMethod": "[variables('lbPublicIPAddressType')]",
"dnsSettings": {
"domainNameLabel": "[variables('lbDomainName')]"
}
}
},
{
"apiVersion": "{{apiVersion}}",
"name": "[variables('lbName')]",
"type": "Microsoft.Network/loadBalancers",
"location": "[resourceGroup().location]",
"dependsOn": [
"[concat('Microsoft.Network/publicIPAddresses/', variables('lbPublicIPAddressName'))]"
],
"properties": {
"frontendIPConfigurations": [
{
"name": "kube-api-frontend",
"properties": {
"publicIPAddress": {
"id": "[variables('lbPublicIPAddressID')]"
}
}
}
],
"backendAddressPools": [
{
"name": "kube-api-backend"
}
],
"loadBalancingRules": [
{
"name": "kube-api",
"properties": {
"frontendIPConfiguration": {
"id": "[concat(variables('lbID'), '/frontendIPConfigurations/kube-api-frontend')]"
},
"backendAddressPool": {
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
},
"protocol": "tcp",
"frontendPort": "{{kube_apiserver_port}}",
"backendPort": "{{kube_apiserver_port}}",
"enableFloatingIP": false,
"idleTimeoutInMinutes": 5,
"probe": {
"id": "[concat(variables('lbID'), '/probes/kube-api')]"
}
}
}
],
"probes": [
{
"name": "kube-api",
"properties": {
"protocol": "tcp",
"port": "{{kube_apiserver_port}}",
"intervalInSeconds": 5,
"numberOfProbes": 2
}
}
]
}
},
{% for i in range(number_of_k8s_masters) %}
{% if not use_bastion %}
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/publicIPAddresses",
"name": "master-{{i}}-pubip",
"location": "[resourceGroup().location]",
"properties": {
"publicIPAllocationMethod": "Static"
}
},
{% endif %}
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/networkInterfaces",
"name": "master-{{i}}-nic",
"location": "[resourceGroup().location]",
"dependsOn": [
{% if not use_bastion %}
"[concat('Microsoft.Network/publicIPAddresses/', 'master-{{i}}-pubip')]",
{% endif %}
"[concat('Microsoft.Network/loadBalancers/', variables('lbName'))]"
],
"properties": {
"ipConfigurations": [
{
"name": "MastersIpConfig",
"properties": {
"privateIPAllocationMethod": "Dynamic",
{% if not use_bastion %}
"publicIPAddress": {
"id": "[resourceId('Microsoft.Network/publicIPAddresses', 'master-{{i}}-pubip')]"
},
{% endif %}
"subnet": {
"id": "[variables('kubeMastersSubnetRef')]"
},
"loadBalancerBackendAddressPools": [
{
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
}
]
}
}
],
"networkSecurityGroup": {
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]"
},
"enableIPForwarding": true
}
},
{
"type": "Microsoft.Compute/virtualMachines",
"name": "master-{{i}}",
"location": "[resourceGroup().location]",
"dependsOn": [
"[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
],
"tags": {
"roles": "kube_control_plane,etcd"
},
"apiVersion": "{{apiVersion}}",
"properties": {
"availabilitySet": {
"id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMasters}}')]"
},
"hardwareProfile": {
"vmSize": "{{masters_vm_size}}"
},
"osProfile": {
"computerName": "master-{{i}}",
"adminUsername": "{{admin_username}}",
"adminPassword": "{{admin_password}}",
"linuxConfiguration": {
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
"ssh": {
"publicKeys": [
{% for key in ssh_public_keys %}
{
"path": "{{sshKeyPath}}",
"keyData": "{{key}}"
}{% if loop.index < ssh_public_keys | length %},{% endif %}
{% endfor %}
]
}
}
},
"storageProfile": {
"imageReference": {{imageReferenceJson}},
"osDisk": {
"name": "ma{{nameSuffix}}{{i}}",
"vhd": {
"uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/master-{{i}}.vhd')]"
},
"caching": "ReadWrite",
"createOption": "FromImage",
"diskSizeGB": "{{masters_os_disk_size}}"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces', 'master-{{i}}-nic')]"
}
]
}
}
} {% if not loop.last %},{% endif %}
{% endfor %}
]
}

View File

@@ -0,0 +1,115 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
"kubeMinionsSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMinionsName}}')]"
},
"resources": [
{% for i in range(number_of_k8s_nodes) %}
{% if not use_bastion %}
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/publicIPAddresses",
"name": "minion-{{i}}-pubip",
"location": "[resourceGroup().location]",
"properties": {
"publicIPAllocationMethod": "Static"
}
},
{% endif %}
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/networkInterfaces",
"name": "minion-{{i}}-nic",
"location": "[resourceGroup().location]",
"dependsOn": [
{% if not use_bastion %}
"[concat('Microsoft.Network/publicIPAddresses/', 'minion-{{i}}-pubip')]"
{% endif %}
],
"properties": {
"ipConfigurations": [
{
"name": "MinionsIpConfig",
"properties": {
"privateIPAllocationMethod": "Dynamic",
{% if not use_bastion %}
"publicIPAddress": {
"id": "[resourceId('Microsoft.Network/publicIPAddresses', 'minion-{{i}}-pubip')]"
},
{% endif %}
"subnet": {
"id": "[variables('kubeMinionsSubnetRef')]"
}
}
}
],
"networkSecurityGroup": {
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]"
},
"enableIPForwarding": true
}
},
{
"type": "Microsoft.Compute/virtualMachines",
"name": "minion-{{i}}",
"location": "[resourceGroup().location]",
"dependsOn": [
"[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
],
"tags": {
"roles": "kube_node"
},
"apiVersion": "{{apiVersion}}",
"properties": {
"availabilitySet": {
"id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMinions}}')]"
},
"hardwareProfile": {
"vmSize": "{{minions_vm_size}}"
},
"osProfile": {
"computerName": "minion-{{i}}",
"adminUsername": "{{admin_username}}",
"adminPassword": "{{admin_password}}",
"linuxConfiguration": {
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
"ssh": {
"publicKeys": [
{% for key in ssh_public_keys %}
{
"path": "{{sshKeyPath}}",
"keyData": "{{key}}"
}{% if loop.index < ssh_public_keys | length %},{% endif %}
{% endfor %}
]
}
}
},
"storageProfile": {
"imageReference": {{imageReferenceJson}},
"osDisk": {
"name": "mi{{nameSuffix}}{{i}}",
"vhd": {
"uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/minion-{{i}}.vhd')]"
},
"caching": "ReadWrite",
"createOption": "FromImage",
"diskSizeGB": "{{minions_os_disk_size}}"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces', 'minion-{{i}}-nic')]"
}
]
}
}
} {% if not loop.last %},{% endif %}
{% endfor %}
]
}

View File

@@ -0,0 +1,109 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
},
"resources": [
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/routeTables",
"name": "{{routeTableName}}",
"location": "[resourceGroup().location]",
"properties": {
"routes": [
]
}
},
{
"type": "Microsoft.Network/virtualNetworks",
"name": "{{virtualNetworkName}}",
"location": "[resourceGroup().location]",
"apiVersion": "{{apiVersion}}",
"dependsOn": [
"[concat('Microsoft.Network/routeTables/', '{{routeTableName}}')]"
],
"properties": {
"addressSpace": {
"addressPrefixes": [
"{{azure_vnet_cidr}}"
]
},
"subnets": [
{
"name": "{{subnetMastersName}}",
"properties": {
"addressPrefix": "{{azure_masters_cidr}}",
"routeTable": {
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
}
}
},
{
"name": "{{subnetMinionsName}}",
"properties": {
"addressPrefix": "{{azure_minions_cidr}}",
"routeTable": {
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
}
}
}
{% if use_bastion %}
,{
"name": "{{subnetAdminName}}",
"properties": {
"addressPrefix": "{{azure_admin_cidr}}",
"routeTable": {
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
}
}
}
{% endif %}
]
}
},
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/networkSecurityGroups",
"name": "{{securityGroupName}}",
"location": "[resourceGroup().location]",
"properties": {
"securityRules": [
{% if not use_bastion %}
{
"name": "ssh",
"properties": {
"description": "Allow SSH",
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "22",
"sourceAddressPrefix": "Internet",
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 100,
"direction": "Inbound"
}
},
{% endif %}
{
"name": "kube-api",
"properties": {
"description": "Allow secure kube-api",
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "{{kube_apiserver_port}}",
"sourceAddressPrefix": "Internet",
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 101,
"direction": "Inbound"
}
}
]
},
"resources": [],
"dependsOn": []
}
]
}

View File

@@ -0,0 +1,19 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
},
"resources": [
{
"type": "Microsoft.Storage/storageAccounts",
"name": "{{storageAccountName}}",
"location": "[resourceGroup().location]",
"apiVersion": "{{apiVersion}}",
"properties": {
"accountType": "{{storageAccountType}}"
}
}
]
}

View File

@@ -0,0 +1,177 @@
# Kubespray DIND experimental setup
This ansible playbook creates local docker containers
to serve as Kubernetes "nodes", which in turn will run
"normal" Kubernetes docker containers, a mode usually
called DIND (Docker-IN-Docker).
The playbook has two roles:
- dind-host: creates the "nodes" as containers on localhost, with
appropriate settings for DIND (privileged, volume mapping for dind
storage, etc).
- dind-cluster: customizes each node container to have required
system packages installed, and some utils (swapoff, lsattr)
symlinked to /bin/true to ease mimicking a real node.
This playbook has been tested with Ubuntu 16.04 as the host and ubuntu:16.04
as docker images (note that dind-cluster has specific customization
for these images).
The playbook also creates a `/tmp/kubespray.dind.inventory_builder.sh`
helper (which wraps running `contrib/inventory_builder/inventory.py` with
the node containers' IPs and prefix).
## Deploying
See below for a complete successful run:
1. Create the node containers
```shell
# From the kubespray root dir
cd contrib/dind
pip install -r requirements.txt
ansible-playbook -i hosts dind-cluster.yaml
# Back to kubespray root
cd ../..
```
NOTE: if the playbook run fails with an error message like the one below,
you may need to set `ansible_python_interpreter` explicitly;
see the `./hosts` file for an example expanded localhost entry.
```shell
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
```
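Alternatively (a sketch; the interpreter path is illustrative), the interpreter can be passed as an extra var instead of editing `./hosts`:

```shell
ansible-playbook -i hosts dind-cluster.yaml \
  -e ansible_python_interpreter=/home/user/kubespray/.venv/bin/python
```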
2. Customize kubespray-dind.yaml
Note that the node containers created above are coupled to the
`kubespray-dind.yaml` settings, in particular the selected `node_distro`
(as set in `group_vars/all/all.yaml`) and the docker settings.
```shell
$EDITOR contrib/dind/kubespray-dind.yaml
```
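A quick way to confirm both sides agree (a sketch, run from the kubespray root):

```shell
grep node_distro contrib/dind/group_vars/all/all.yaml
grep -E 'node_distro|bootstrap_os' contrib/dind/kubespray-dind.yaml
```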
3. Prepare the inventory and run the playbook
```shell
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
```
NOTE: You can also test other distros without editing any files by
passing `--extra-vars` on the command line as shown below,
replacing `DISTRO` with one of `debian`, `ubuntu`, `centos`, `fedora`:
```shell
cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO
cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
```
## Resulting deployment
See below for an idea of what a completed deployment looks like,
from the host where you ran the kubespray playbooks.
### node_distro: debian
Running from an Ubuntu Xenial host:
```shell
$ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1835dd183b75 debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node5
30b0af8d2924 debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node4
3e0d1510c62f debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node3
738993566f94 debian:9.5 "sh -c 'apt-get -qy …" 44 minutes ago Up 44 minutes kube-node2
c581ef662ed2 debian:9.5 "sh -c 'apt-get -qy …" 44 minutes ago Up 44 minutes kube-node1
$ docker exec kube-node1 kubectl get node
NAME STATUS ROLES AGE VERSION
kube-node1 Ready master,node 18m v1.12.1
kube-node2 Ready master,node 17m v1.12.1
kube-node3 Ready node 17m v1.12.1
kube-node4 Ready node 17m v1.12.1
kube-node5 Ready node 17m v1.12.1
$ docker exec kube-node1 kubectl get pod --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
default netchecker-agent-67489 1/1 Running 0 2m51s
default netchecker-agent-6qq6s 1/1 Running 0 2m51s
default netchecker-agent-fsw92 1/1 Running 0 2m51s
default netchecker-agent-fw6tl 1/1 Running 0 2m51s
default netchecker-agent-hostnet-8f2zb 1/1 Running 0 3m
default netchecker-agent-hostnet-gq7ml 1/1 Running 0 3m
default netchecker-agent-hostnet-jfkgv 1/1 Running 0 3m
default netchecker-agent-hostnet-kwfwx 1/1 Running 0 3m
default netchecker-agent-hostnet-r46nm 1/1 Running 0 3m
default netchecker-agent-lxdrn 1/1 Running 0 2m51s
default netchecker-server-864bd4c897-9vstl 1/1 Running 0 2m40s
default sh-68fcc6db45-qf55h 1/1 Running 1 12m
kube-system coredns-7598f59475-6vknq 1/1 Running 0 14m
kube-system coredns-7598f59475-l5q5x 1/1 Running 0 14m
kube-system kube-apiserver-kube-node1 1/1 Running 0 17m
kube-system kube-apiserver-kube-node2 1/1 Running 0 18m
kube-system kube-controller-manager-kube-node1 1/1 Running 0 18m
kube-system kube-controller-manager-kube-node2 1/1 Running 0 18m
kube-system kube-proxy-5xx9d 1/1 Running 0 17m
kube-system kube-proxy-cdqq4 1/1 Running 0 17m
kube-system kube-proxy-n64ls 1/1 Running 0 17m
kube-system kube-proxy-pswmj 1/1 Running 0 18m
kube-system kube-proxy-x89qw 1/1 Running 0 18m
kube-system kube-scheduler-kube-node1 1/1 Running 4 17m
kube-system kube-scheduler-kube-node2 1/1 Running 4 18m
kube-system kubernetes-dashboard-5db4d9f45f-548rl 1/1 Running 0 14m
kube-system nginx-proxy-kube-node3 1/1 Running 4 17m
kube-system nginx-proxy-kube-node4 1/1 Running 4 17m
kube-system nginx-proxy-kube-node5 1/1 Running 4 17m
kube-system weave-net-42bfr 2/2 Running 0 16m
kube-system weave-net-6gt8m 2/2 Running 0 16m
kube-system weave-net-88nnc 2/2 Running 0 16m
kube-system weave-net-shckr 2/2 Running 0 16m
kube-system weave-net-xr46t 2/2 Running 0 16m
$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
```
## Using ./run-test-distros.sh
You can use `./run-test-distros.sh` to run a set of tests via DIND;
here is an excerpt from the script, to give an idea:
```shell
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
# 'kube_network_plugin=calico'
# 'kube_network_plugin=flannel'
# 'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from the above there
# will be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run.
```
See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer
set of CNI-specific `--extra-vars` combos.

View File

@@ -0,0 +1,9 @@
---
- hosts: localhost
gather_facts: False
roles:
- { role: dind-host }
- hosts: containers
roles:
- { role: dind-cluster }

View File

@@ -0,0 +1,3 @@
---
# See distro.yaml for supported node_distro images
node_distro: debian

View File

@@ -0,0 +1,41 @@
---
distro_settings:
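  # The &DEBIAN / &CENTOS anchors plus the "<<:" merge keys below let ubuntu and
  # fedora inherit a base distro's settings, overriding only what differs.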
debian: &DEBIAN
image: "debian:9.5"
user: "debian"
pid1_exe: /lib/systemd/systemd
init: |
sh -c "apt-get -qy update && apt-get -qy install systemd-sysv dbus && exec /sbin/init"
raw_setup: apt-get -qy update && apt-get -qy install dbus python sudo iproute2
raw_setup_done: test -x /usr/bin/sudo
agetty_svc: getty@*
ssh_service: ssh
extra_packages: []
ubuntu:
<<: *DEBIAN
image: "ubuntu:16.04"
user: "ubuntu"
init: |
/sbin/init
centos: &CENTOS
image: "centos:7"
user: "centos"
pid1_exe: /usr/lib/systemd/systemd
init: |
/sbin/init
raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables
raw_setup_done: test -x /usr/bin/sudo
agetty_svc: getty@* serial-getty@*
ssh_service: sshd
extra_packages: []
fedora:
<<: *CENTOS
image: "fedora:latest"
user: "fedora"
raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables; mkdir -p /etc/modules-load.d
extra_packages:
- hostname
- procps
- findutils
- kmod
- iputils

View File

@@ -0,0 +1,15 @@
[local]
# If you created a virtualenv for ansible, you may need to point
# ansible_python_interpreter at the python binary from there instead:
#localhost ansible_connection=local ansible_python_interpreter=/home/user/kubespray/.venv/bin/python
localhost ansible_connection=local
[containers]
kube-node1
kube-node2
kube-node3
kube-node4
kube-node5
[containers:vars]
ansible_connection=docker

View File

@@ -0,0 +1,22 @@
---
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
# See contrib/dind/README.md
kube_api_anonymous_auth: true
kubelet_fail_swap_on: false
# Docker nodes need to have been created with the same "node_distro: debian"
# at contrib/dind/group_vars/all/all.yaml
bootstrap_os: debian
docker_version: latest
docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check=true -g /dind/docker
dns_mode: coredns
deploy_netchecker: True
netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
netcheck_agent_image_tag: v1.0
netcheck_server_image_tag: v1.0

View File

@@ -0,0 +1 @@
docker

View File

@@ -0,0 +1,73 @@
---
- name: set_fact distro_setup
set_fact:
distro_setup: "{{ distro_settings[node_distro] }}"
- name: set_fact other distro settings
set_fact:
distro_user: "{{ distro_setup['user'] }}"
distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
distro_extra_packages: "{{ distro_setup['extra_packages'] }}"
- name: Null-ify some linux tools to ease DIND
file:
src: "/bin/true"
dest: "{{ item }}"
state: link
force: yes
with_items:
# DIND box may have swap enabled, don't bother
- /sbin/swapoff
# /etc/hosts handling would fail on trying to copy file attributes on edit,
# void it by successfully returning nil output
- /usr/bin/lsattr
# disable selinux-isms, esp. needed if running on a non-SELinux host
- /usr/sbin/semodule
- name: Void installing dpkg docs and man pages on Debian based distros
copy:
content: |
# Delete locales
path-exclude=/usr/share/locale/*
# Delete man pages
path-exclude=/usr/share/man/*
# Delete docs
path-exclude=/usr/share/doc/*
path-include=/usr/share/doc/*/copyright
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
mode: 0644
when:
- ansible_os_family == 'Debian'
- name: Install system packages to better match a full-fledge node
package:
name: "{{ item }}"
state: present
  with_items: "{{ distro_extra_packages + [ 'rsyslog', 'openssh-server' ] }}"
- name: Start needed services
service:
name: "{{ item }}"
state: started
with_items:
- rsyslog
- "{{ distro_ssh_service }}"
- name: Create distro user "{{ distro_user }}"
user:
name: "{{ distro_user }}"
uid: 1000
# groups: sudo
append: yes
- name: Allow password-less sudo to "{{ distro_user }}"
copy:
content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
dest: "/etc/sudoers.d/{{ distro_user }}"
mode: 0640
- name: Add my pubkey to "{{ distro_user }}" user authorized keys
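  # The lookup runs on the Ansible control host, so this reads the controller's ~/.ssh/id_rsa.pub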
authorized_key:
user: "{{ distro_user }}"
state: present
key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"

View File

@@ -0,0 +1,88 @@
---
- name: set_fact distro_setup
set_fact:
distro_setup: "{{ distro_settings[node_distro] }}"
- name: set_fact other distro settings
set_fact:
distro_image: "{{ distro_setup['image'] }}"
distro_init: "{{ distro_setup['init'] }}"
distro_pid1_exe: "{{ distro_setup['pid1_exe'] }}"
distro_raw_setup: "{{ distro_setup['raw_setup'] }}"
distro_raw_setup_done: "{{ distro_setup['raw_setup_done'] }}"
distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
- name: Create dind node containers from "containers" inventory section
docker_container:
image: "{{ distro_image }}"
name: "{{ item }}"
state: started
hostname: "{{ item }}"
command: "{{ distro_init }}"
# recreate: yes
privileged: true
tmpfs:
- /sys/module/nf_conntrack/parameters
volumes:
- /boot:/boot
- /lib/modules:/lib/modules
- "{{ item }}:/dind/docker"
register: containers
with_items: "{{ groups.containers }}"
tags:
- addresses
- name: Gather list of containers IPs
set_fact:
addresses: "{{ containers.results | map(attribute='ansible_facts') | map(attribute='docker_container') | map(attribute='NetworkSettings') | map(attribute='IPAddress') | list }}"
tags:
- addresses
- name: Create inventory_builder helper already set with the list of node containers' IPs
template:
src: inventory_builder.sh.j2
dest: /tmp/kubespray.dind.inventory_builder.sh
mode: 0755
tags:
- addresses
- name: Install needed packages into node containers via raw, need to wait for possible systemd packages to finish installing
raw: |
# agetty processes churn a lot of CPU time failing on nonexistent ttys; STOP them early, then reap them in the task below
pkill -STOP agetty || true
{{ distro_raw_setup_done }} && echo SKIPPED && exit 0
until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
{{ distro_raw_setup }}
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
with_items: "{{ containers.results }}"
register: result
changed_when: result.stdout.find("SKIPPED") < 0
- name: Remove gettys from node containers
raw: |
until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
systemctl disable {{ distro_agetty_svc }}
systemctl stop {{ distro_agetty_svc }}
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
with_items: "{{ containers.results }}"
changed_when: false
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# so handle it manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
raw: |
echo {{ item | hash('sha1') }} > /etc/machine-id.new
mv -b /etc/machine-id.new /etc/machine-id
cmp /etc/machine-id /etc/machine-id~ || true
systemctl daemon-reload
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
with_items: "{{ containers.results }}"
- name: Early hack image install to adapt for DIND
# noqa 302 - this task uses the raw module intentionally
raw: |
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
with_items: "{{ containers.results }}"
register: result
changed_when: result.stdout.find("removed") >= 0

View File

@@ -0,0 +1,3 @@
#!/bin/bash
# NOTE: if you change HOST_PREFIX, you also need to edit ./hosts [containers] section
HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py {% for ip in addresses %} {{ ip }} {% endfor %}

View File

@@ -0,0 +1,93 @@
#!/bin/bash
# Q&D test'em all: creates full DIND kubespray deploys
# for each distro, verifying it via netchecker.
info() {
local msg="$*"
local date="$(date -Isec)"
echo "INFO: [$date] $msg"
}
pass_or_fail() {
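    # $? still holds the exit status of the command executed just before this call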
local rc="$?"
local msg="$*"
local date="$(date -Isec)"
[ $rc -eq 0 ] && echo "PASS: [$date] $msg" || echo "FAIL: [$date] $msg"
return $rc
}
test_distro() {
local distro=${1:?};shift
local extra="${*:-}"
local prefix="${distro}[${extra}]"
ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
pass_or_fail "$prefix: dind-nodes" || return 1
(cd ../..
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
# expand $extra with -e in front of each word
extra_args=""; for extra_arg in $extra; do extra_args="$extra_args -e $extra_arg"; done
ansible-playbook --become -e ansible_ssh_user=$distro -i \
${INVENTORY_DIR}/hosts.ini cluster.yml \
-e @contrib/dind/kubespray-dind.yaml -e bootstrap_os=$distro ${extra_args}
pass_or_fail "$prefix: kubespray"
) || return 1
local node0=${NODES[0]}
docker exec ${node0} kubectl get pod --all-namespaces
pass_or_fail "$prefix: kube-api" || return 1
let retries=60
while ((retries--)); do
# Some CNI may set NodePort on "main" node interface address (thus no localhost NodePort)
# e.g. kube-router: https://github.com/cloudnativelabs/kube-router/pull/217
docker exec ${node0} curl -m2 -s http://${NETCHECKER_HOST:?}:31081/api/v1/connectivity_check | grep successfully && break
sleep 2
done
[ $retries -ge 0 ]
pass_or_fail "$prefix: netcheck" || return 1
}
NODES=($(egrep ^kube-node hosts))
NETCHECKER_HOST=localhost
: ${OUTPUT_DIR:=./out}
mkdir -p ${OUTPUT_DIR}
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
# 'kube_network_plugin=calico'
# 'kube_network_plugin=flannel'
# 'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from the above there
# will be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run.
SPECS=${*:?Missing SPEC files, e.g. test-most_distros-some_CNIs.env}
for spec in ${SPECS}; do
unset DISTROS EXTRAS
echo "Loading file=${spec} ..."
. ${spec} || continue
: ${DISTROS:?} || continue
echo "DISTROS:" "${DISTROS[@]}"
echo "EXTRAS->"
printf " %s\n" "${EXTRAS[@]}"
let n=1
for distro in "${DISTROS[@]}"; do
for extra in "${EXTRAS[@]:-NULL}"; do
# Magic value to let this for-loop run once:
[[ ${extra} == NULL ]] && unset extra
docker rm -f "${NODES[@]}"
printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
{
info "${distro}[${extra}] START: file_out=${file_out}"
time test_distro ${distro} ${extra}
} |& tee ${file_out}
# sleep so a human can inspect the result before the next run starts
sleep 2m
done
done
done
egrep -H '^(....:|real)' $(ls -tr ${OUTPUT_DIR}/*.out)
