Add kubespray 2.24
@@ -0,0 +1,82 @@
---
- name: Check azure_tenant_id value
  fail:
    msg: "azure_tenant_id is missing"
  when: azure_tenant_id is not defined or not azure_tenant_id

- name: Check azure_subscription_id value
  fail:
    msg: "azure_subscription_id is missing"
  when: azure_subscription_id is not defined or not azure_subscription_id

- name: Check azure_aad_client_id value
  fail:
    msg: "azure_aad_client_id is missing"
  when: azure_aad_client_id is not defined or not azure_aad_client_id

- name: Check azure_aad_client_secret value
  fail:
    msg: "azure_aad_client_secret is missing"
  when: azure_aad_client_secret is not defined or not azure_aad_client_secret

- name: Check azure_resource_group value
  fail:
    msg: "azure_resource_group is missing"
  when: azure_resource_group is not defined or not azure_resource_group

- name: Check azure_location value
  fail:
    msg: "azure_location is missing"
  when: azure_location is not defined or not azure_location

- name: Check azure_subnet_name value
  fail:
    msg: "azure_subnet_name is missing"
  when: azure_subnet_name is not defined or not azure_subnet_name

- name: Check azure_security_group_name value
  fail:
    msg: "azure_security_group_name is missing"
  when: azure_security_group_name is not defined or not azure_security_group_name

- name: Check azure_vnet_name value
  fail:
    msg: "azure_vnet_name is missing"
  when: azure_vnet_name is not defined or not azure_vnet_name

- name: Check azure_vnet_resource_group value
  fail:
    msg: "azure_vnet_resource_group is missing"
  when: azure_vnet_resource_group is not defined or not azure_vnet_resource_group

- name: Check azure_route_table_name value
  fail:
    msg: "azure_route_table_name is missing"
  when: azure_route_table_name is not defined or not azure_route_table_name

- name: Check azure_loadbalancer_sku value
  fail:
    msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. Supported values are 'basic', 'standard'"
  when: azure_loadbalancer_sku not in ["basic", "standard"]

- name: "Check azure_exclude_master_from_standard_lb is a bool"
  assert:
    that: azure_exclude_master_from_standard_lb | type_debug == 'bool'

- name: "Check azure_disable_outbound_snat is a bool"
  assert:
    that: azure_disable_outbound_snat | type_debug == 'bool'

- name: "Check azure_use_instance_metadata is a bool"
  assert:
    that: azure_use_instance_metadata | type_debug == 'bool'

- name: Check azure_vmtype value
  fail:
    msg: "azure_vmtype is missing. Supported values are 'standard' or 'vmss'"
  when: azure_vmtype is not defined or not azure_vmtype

- name: Check azure_cloud value
  fail:
    msg: "azure_cloud has an invalid value '{{ azure_cloud }}'. Supported values are 'AzureChinaCloud', 'AzureGermanCloud', 'AzurePublicCloud', 'AzureUSGovernmentCloud'."
  when: azure_cloud not in ["AzureChinaCloud", "AzureGermanCloud", "AzurePublicCloud", "AzureUSGovernmentCloud"]
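For reference, these checks expect the Azure cloud-provider settings to be supplied through the inventory; a minimal group_vars sketch with purely illustrative placeholder values (the variable names come from the checks above, the values are hypothetical):

# group_vars sketch -- illustrative placeholder values only, not part of this commit
azure_tenant_id: "00000000-0000-0000-0000-000000000000"
azure_subscription_id: "00000000-0000-0000-0000-000000000000"
azure_aad_client_id: "00000000-0000-0000-0000-000000000000"
azure_aad_client_secret: "example-secret"
azure_resource_group: "example-rg"
azure_location: "koreacentral"
azure_subnet_name: "example-subnet"
azure_security_group_name: "example-nsg"
azure_vnet_name: "example-vnet"
azure_vnet_resource_group: "example-vnet-rg"
azure_route_table_name: "example-routes"
azure_loadbalancer_sku: "standard"              # must be 'basic' or 'standard'
azure_exclude_master_from_standard_lb: true     # must be a bool
azure_disable_outbound_snat: false              # must be a bool
azure_use_instance_metadata: true               # must be a bool
azure_vmtype: "standard"                        # 'standard' or 'vmss'
azure_cloud: "AzurePublicCloud"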
@@ -0,0 +1,34 @@
---
- name: Check openstack_auth_url value
  fail:
    msg: "openstack_auth_url is missing"
  when: openstack_auth_url is not defined or not openstack_auth_url

- name: Check openstack_username value
  fail:
    msg: "openstack_username is missing"
  when: openstack_username is not defined or not openstack_username

- name: Check openstack_password value
  fail:
    msg: "openstack_password is missing"
  when: openstack_password is not defined or not openstack_password

- name: Check openstack_region value
  fail:
    msg: "openstack_region is missing"
  when: openstack_region is not defined or not openstack_region

- name: Check openstack_tenant_id value
  fail:
    msg: "one of openstack_tenant_id or openstack_trust_id must be specified"
  when:
    - openstack_tenant_id is not defined or not openstack_tenant_id
    - openstack_trust_id is not defined

- name: Check openstack_trust_id value
  fail:
    msg: "one of openstack_tenant_id or openstack_trust_id must be specified"
  when:
    - openstack_trust_id is not defined or not openstack_trust_id
    - openstack_tenant_id is not defined
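The last two tasks together require that at least one of openstack_tenant_id or openstack_trust_id is set. A minimal inventory sketch (illustrative values only, not part of this commit) that would pass all of the checks above:

# group_vars sketch -- illustrative values only
openstack_auth_url: "https://keystone.example.com:5000/v3"
openstack_username: "kubespray"
openstack_password: "example-password"
openstack_region: "RegionOne"
openstack_tenant_id: "0123456789abcdef0123456789abcdef"
# openstack_trust_id may be set instead of openstack_tenant_id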
@@ -0,0 +1,22 @@
---
- name: Check vsphere environment variables
  fail:
    msg: "{{ item.name }} is missing"
  when: item.value is not defined or not item.value
  with_items:
    - name: vsphere_vcenter_ip
      value: "{{ vsphere_vcenter_ip }}"
    - name: vsphere_vcenter_port
      value: "{{ vsphere_vcenter_port }}"
    - name: vsphere_user
      value: "{{ vsphere_user }}"
    - name: vsphere_password
      value: "{{ vsphere_password }}"
    - name: vsphere_datacenter
      value: "{{ vsphere_datacenter }}"
    - name: vsphere_datastore
      value: "{{ vsphere_datastore }}"
    - name: vsphere_working_dir
      value: "{{ vsphere_working_dir }}"
    - name: vsphere_insecure
      value: "{{ vsphere_insecure }}"
roles/kubernetes/node/tasks/facts.yml (new file, 62 lines)
@@ -0,0 +1,62 @@
---
- name: Gather cgroups facts for docker
  when: container_manager == 'docker'
  block:
    - name: Look up docker cgroup driver
      shell: "set -o pipefail && docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
      args:
        executable: /bin/bash
      register: docker_cgroup_driver_result
      changed_when: false
      check_mode: no

    - name: Set kubelet_cgroup_driver_detected fact for docker
      set_fact:
        kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}"

- name: Gather cgroups facts for crio
  when: container_manager == 'crio'
  block:
    - name: Look up crio cgroup driver
      shell: "set -o pipefail && {{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
      args:
        executable: /bin/bash
      register: crio_cgroup_driver_result
      changed_when: false

    - name: Set kubelet_cgroup_driver_detected fact for crio
      set_fact:
        kubelet_cgroup_driver_detected: "{{ crio_cgroup_driver_result.stdout }}"

- name: Set kubelet_cgroup_driver_detected fact for containerd
  when: container_manager == 'containerd'
  set_fact:
    kubelet_cgroup_driver_detected: >-
      {%- if containerd_use_systemd_cgroup -%}systemd{%- else -%}cgroupfs{%- endif -%}

- name: Set kubelet_cgroup_driver
  set_fact:
    kubelet_cgroup_driver: "{{ kubelet_cgroup_driver_detected }}"
  when: kubelet_cgroup_driver is undefined

- name: Set kubelet_cgroups options when cgroupfs is used
  set_fact:
    kubelet_runtime_cgroups: "{{ kubelet_runtime_cgroups_cgroupfs }}"
    kubelet_kubelet_cgroups: "{{ kubelet_kubelet_cgroups_cgroupfs }}"
  when: kubelet_cgroup_driver == 'cgroupfs'

- name: Set kubelet_config_extra_args options when cgroupfs is used
  set_fact:
    kubelet_config_extra_args: "{{ kubelet_config_extra_args | combine(kubelet_config_extra_args_cgroupfs) }}"
  when: kubelet_cgroup_driver == 'cgroupfs'

- name: Os specific vars
  include_vars: "{{ item }}"
  with_first_found:
    - files:
        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
        - "{{ ansible_distribution | lower }}.yml"
        - "{{ ansible_os_family | lower }}.yml"
      skip: true
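Because the detected value is only applied when kubelet_cgroup_driver is not already defined, the detection above can be overridden from the inventory. A sketch (the override itself is an assumption about your inventory, variable names are taken from the tasks above):

# group_vars sketch -- force the kubelet cgroup driver instead of relying on detection
kubelet_cgroup_driver: systemd   # or 'cgroupfs'; with 'cgroupfs' the cgroupfs-specific kubelet options above are applied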
roles/kubernetes/node/tasks/install.yml (new file, 22 lines)
@@ -0,0 +1,22 @@
---
- name: Install | Copy kubeadm binary from download dir
  copy:
    src: "{{ downloads.kubeadm.dest }}"
    dest: "{{ bin_dir }}/kubeadm"
    mode: 0755
    remote_src: true
  tags:
    - kubeadm
  when:
    - not inventory_hostname in groups['kube_control_plane']

- name: Install | Copy kubelet binary from download dir
  copy:
    src: "{{ downloads.kubelet.dest }}"
    dest: "{{ bin_dir }}/kubelet"
    mode: 0755
    remote_src: true
  tags:
    - kubelet
    - upgrade
  notify: Node | restart kubelet
roles/kubernetes/node/tasks/kubelet.yml (new file, 55 lines)
@@ -0,0 +1,55 @@
---
- name: Set kubelet api version to v1beta1
  set_fact:
    kubeletConfig_api_version: v1beta1
  tags:
    - kubelet
    - kubeadm

- name: Write kubelet environment config file (kubeadm)
  template:
    src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
    dest: "{{ kube_config_dir }}/kubelet.env"
    setype: "{{ (preinstall_selinux_state != 'disabled') | ternary('etc_t', omit) }}"
    backup: yes
    mode: 0600
  notify: Node | restart kubelet
  tags:
    - kubelet
    - kubeadm

- name: Write kubelet config file
  template:
    src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubelet-config.yaml"
    mode: 0600
  notify: Kubelet | restart kubelet
  tags:
    - kubelet
    - kubeadm

- name: Write kubelet systemd init file
  template:
    src: "kubelet.service.j2"
    dest: "/etc/systemd/system/kubelet.service"
    backup: "yes"
    mode: 0600
    validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:kubelet.service'"
    # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
    # Remove once we drop support for systemd < 250
  notify: Node | restart kubelet
  tags:
    - kubelet
    - kubeadm

- name: Flush_handlers and reload-systemd
  meta: flush_handlers

- name: Enable kubelet
  service:
    name: kubelet
    enabled: yes
    state: started
  tags:
    - kubelet
  notify: Kubelet | restart kubelet
roles/kubernetes/node/tasks/loadbalancer/haproxy.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
- name: Haproxy | Cleanup potentially deployed nginx-proxy
  file:
    path: "{{ kube_manifest_dir }}/nginx-proxy.yml"
    state: absent

- name: Haproxy | Make haproxy directory
  file:
    path: "{{ haproxy_config_dir }}"
    state: directory
    mode: 0755
    owner: root

- name: Haproxy | Write haproxy configuration
  template:
    src: "loadbalancer/haproxy.cfg.j2"
    dest: "{{ haproxy_config_dir }}/haproxy.cfg"
    owner: root
    mode: 0755
    backup: yes

- name: Haproxy | Get checksum from config
  stat:
    path: "{{ haproxy_config_dir }}/haproxy.cfg"
    get_attributes: no
    get_checksum: yes
    get_mime: no
  register: haproxy_stat

- name: Haproxy | Write static pod
  template:
    src: manifests/haproxy.manifest.j2
    dest: "{{ kube_manifest_dir }}/haproxy.yml"
    mode: 0640
roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml (new file, 13 lines)
@@ -0,0 +1,13 @@
---
- name: Kube-vip | Check cluster settings for kube-vip
  fail:
    msg: "kube-vip requires kube_proxy_strict_arp = true, see https://github.com/kube-vip/kube-vip/blob/main/docs/kubernetes/arp/index.md"
  when:
    - kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp
    - kube_vip_arp_enabled

- name: Kube-vip | Write static pod
  template:
    src: manifests/kube-vip.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-vip.yml"
    mode: 0640
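Per the check above, ARP-mode kube-vip combined with IPVS kube-proxy needs strict ARP enabled. An inventory sketch of the settings involved (variable names taken from these tasks and main.yml; values illustrative):

# group_vars sketch -- settings referenced by the kube-vip tasks
kube_vip_enabled: true
kube_vip_arp_enabled: true
kube_proxy_mode: ipvs
kube_proxy_strict_arp: true   # required when kube_proxy_mode is 'ipvs' and ARP mode is enabled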
roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
- name: Nginx-proxy | Cleanup potentially deployed haproxy
  file:
    path: "{{ kube_manifest_dir }}/haproxy.yml"
    state: absent

- name: Nginx-proxy | Make nginx directory
  file:
    path: "{{ nginx_config_dir }}"
    state: directory
    mode: 0700
    owner: root

- name: Nginx-proxy | Write nginx-proxy configuration
  template:
    src: "loadbalancer/nginx.conf.j2"
    dest: "{{ nginx_config_dir }}/nginx.conf"
    owner: root
    mode: 0755
    backup: yes

- name: Nginx-proxy | Get checksum from config
  stat:
    path: "{{ nginx_config_dir }}/nginx.conf"
    get_attributes: no
    get_checksum: yes
    get_mime: no
  register: nginx_stat

- name: Nginx-proxy | Write static pod
  template:
    src: manifests/nginx-proxy.manifest.j2
    dest: "{{ kube_manifest_dir }}/nginx-proxy.yml"
    mode: 0640
roles/kubernetes/node/tasks/main.yml (new file, 191 lines)
@@ -0,0 +1,191 @@
---
- name: Fetch facts
  import_tasks: facts.yml
  tags:
    - facts
    - kubelet

- name: Pre-upgrade kubelet
  import_tasks: pre_upgrade.yml
  tags:
    - kubelet

- name: Ensure /var/lib/cni exists
  file:
    path: /var/lib/cni
    state: directory
    mode: 0755

- name: Install kubelet binary
  import_tasks: install.yml
  tags:
    - kubelet

- name: Install kube-vip
  import_tasks: loadbalancer/kube-vip.yml
  when:
    - is_kube_master
    - kube_vip_enabled
  tags:
    - kube-vip

- name: Install nginx-proxy
  import_tasks: loadbalancer/nginx-proxy.yml
  when:
    - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'nginx'
  tags:
    - nginx

- name: Install haproxy
  import_tasks: loadbalancer/haproxy.yml
  when:
    - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'haproxy'
  tags:
    - haproxy

- name: Ensure nodePort range is reserved
  ansible.posix.sysctl:
    name: net.ipv4.ip_local_reserved_ports
    value: "{{ kube_apiserver_node_port_range }}"
    sysctl_set: yes
    sysctl_file: "{{ sysctl_file_path }}"
    state: present
    reload: yes
  when: kube_apiserver_node_port_range is defined
  tags:
    - kube-proxy

- name: Verify if br_netfilter module exists
  command: "modinfo br_netfilter"
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH's conservative path management
  register: modinfo_br_netfilter
  failed_when: modinfo_br_netfilter.rc not in [0, 1]
  changed_when: false
  check_mode: no

# TODO: Remove once upstream issue is fixed
# https://github.com/ansible-collections/community.general/issues/7717
- name: Verify br_netfilter module path exists
  file:
    path: "{{ item }}"
    state: directory
    mode: 0755
  loop:
    - /etc/modules-load.d
    - /etc/modprobe.d

- name: Enable br_netfilter module
  community.general.modprobe:
    name: br_netfilter
    state: present
  when: modinfo_br_netfilter.rc == 0

- name: Persist br_netfilter module
  copy:
    dest: /etc/modules-load.d/kubespray-br_netfilter.conf
    content: br_netfilter
    mode: 0644
  when: modinfo_br_netfilter.rc == 0

# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module
- name: Check if bridge-nf-call-iptables key exists
  command: "sysctl net.bridge.bridge-nf-call-iptables"
  failed_when: false
  changed_when: false
  check_mode: no
  register: sysctl_bridge_nf_call_iptables

- name: Enable bridge-nf-call tables
  ansible.posix.sysctl:
    name: "{{ item }}"
    state: present
    sysctl_file: "{{ sysctl_file_path }}"
    value: "1"
    reload: yes
  when: sysctl_bridge_nf_call_iptables.rc == 0
  with_items:
    - net.bridge.bridge-nf-call-iptables
    - net.bridge.bridge-nf-call-arptables
    - net.bridge.bridge-nf-call-ip6tables

- name: Modprobe Kernel Module for IPVS
  community.general.modprobe:
    name: "{{ item }}"
    state: present
    persistent: present
  loop: "{{ kube_proxy_ipvs_modules }}"
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

- name: Modprobe conntrack module
  community.general.modprobe:
    name: "{{ item }}"
    state: present
    persistent: present
  register: modprobe_conntrack_module
  ignore_errors: true  # noqa ignore-errors
  loop: "{{ conntrack_modules }}"
  when:
    - kube_proxy_mode == 'ipvs'
    - "(modprobe_conntrack_module|default({'rc': 1})).rc != 0"  # loop until first success
  tags:
    - kube-proxy

- name: Check cloud provider credentials
  include_tasks: "cloud-credentials/{{ cloud_provider }}-credential-check.yml"
  when:
    - cloud_provider is defined
    - cloud_provider in ['openstack', 'azure', 'vsphere']
  tags:
    - cloud-provider
    - facts

- name: Test if openstack_cacert is a base64 string
  set_fact:
    openstack_cacert_is_base64: "{% if openstack_cacert is search('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}"
  when:
    - cloud_provider is defined
    - cloud_provider == 'openstack'
    - openstack_cacert is defined
    - openstack_cacert | length > 0

- name: Write cacert file
  copy:
    src: "{{ openstack_cacert if not openstack_cacert_is_base64 else omit }}"
    content: "{{ openstack_cacert | b64decode if openstack_cacert_is_base64 else omit }}"
    dest: "{{ kube_config_dir }}/openstack-cacert.pem"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - cloud_provider is defined
    - cloud_provider == 'openstack'
    - openstack_cacert is defined
    - openstack_cacert | length > 0
  tags:
    - cloud-provider

- name: Write cloud-config
  template:
    src: "cloud-configs/{{ cloud_provider }}-cloud-config.j2"
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - cloud_provider is defined
    - cloud_provider in ['openstack', 'azure', 'vsphere', 'aws', 'gce']
  notify: Node | restart kubelet
  tags:
    - cloud-provider

- name: Install kubelet
  import_tasks: kubelet.yml
  tags:
    - kubelet
    - kubeadm
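The credential check and cloud-config tasks above are driven by the cloud_provider variable, which selects the matching cloud-credentials/*-credential-check.yml file and cloud-config template. A sketch of how an inventory would take, for example, the OpenStack path (illustrative, not part of this commit):

# group_vars sketch -- selects cloud-credentials/openstack-credential-check.yml
# and cloud-configs/openstack-cloud-config.j2 in the tasks above
cloud_provider: openstack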
roles/kubernetes/node/tasks/pre_upgrade.yml (new file, 48 lines)
@@ -0,0 +1,48 @@
---
- name: "Pre-upgrade | check if kubelet container exists"
  shell: >-
    set -o pipefail &&
    {% if container_manager in ['crio', 'docker'] %}
    {{ docker_bin_dir }}/docker ps -af name=kubelet | grep kubelet
    {% elif container_manager == 'containerd' %}
    {{ bin_dir }}/crictl ps --all --name kubelet | grep kubelet
    {% endif %}
  args:
    executable: /bin/bash
  failed_when: false
  changed_when: false
  check_mode: no
  register: kubelet_container_check

- name: "Pre-upgrade | copy /var/lib/cni from kubelet"
  command: >-
    {% if container_manager in ['crio', 'docker'] %}
    docker cp kubelet:/var/lib/cni /var/lib/cni
    {% elif container_manager == 'containerd' %}
    ctr run --rm --mount type=bind,src=/var/lib/cni,dst=/cnilibdir,options=rbind:rw kubelet kubelet-tmp sh -c 'cp /var/lib/cni/* /cnilibdir/'
    {% endif %}
  args:
    creates: "/var/lib/cni"
  failed_when: false
  when: kubelet_container_check.rc == 0

- name: "Pre-upgrade | ensure kubelet container service is stopped if using host deployment"
  service:
    name: kubelet
    state: stopped
  when: kubelet_container_check.rc == 0

- name: "Pre-upgrade | ensure kubelet container is removed if using host deployment"
  shell: >-
    {% if container_manager in ['crio', 'docker'] %}
    {{ docker_bin_dir }}/docker rm -fv kubelet
    {% elif container_manager == 'containerd' %}
    {{ bin_dir }}/crictl stop kubelet && {{ bin_dir }}/crictl rm kubelet
    {% endif %}
  failed_when: false
  changed_when: false
  register: remove_kubelet_container
  retries: 4
  until: remove_kubelet_container.rc == 0
  delay: 5
  when: kubelet_container_check.rc == 0