This commit is contained in:
havelight-ee
2023-05-30 14:44:26 +09:00
parent 9a3174deef
commit 4c32a7239d
2598 changed files with 164595 additions and 487 deletions

View File

@@ -0,0 +1,19 @@
---
# Ask the API which nodes already carry the control-plane role so re-runs can
# tell which members have joined the cluster.
- name: Check which kube-control nodes are already members of the cluster
  command: "{{ bin_dir }}/kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o json"
  register: kube_control_planes_raw
  ignore_errors: true  # the apiserver may not be up yet on a fresh install
  changed_when: false  # read-only query
# BUGFIX: was `delegate_to: item`, which delegates to a literal host named
# "item" instead of the current loop host.
- name: Set fact joined_control_planes
  set_fact:
    joined_control_planes: "{{ ((kube_control_planes_raw.stdout | from_json)['items']) | default([]) | map(attribute='metadata') | map(attribute='name') | list }}"
  delegate_to: "{{ item }}"
  loop: "{{ groups['kube_control_plane'] }}"
  when: kube_control_planes_raw is succeeded
  run_once: true
# First already-joined member if any, otherwise the first host of the
# kube_control_plane inventory group.
- name: Set fact first_kube_control_plane
  set_fact:
    first_kube_control_plane: "{{ joined_control_planes | default([]) | first | default(groups['kube_control_plane'] | first) }}"

View File

@@ -0,0 +1,42 @@
---
# Reuse an existing encryption-at-rest secret if one is present so that a
# re-run does not rotate the key and orphan previously encrypted data.
- name: Check if secret for encrypting data at rest already exist
  stat:
    path: "{{ kube_cert_dir }}/secrets_encryption.yaml"
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: secrets_encryption_file
- name: Slurp secrets_encryption file if it exists
  slurp:
    src: "{{ kube_cert_dir }}/secrets_encryption.yaml"
  register: secret_file_encoded
  when: secrets_encryption_file.stat.exists
- name: Base 64 Decode slurped secrets_encryption.yaml file
  set_fact:
    secret_file_decoded: "{{ secret_file_encoded['content'] | b64decode | from_yaml }}"
  when: secrets_encryption_file.stat.exists
# secrets_encryption_query is expected to select the base64 secret value from
# the decoded document — defined elsewhere in the role's vars.
- name: Extract secret value from secrets_encryption.yaml
  set_fact:
    kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}"
  when: secrets_encryption_file.stat.exists
# Propagate the same token to every control-plane host so the rendered
# template is identical cluster-wide.
- name: Set kube_encrypt_token across master nodes
  set_fact:
    kube_encrypt_token: "{{ kube_encrypt_token_extracted }}"
  delegate_to: "{{ item }}"
  delegate_facts: true
  with_inventory_hostnames: kube_control_plane
  when: kube_encrypt_token_extracted is defined
- name: Write secrets for encrypting secret data at rest
  template:
    src: secrets_encryption.yaml.j2
    dest: "{{ kube_cert_dir }}/secrets_encryption.yaml"
    owner: root
    group: "{{ kube_cert_group }}"
    mode: "0640"  # quoted: unquoted 0640 is an octal YAML int
  tags:
    - kube-apiserver

View File

@@ -0,0 +1,28 @@
---
# Best-effort backup of control-plane certs before kubeadm regenerates them;
# failures (e.g. file missing on first install) are tolerated.
- name: Backup old certs and keys
  copy:
    src: "{{ kube_cert_dir }}/{{ item }}"
    dest: "{{ kube_cert_dir }}/{{ item }}.old"
    mode: preserve
    remote_src: true
  with_items:
    - apiserver.crt
    - apiserver.key
    - apiserver-kubelet-client.crt
    - apiserver-kubelet-client.key
    - front-proxy-client.crt
    - front-proxy-client.key
  ignore_errors: true  # noqa ignore-errors
- name: Backup old confs
  copy:
    src: "{{ kube_config_dir }}/{{ item }}"
    dest: "{{ kube_config_dir }}/{{ item }}.old"
    mode: preserve
    remote_src: true
  with_items:
    - admin.conf
    - controller-manager.conf
    - kubelet.conf
    - scheduler.conf
  ignore_errors: true  # noqa ignore-errors

View File

@@ -0,0 +1,26 @@
---
# Record the serial of the apiserver's etcd client cert; used elsewhere to
# detect cert rotation (tagged `network` so network plays can refresh it).
- name: Calculate etcd cert serial
  command: "openssl x509 -in {{ kube_cert_dir }}/apiserver-etcd-client.crt -noout -serial"
  register: "etcd_client_cert_serial_result"
  changed_when: false  # read-only inspection
  tags:
    - network
- name: Set etcd_client_cert_serial
  set_fact:
    # openssl prints "serial=XXXX"; keep the part after '='
    etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}"
  tags:
    - network
- name: Ensure etcdctl script is installed
  import_role:
    name: etcdctl
  when: etcd_deployment_type == "kubeadm"
- name: Set ownership for etcd data directory
  file:
    path: "{{ etcd_data_dir }}"
    owner: "{{ etcd_owner }}"
    group: "{{ etcd_owner }}"
    mode: "0700"  # quoted: unquoted 0700 is an octal YAML int
  when: etcd_deployment_type == "kubeadm"

View File

@@ -0,0 +1,24 @@
---
# Point every component kubeconfig at the cluster apiserver endpoint (LB or
# VIP) instead of whatever address kubeadm wrote at join time.
- name: Update server field in component kubeconfigs
  lineinfile:
    dest: "{{ kube_config_dir }}/{{ item }}"
    regexp: '^    server: https'
    line: '    server: {{ kube_apiserver_endpoint }}'
    backup: true
  with_items:
    - admin.conf
    - controller-manager.conf
    - kubelet.conf
    - scheduler.conf
  notify:
    - "Master | Restart kube-controller-manager"
    - "Master | Restart kube-scheduler"
    - "Master | reload kubelet"
# With kubeadm-managed etcd the manifest already carries the right endpoints.
- name: Update etcd-servers for apiserver
  lineinfile:
    dest: "{{ kube_config_dir }}/manifests/kube-apiserver.yaml"
    regexp: '^    - --etcd-servers='
    line: '    - --etcd-servers={{ etcd_access_addresses }}'
  when: etcd_deployment_type != "kubeadm"

View File

@@ -0,0 +1,79 @@
---
# Discovery address for joining nodes: if the configured endpoint is a local
# proxy (127.0.0.1/localhost) it is useless for discovery, so fall back to the
# first control plane's address.
- name: Set kubeadm_discovery_address
  set_fact:
    kubeadm_discovery_address: >-
      {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
      {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
      {%- else -%}
      {{ kube_apiserver_endpoint | regex_replace('https://', '') }}
      {%- endif %}
  tags:
    - facts
# Re-upload control-plane certificates so joining nodes never pull expired
# ones; prints the decryption key as the last stdout line.
- name: Upload certificates so they are fresh and not expired
  command: >-
    {{ bin_dir }}/kubeadm init phase
    --config {{ kube_config_dir }}/kubeadm-config.yaml
    upload-certs
    --upload-certs
  register: kubeadm_upload_cert
  when:
    - inventory_hostname == first_kube_control_plane
    - not kube_external_ca_mode
- name: Parse certificate key if not set
  set_fact:
    # Last stdout line of the upload-certs phase is the certificate key.
    kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
  run_once: true
  when:
    - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined
    - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped
- name: Create kubeadm ControlPlane config
  template:
    src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml"
    mode: "0640"  # quoted: unquoted 0640 is an octal YAML int
    backup: true
  when:
    - inventory_hostname != first_kube_control_plane
    - not kubeadm_already_run.stat.exists
- name: Wait for k8s apiserver
  wait_for:
    host: "{{ kubeadm_discovery_address.split(':')[0] }}"
    port: "{{ kubeadm_discovery_address.split(':')[1] }}"
    timeout: 180
- name: Check already run
  debug:
    msg: "{{ kubeadm_already_run.stat.exists }}"
# Reset stale state on secondary nodes that have manifests but have not
# completed a join, so the join below starts clean.
- name: Reset cert directory
  shell: >-
    if [ -f /etc/kubernetes/manifests/kube-apiserver.yaml ]; then
      {{ bin_dir }}/kubeadm reset -f --cert-dir {{ kube_cert_dir }};
    fi
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  when:
    - inventory_hostname != first_kube_control_plane
    - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
    - not kube_external_ca_mode
# throttle: 1 serialises joins — concurrent control-plane joins can race on
# etcd membership changes.
- name: Joining control plane node to the cluster.
  command: >-
    {{ bin_dir }}/kubeadm join
    --config {{ kube_config_dir }}/kubeadm-controlplane.yaml
    --ignore-preflight-errors=all
    --skip-phases={{ kubeadm_join_phases_skip | join(',') }}
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  register: kubeadm_join_control_plane
  retries: 3
  throttle: 1
  until: kubeadm_join_control_plane is succeeded
  when:
    - inventory_hostname != first_kube_control_plane
    - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists

View File

@@ -0,0 +1,248 @@
---
# Install the OIDC CA only when OIDC auth is enabled and a cert was supplied.
- name: Install OIDC certificate
  copy:
    content: "{{ kube_oidc_ca_cert | b64decode }}"
    dest: "{{ kube_oidc_ca_file }}"
    owner: root
    group: root
    mode: "0644"
  when:
    - kube_oidc_auth
    - kube_oidc_ca_cert is defined
# /var/lib/kubelet/config.yaml only exists after a successful kubeadm run;
# its presence is used throughout this file as the "already bootstrapped" flag.
- name: kubeadm | Check if kubeadm has already run
  stat:
    path: "/var/lib/kubelet/config.yaml"
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: kubeadm_already_run
- name: kubeadm | Backup kubeadm certs / kubeconfig
  import_tasks: kubeadm-backup.yml
  when:
    - kubeadm_already_run.stat.exists
# Build the full list of Subject Alternative Names the apiserver certificate
# must cover: built-in service names, every control-plane host (name, IPs,
# hostname/FQDN facts), optional LB / VIP endpoints and user-supplied extras.
- name: kubeadm | aggregate all SANs
  set_fact:
    apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}"
  vars:
    sans_base:
      - "kubernetes"
      - "kubernetes.default"
      - "kubernetes.default.svc"
      - "kubernetes.default.svc.{{ dns_domain }}"
      - "{{ kube_apiserver_ip }}"
      - "localhost"
      - "127.0.0.1"
    # Each optional source collapses to [] when its variable is undefined, so
    # the concatenation above never fails on missing inventory data.
    sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}"
    sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}"
    sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}"
    sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}"
    sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}"
    sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
    sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}"
    sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
    sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
    sans_kube_vip_address: "{{ [kube_vip_address] if kube_vip_address is defined and kube_vip_address else [] }}"
  tags: facts
# NOTE(review): "0640" on a directory lacks the execute bit, which normally
# prevents non-root traversal — confirm this restriction is intentional.
- name: Create audit-policy directory
  file:
    path: "{{ audit_policy_file | dirname }}"
    state: directory
    mode: "0640"  # quoted: unquoted 0640 is an octal YAML int
  when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
- name: Write api audit policy yaml
  template:
    src: apiserver-audit-policy.yaml.j2
    dest: "{{ audit_policy_file }}"
    mode: "0640"
  when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
- name: Write api audit webhook config yaml
  template:
    src: apiserver-audit-webhook-config.yaml.j2
    dest: "{{ audit_webhook_config_file }}"
    mode: "0640"
  when: kubernetes_audit_webhook|default(false)
# Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
- name: Set kubeadm_config_api_fqdn define
  set_fact:
    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}"
  when: loadbalancer_apiserver is defined
# Pin the kubeadm config API version used to pick the right *.yaml.j2 templates.
- name: Set kubeadm api version to v1beta3
  set_fact:
    kubeadmConfig_api_version: v1beta3
- name: kubeadm | Create kubeadm config
  template:
    src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
    mode: "0640"  # quoted: unquoted 0640 is an octal YAML int
# NOTE(review): "0640" on a directory lacks the execute bit — confirm intended.
- name: kubeadm | Create directory to store admission control configurations
  file:
    path: "{{ kube_config_dir }}/admission-controls"
    state: directory
    mode: "0640"
  when: kube_apiserver_admission_control_config_file
- name: kubeadm | Push admission control config file
  template:
    src: "admission-controls.yaml.j2"
    dest: "{{ kube_config_dir }}/admission-controls/admission-controls.yaml"
    mode: "0640"
  when: kube_apiserver_admission_control_config_file
# One config file per enabled plugin that requires its own configuration.
- name: kubeadm | Push admission control config files
  template:
    src: "{{ item|lower }}.yaml.j2"
    dest: "{{ kube_config_dir }}/admission-controls/{{ item|lower }}.yaml"
    mode: "0640"
  when:
    - kube_apiserver_admission_control_config_file
    - item in kube_apiserver_admission_plugins_needs_configuration
  loop: "{{ kube_apiserver_enable_admission_plugins }}"
# Probe the live apiserver certificate against every required SAN; any miss
# prints NEED-RENEW, which flips `changed` and triggers the regen tasks below.
# NOTE(review): the `ipaddr` filter presumably requires netaddr on the control
# node — confirm it is a declared dependency.
- name: kubeadm | Check if apiserver.crt contains all needed SANs
  shell: |
    set -o pipefail
    for IP in {{ apiserver_ips | join(' ') }}; do
      openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkip $IP | grep -q 'does match certificate' || echo 'NEED-RENEW'
    done
    for HOST in {{ apiserver_hosts | join(' ') }}; do
      openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkhost $HOST | grep -q 'does match certificate' || echo 'NEED-RENEW'
    done
  vars:
    # Split the SAN list into IPs and hostnames for -checkip vs -checkhost.
    apiserver_ips: "{{ apiserver_sans|map('ipaddr')|reject('equalto', False)|list }}"
    apiserver_hosts: "{{ apiserver_sans|difference(apiserver_ips) }}"
  args:
    executable: /bin/bash
  register: apiserver_sans_check
  changed_when: "'NEED-RENEW' in apiserver_sans_check.stdout"
  when:
    - kubeadm_already_run.stat.exists
    - not kube_external_ca_mode
# Remove the stale cert/key pair so the kubeadm phase below regenerates them
# with the current SAN list (only when the SAN check reported a mismatch).
- name: kubeadm | regenerate apiserver cert 1/2
  file:
    state: absent
    path: "{{ kube_cert_dir }}/{{ item }}"
  with_items:
    - apiserver.crt
    - apiserver.key
  when:
    - kubeadm_already_run.stat.exists
    - apiserver_sans_check is changed  # idiomatic form of `.changed`
    - not kube_external_ca_mode
- name: kubeadm | regenerate apiserver cert 2/2
  command: >-
    {{ bin_dir }}/kubeadm
    init phase certs apiserver
    --config={{ kube_config_dir }}/kubeadm-config.yaml
  when:
    - kubeadm_already_run.stat.exists
    - apiserver_sans_check is changed
    - not kube_external_ca_mode
# NOTE(review): "0640" on a directory lacks the execute bit — confirm intended.
- name: kubeadm | Create directory to store kubeadm patches
  file:
    path: "{{ kubeadm_patches.dest_dir }}"
    state: directory
    mode: "0640"  # quoted: unquoted 0640 is an octal YAML int
  when: kubeadm_patches is defined and kubeadm_patches.enabled
- name: kubeadm | Copy kubeadm patches from inventory files
  copy:
    src: "{{ kubeadm_patches.source_dir }}/"
    dest: "{{ kubeadm_patches.dest_dir }}"
    owner: "root"
    mode: "0644"
  when: kubeadm_patches is defined and kubeadm_patches.enabled
# Bootstrap the first control plane. `timeout -k 300s 300s` bounds a hung
# init; "field is immutable" in stderr is tolerated because it only means the
# cluster config upload hit a pre-existing immutable field.
- name: kubeadm | Initialize first master
  command: >-
    timeout -k 300s 300s
    {{ bin_dir }}/kubeadm init
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
    --skip-phases={{ kubeadm_init_phases_skip | join(',') }}
    {{ kube_external_ca_mode | ternary('', '--upload-certs') }}
  register: kubeadm_init
  # Retry is because upload config sometimes fails
  retries: 3
  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
  when: inventory_hostname == first_kube_control_plane and not kubeadm_already_run.stat.exists
  failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Master | restart kubelet
# Scrape the certificate key from the first master's `kubeadm init` output
# (the line containing --certificate-key), unless one was already provided.
- name: set kubeadm certificate key
  set_fact:
    kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"
  with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
  when:
    - kubeadm_certificate_key is not defined
    - (item | trim) is match('.*--certificate-key.*')
# Delete-then-create makes the fixed token idempotent; `|| :` swallows the
# delete error when the token does not exist yet.
- name: Create hardcoded kubeadm token for joining nodes with 24h expiration (if defined)
  shell: >-
    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token delete {{ kubeadm_token }} || :;
    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create {{ kubeadm_token }}
  changed_when: false
  when:
    - inventory_hostname == first_kube_control_plane
    - kubeadm_token is defined
    - kubeadm_refresh_token
  tags:
    - kubeadm_token
# Fallback: mint a fresh token on the first control plane when none was given.
- name: Create kubeadm token for joining nodes with 24h expiration (default)
  command: "{{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create"
  changed_when: false
  register: temp_token
  retries: 5
  delay: 5
  until: temp_token is succeeded
  delegate_to: "{{ first_kube_control_plane }}"
  when: kubeadm_token is not defined
  tags:
    - kubeadm_token
- name: Set kubeadm_token
  set_fact:
    kubeadm_token: "{{ temp_token.stdout }}"
  when: temp_token.stdout is defined
  tags:
    - kubeadm_token
- name: PodSecurityPolicy | install PodSecurityPolicy
  include_tasks: psp-install.yml
  when:
    - podsecuritypolicy_enabled
    - inventory_hostname == first_kube_control_plane
- name: kubeadm | Join other masters
  include_tasks: kubeadm-secondary.yml
- name: kubeadm | upgrade kubernetes cluster
  include_tasks: kubeadm-upgrade.yml
  when:
    - upgrade_cluster_setup
    - kubeadm_already_run.stat.exists
# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: []` in the YAML file.
# Untaint control-plane hosts that also serve as workers; both legacy and
# current taint keys are removed, and a missing taint is not an error.
- name: kubeadm | Remove taint for master with node role
  command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
  delegate_to: "{{ first_kube_control_plane }}"
  with_items:
    - "node-role.kubernetes.io/master:NoSchedule-"
    - "node-role.kubernetes.io/control-plane:NoSchedule-"
  when: inventory_hostname in groups['kube_node']
  failed_when: false

View File

@@ -0,0 +1,75 @@
---
# Wait until the local apiserver answers /healthz before upgrading; retried
# for up to 5 minutes (60 attempts x 5s).
- name: kubeadm | Check api is up
  uri:
    url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
    validate_certs: false
  when: inventory_hostname in groups['kube_control_plane']
  register: _result
  retries: 60
  delay: 5
  until: _result.status == 200
# Upgrade the first master; `timeout -k 600s 600s` bounds a hung kubeadm.
# "field is immutable" in stderr is tolerated (failed_when) because it only
# means the config upload hit a pre-existing immutable field.
- name: kubeadm | Upgrade first master
  command: >-
    timeout -k 600s 600s
    {{ bin_dir }}/kubeadm
    upgrade apply -y {{ kube_version }}
    --certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }}
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
    --allow-experimental-upgrades
    --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }}
    --force
  register: kubeadm_upgrade
  # Retry is because upload config sometimes fails
  retries: 3
  until: kubeadm_upgrade.rc == 0
  when: inventory_hostname == first_kube_control_plane
  failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Master | restart kubelet
# Same upgrade for the remaining masters; no retries here, and the same
# "field is immutable" tolerance expressed as a failed_when list.
- name: kubeadm | Upgrade other masters
  command: >-
    timeout -k 600s 600s
    {{ bin_dir }}/kubeadm
    upgrade apply -y {{ kube_version }}
    --certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }}
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
    --allow-experimental-upgrades
    --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }}
    --force
  register: kubeadm_upgrade
  when: inventory_hostname != first_kube_control_plane
  failed_when:
    - kubeadm_upgrade.rc != 0
    - '"field is immutable" not in kubeadm_upgrade.stderr'
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Master | restart kubelet
# Drop kubectl's local discovery caches so new API types from the upgraded
# apiserver are picked up immediately.
- name: kubeadm | clean kubectl cache to refresh api types
  file:
    path: "{{ item }}"
    state: absent
  with_items:
    - /root/.kube/cache
    - /root/.kube/http-cache
# FIXME: https://github.com/kubernetes/kubeadm/issues/1318
- name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
  command: >-
    {{ kubectl }}
    -n kube-system
    scale deployment/coredns --replicas 0
  register: scale_down_coredns
  retries: 6
  delay: 5
  until: scale_down_coredns is succeeded
  run_once: true
  when:
    - kubeadm_scale_down_coredns_enabled
    - dns_mode not in ['coredns', 'coredns_dual']
  changed_when: false

View File

@@ -0,0 +1,18 @@
---
# Replace the embedded client cert/key data in kubelet.conf with references to
# the rotating PEM symlink so kubelet client cert rotation keeps working.
- name: Fixup kubelet client cert rotation 1/2
  lineinfile:
    path: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '^    client-certificate-data: '
    line: '    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
    backup: true
  notify:
    - "Master | reload kubelet"
- name: Fixup kubelet client cert rotation 2/2
  lineinfile:
    path: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '^    client-key-data: '
    line: '    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
    backup: true
  notify:
    - "Master | reload kubelet"

View File

@@ -0,0 +1,104 @@
---
- import_tasks: pre-upgrade.yml
  tags:
    - k8s-pre-upgrade
- name: Create webhook token auth config
  template:
    src: webhook-token-auth-config.yaml.j2
    dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
    mode: "0640"  # quoted: unquoted 0640 is an octal YAML int
  when: kube_webhook_token_auth|default(false)
- name: Create webhook authorization config
  template:
    src: webhook-authorization-config.yaml.j2
    dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
    mode: "0640"
  when: kube_webhook_authorization|default(false)
- name: Create kube-scheduler config
  template:
    src: kubescheduler-config.yaml.j2
    dest: "{{ kube_config_dir }}/kubescheduler-config.yaml"
    mode: "0644"
- import_tasks: encrypt-at-rest.yml
  when:
    - kube_encrypt_secret_data
- name: Install | Copy kubectl binary from download dir
  copy:
    src: "{{ local_release_dir }}/kubectl-{{ kube_version }}-{{ image_arch }}"
    dest: "{{ bin_dir }}/kubectl"
    mode: "0755"
    remote_src: true
  tags:
    - kubectl
    - upgrade
# Best-effort shell completion; only attempted on families that ship
# /etc/bash_completion.d.
- name: Install kubectl bash completion
  shell: "{{ bin_dir }}/kubectl completion bash >/etc/bash_completion.d/kubectl.sh"
  when: ansible_os_family in ["Debian","RedHat"]
  tags:
    - kubectl
  ignore_errors: true  # noqa ignore-errors
- name: Set kubectl bash completion file permissions
  file:
    path: /etc/bash_completion.d/kubectl.sh
    owner: root
    group: root
    mode: "0755"
  when: ansible_os_family in ["Debian","RedHat"]
  tags:
    - kubectl
    - upgrade
  ignore_errors: true  # noqa ignore-errors
# PSP and SecurityContextDeny are mutually exclusive; swap the plugin list
# when PodSecurityPolicy is enabled.
- name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy
  set_fact:
    kube_apiserver_enable_admission_plugins: "{{ kube_apiserver_enable_admission_plugins | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}"
  when: podsecuritypolicy_enabled
- name: Define nodes already joined to existing cluster and first_kube_control_plane
  import_tasks: define-first-kube-control.yml
- name: Include kubeadm setup
  import_tasks: kubeadm-setup.yml
- name: Include kubeadm etcd extra tasks
  include_tasks: kubeadm-etcd.yml
  when: etcd_deployment_type == "kubeadm"
- name: Include kubeadm secondary server apiserver fixes
  include_tasks: kubeadm-fix-apiserver.yml
- name: Include kubelet client cert rotation fixes
  include_tasks: kubelet-fix-client-cert-rotation.yml
  when: kubelet_rotate_certificates
- name: Install script to renew K8S control plane certificates
  template:
    src: k8s-certs-renew.sh.j2
    dest: "{{ bin_dir }}/k8s-certs-renew.sh"
    mode: "0755"  # quoted: unquoted 0755 is an octal YAML int
# Systemd service + timer pair driving the monthly renewal script.
- name: Renew K8S control plane certificates monthly 1/2
  template:
    src: "{{ item }}.j2"
    dest: "/etc/systemd/system/{{ item }}"
    mode: "0644"
  with_items:
    - k8s-certs-renew.service
    - k8s-certs-renew.timer
  register: k8s_certs_units
  when: auto_renew_certificates
- name: Renew K8S control plane certificates monthly 2/2
  systemd:
    name: k8s-certs-renew.timer
    enabled: true
    state: started
    # Only reload systemd when a unit file actually changed.
    daemon-reload: "{{ k8s_certs_units is changed }}"
  when: auto_renew_certificates

View File

@@ -0,0 +1,21 @@
---
- name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
file:
path: "/etc/kubernetes/manifests/{{ item }}.manifest"
state: absent
with_items:
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
register: kube_apiserver_manifest_replaced
when: etcd_secret_changed|default(false)
- name: "Pre-upgrade | Delete master containers forcefully" # noqa 503
shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
args:
executable: /bin/bash
with_items:
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
when: kube_apiserver_manifest_replaced.changed
register: remove_master_container
retries: 10
until: remove_master_container.rc == 0
delay: 1

View File

@@ -0,0 +1,38 @@
---
# Detect AppArmor by probing for apparmor_parser on PATH.
# NOTE(review): changed_when flags the task "changed" when the binary is
# MISSING (rc != 0) — looks inverted for a pure check; confirm intent.
- name: Check AppArmor status
  command: which apparmor_parser
  register: apparmor_status
  failed_when: false
  changed_when: apparmor_status.rc != 0
- name: Set apparmor_enabled
  set_fact:
    apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
- name: Render templates for PodSecurityPolicy
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: "0640"  # quoted: unquoted 0640 is an octal YAML int
  register: psp_manifests
  with_items:
    - {file: psp.yml, type: psp, name: psp}
    - {file: psp-cr.yml, type: clusterrole, name: psp-cr}
    - {file: psp-crb.yml, type: rolebinding, name: psp-crb}
# Apply each rendered manifest via the custom `kube` module; retried because
# the apiserver may not accept writes immediately after bootstrap.
- name: Add policies, roles, bindings for PodSecurityPolicy
  kube:
    name: "{{ item.item.name }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  register: result
  until: result is succeeded
  retries: 10
  delay: 6
  with_items: "{{ psp_manifests.results }}"
  environment:
    KUBECONFIG: "{{ kube_config_dir }}/admin.conf"
  loop_control:
    label: "{{ item.item.file }}"