Add kubespray 2.24

변정훈
2024-02-16 17:08:09 +09:00
parent 1fa9b0df4b
commit f69d904725
1423 changed files with 89069 additions and 2 deletions


@@ -0,0 +1,2 @@
#!/bin/sh
kubectl get pv -o go-template --template='{{ range .items }}{{ $metadata := .metadata }}{{ with $value := index .metadata.annotations "pv.kubernetes.io/provisioned-by" }}{{ if eq $value "kubernetes.io/cinder" }}{{printf "%s\n" $metadata.name}}{{ end }}{{ end }}{{ end }}'
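
The go-template above prints the name of every PersistentVolume whose "pv.kubernetes.io/provisioned-by" annotation equals "kubernetes.io/cinder", i.e. volumes created by the legacy in-tree Cinder provisioner. A rough equivalent for readers more used to jq (jq is not required by the playbook; this is only an illustration):

# List PVs still owned by the in-tree Cinder provisioner (assumes jq is installed).
kubectl get pv -o json \
  | jq -r '.items[]
           | select(.metadata.annotations["pv.kubernetes.io/provisioned-by"] == "kubernetes.io/cinder")
           | .metadata.name'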

extra_playbooks/inventory Symbolic link

@@ -0,0 +1 @@
../inventory


@@ -0,0 +1,30 @@
---
- name: Remove old cloud provider config
  hosts: kube_node:kube_control_plane
  tasks:
    - name: Remove old cloud provider config
      file:
        path: "{{ item }}"
        state: absent
      with_items:
        - /etc/kubernetes/cloud_config
- name: Migrate in-tree Cinder PV
  hosts: kube_control_plane[0]
  tasks:
    - name: Include kubespray-default variables
      include_vars: ../roles/kubespray-defaults/defaults/main/main.yml
    - name: Copy get_cinder_pvs.sh to master
      copy:
        src: get_cinder_pvs.sh
        dest: /tmp
        mode: u+rwx
    - name: Get PVs provisioned by in-tree cloud provider
      command: /tmp/get_cinder_pvs.sh
      register: pvs
    - name: Remove get_cinder_pvs.sh
      file:
        path: /tmp/get_cinder_pvs.sh
        state: absent
    - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
      command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
      loop: "{{ pvs.stdout_lines | list }}"
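
Taken together, the playbook drops the legacy /etc/kubernetes/cloud_config from every node, collects the names of PVs still annotated with the in-tree provisioner via get_cinder_pvs.sh, and re-annotates them so the Cinder CSI driver (cinder.csi.openstack.org) takes ownership. A sketch of a typical invocation; the inventory path and the playbook filename are assumptions, since the diff view above does not show file names:

# Inventory path and playbook name are placeholders; adjust to your checkout.
# -b escalates privileges so the cloud_config files can be removed.
ansible-playbook -i inventory/mycluster/hosts.yaml -b extra_playbooks/migrate_openstack_provider.yml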

extra_playbooks/roles Symbolic link

@@ -0,0 +1 @@
../roles


@@ -0,0 +1,61 @@
---
### NOTE: This playbook cannot be used to deploy any new nodes to the cluster.
### Additional information:
### * Will not upgrade etcd
### * Will not upgrade network plugins
### * Will not upgrade Docker
### * Will not pre-download containers or kubeadm
### * Currently does not support Vault deployment.
###
### In most cases, you probably want to use upgrade-cluster.yml playbook and
### not this one.

- name: Setup ssh config to use the bastion
  hosts: localhost
  gather_facts: False
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- name: Bootstrap hosts OS for Ansible
  hosts: k8s_cluster:etcd:calico_rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  vars:
    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
    ansible_ssh_pipelining: false
  roles:
    - { role: kubespray-defaults}
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Preinstall
  hosts: k8s_cluster:etcd:calico_rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/preinstall, tags: preinstall }

- name: Handle upgrades to master components first to maintain backwards compat.
  hosts: kube_control_plane
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: 1
  roles:
    - { role: kubespray-defaults}
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: kubernetes/node, tags: node }
    - { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
    - { role: upgrade/post-upgrade, tags: post-upgrade }

- name: Finally handle worker upgrades, based on given batch size
  hosts: kube_node:!kube_control_plane
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: "{{ serial | default('20%') }}"
  roles:
    - { role: kubespray-defaults}
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: kubernetes/node, tags: node }
    - { role: upgrade/post-upgrade, tags: post-upgrade }
    - { role: kubespray-defaults}
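
The control-plane play runs with serial: 1, so control plane nodes are upgraded one at a time, while the worker play batches on the serial variable (20% of the worker hosts per batch by default). A hedged example run that overrides the batch size; the inventory path and the playbook filename are assumptions, not something shown in this diff:

# Upgrade workers two hosts at a time instead of the default 20% batch.
ansible-playbook -i inventory/mycluster/hosts.yaml -b \
  -e serial=2 \
  extra_playbooks/upgrade-only-k8s.yml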


@@ -0,0 +1,6 @@
---
- name: Wait for cloud-init to finish
  hosts: all
  tasks:
    - name: Wait for cloud-init to finish
      command: cloud-init status --wait
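
cloud-init status --wait blocks until cloud-init has finished all of its stages on the host, which makes this playbook a convenient pre-flight step before running the main deployment against freshly provisioned cloud instances. The same check can be run ad hoc; the inventory path below is a placeholder:

# Ad-hoc equivalent of the playbook above (inventory path is an assumption).
ansible -i inventory/mycluster/hosts.yaml all -m command -a "cloud-init status --wait"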