kubespray 2.24 추가

This commit is contained in:
변정훈
2024-02-16 17:08:09 +09:00
parent 1fa9b0df4b
commit f69d904725
1423 changed files with 89069 additions and 2 deletions

153
scripts/collect-info.yaml Normal file
View File

@@ -0,0 +1,153 @@
---
# Collect troubleshooting information (command output and system log files)
# from every host and pack it into logs.tar.gz on the Ansible controller.
- name: Collect debug info
  hosts: all
  become: true
  gather_facts: false
  vars:
    docker_bin_dir: /usr/bin
    bin_dir: /usr/local/bin
    ansible_ssh_pipelining: true
    etcd_cert_dir: /etc/ssl/etcd/ssl
    kube_network_plugin: calico
    archive_dirname: collect-info
    # Commands executed on each host. The optional `when` value is a Jinja
    # template *string*: it is rendered when `item.when` is accessed in the
    # tasks below, so the curly braces in the data are intentional.
    commands:
      - name: timedate_info
        cmd: timedatectl status
      - name: kernel_info
        cmd: uname -r
      - name: docker_info
        cmd: "{{ docker_bin_dir }}/docker info"
      - name: ip_info
        cmd: ip -4 -o a
      - name: route_info
        cmd: ip ro
      - name: proc_info
        cmd: ps auxf | grep -v ]$
      - name: systemctl_failed_info
        cmd: systemctl --state=failed --no-pager
      - name: k8s_info
        cmd: "{{ bin_dir }}/kubectl get all --all-namespaces -o wide"
      - name: errors_info
        cmd: journalctl -p err --no-pager
      - name: etcd_info
        cmd: "{{ bin_dir }}/etcdctl endpoint --cluster health"
      - name: calico_info
        cmd: "{{ bin_dir }}/calicoctl node status"
        when: '{{ kube_network_plugin == "calico" }}'
      - name: calico_workload_info
        cmd: "{{ bin_dir }}/calicoctl get workloadEndpoint -o wide"
        when: '{{ kube_network_plugin == "calico" }}'
      - name: calico_pool_info
        cmd: "{{ bin_dir }}/calicoctl get ippool -o wide"
        when: '{{ kube_network_plugin == "calico" }}'
      - name: weave_info
        cmd: weave report
        when: '{{ kube_network_plugin == "weave" }}'
      - name: weave_logs
        cmd: "{{ docker_bin_dir }}/docker logs weave"
        when: '{{ kube_network_plugin == "weave" }}'
      - name: kube_describe_all
        cmd: "{{ bin_dir }}/kubectl describe all --all-namespaces"
      - name: kube_describe_nodes
        cmd: "{{ bin_dir }}/kubectl describe nodes"
      - name: kubelet_logs
        cmd: journalctl -u kubelet --no-pager
      - name: coredns_logs
        cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l k8s-app=coredns -o jsonpath={.items..metadata.name}`;
          do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done"
      - name: apiserver_logs
        cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath={.items..metadata.name}`;
          do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done"
      - name: controller_logs
        cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l component=kube-controller-manager -o jsonpath={.items..metadata.name}`;
          do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done"
      - name: scheduler_logs
        cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l component=kube-scheduler -o jsonpath={.items..metadata.name}`;
          do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done"
      - name: proxy_logs
        cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l k8s-app=kube-proxy -o jsonpath={.items..metadata.name}`;
          do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done"
      - name: nginx_logs
        cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l k8s-app=kube-nginx -o jsonpath={.items..metadata.name}`;
          do {{ bin_dir }}/kubectl logs ${i} -n kube-system; done"
      - name: flannel_logs
        cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l app=flannel -o jsonpath={.items..metadata.name}`;
          do {{ bin_dir }}/kubectl logs ${i} -n kube-system flannel-container; done"
        when: '{{ kube_network_plugin == "flannel" }}'
      - name: canal_logs
        cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l k8s-app=canal-node -o jsonpath={.items..metadata.name}`;
          do {{ bin_dir }}/kubectl logs ${i} -n kube-system flannel; done"
        when: '{{ kube_network_plugin == "canal" }}'
      - name: calico_policy_logs
        cmd: "for i in `{{ bin_dir }}/kubectl get pods -n kube-system -l k8s-app=calico-kube-controllers -o jsonpath={.items..metadata.name}`;
          do {{ bin_dir }}/kubectl logs ${i} -n kube-system ; done"
        when: '{{ kube_network_plugin in ["canal", "calico"] }}'
      - name: helm_show_releases_history
        cmd: "for i in `{{ bin_dir }}/helm list -q`; do {{ bin_dir }}/helm history ${i} --col-width=0; done"
        when: "{{ helm_enabled | default(true) }}"
    # Log files fetched verbatim (covers both Debian- and RHEL-family paths;
    # missing files are tolerated via failed_when: false below).
    logs:
      - /var/log/syslog
      - /var/log/daemon.log
      - /var/log/kern.log
      - /var/log/dpkg.log
      - /var/log/apt/history.log
      - /var/log/yum.log
      - /var/log/messages
      - /var/log/dmesg
  environment:
    ETCDCTL_API: "3"
    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
    ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
    ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
  tasks:
    - name: Set etcd_access_addresses
      set_fact:
        etcd_access_addresses: |-
          {% for item in groups['etcd'] -%}
            https://{{ item }}:2379{% if not loop.last %},{% endif %}
          {%- endfor %}
      when: "'etcd' in groups"

    # NOTE(review): '&>' is bash syntax — assumes the remote shell is bash.
    - name: Storing commands output
      shell: "{{ item.cmd }} &> {{ item.name }}"
      failed_when: false
      with_items: "{{ commands }}"
      when: item.when | default(True)
      no_log: true

    - name: Fetch results
      fetch:
        src: "{{ item.name }}"
        dest: "/tmp/{{ archive_dirname }}/commands"
      with_items: "{{ commands }}"
      when: item.when | default(True)
      failed_when: false

    - name: Fetch logs
      fetch:
        src: "{{ item }}"
        dest: "/tmp/{{ archive_dirname }}/logs"
      with_items: "{{ logs }}"
      failed_when: false

    - name: Pack results and logs
      community.general.archive:
        path: "/tmp/{{ archive_dirname }}"
        dest: "{{ dir | default('.') }}/logs.tar.gz"
        remove: true
        mode: "0640"
      delegate_to: localhost
      connection: local
      become: false
      run_once: true

    - name: Clean up collected command outputs
      file:
        path: "{{ item.name }}"
        state: absent
      with_items: "{{ commands }}"

65
scripts/download_hash.py Normal file
View File

@@ -0,0 +1,65 @@
#!/usr/bin/env python3
# After a new version of Kubernetes has been released,
# run this script to update roles/kubespray-defaults/defaults/main/download.yml
# with new hashes.
import hashlib
import sys
import requests
from ruamel.yaml import YAML
MAIN_YML = "../roles/kubespray-defaults/defaults/main/download.yml"
def open_main_yaml():
    """Load MAIN_YML with a round-trip ruamel parser configured to keep the
    file's existing formatting, returning (data, parser)."""
    parser = YAML()
    parser.explicit_start = True   # keep the leading '---' on dump
    parser.preserve_quotes = True  # do not re-quote scalars on dump
    parser.width = 4096            # avoid wrapping long lines
    with open(MAIN_YML) as handle:
        document = parser.load(handle)
    return document, parser
def download_hash(versions):
    """Download kubelet/kubectl/kubeadm for each given Kubernetes version
    and record their sha256 checksums in MAIN_YML.

    versions -- iterable of version strings, with or without a leading "v".
    Raises requests.HTTPError when a binary cannot be downloaded.
    """
    # Architectures and binaries tracked in the checksums file.
    architectures = ["arm", "arm64", "amd64", "ppc64le"]
    downloads = ["kubelet", "kubectl", "kubeadm"]
    data, yaml = open_main_yaml()
    for download in downloads:
        checksum_name = f"{download}_checksums"
        for arch in architectures:
            for version in versions:
                # Normalize to the "vX.Y.Z" form used by dl.k8s.io URLs.
                if not version.startswith("v"):
                    version = f"v{version}"
                url = f"https://dl.k8s.io/release/{version}/bin/linux/{arch}/{download}"
                download_file = requests.get(url, allow_redirects=True)
                download_file.raise_for_status()
                sha256sum = hashlib.sha256(download_file.content).hexdigest()
                # NOTE(review): assumes data[checksum_name][arch] already
                # exists in download.yml — confirm against that file.
                data[checksum_name][arch][version] = sha256sum
    # Write back with the same ruamel parser so formatting is preserved.
    with open(MAIN_YML, "w") as main_yml:
        yaml.dump(data, main_yml)
    print(f"\n\nUpdated {MAIN_YML}\n")
def usage():
    """Print command-line usage for this script to stdout."""
    script = sys.argv[0]
    print(f"USAGE:\n {script} [k8s_version1] [[k8s_version2]....[k8s_versionN]]")
def main(argv=None):
    """Entry point: update checksum data for the requested versions.

    Falls back to sys.argv[1:] when argv is None or empty; prints usage and
    returns 1 when no versions are supplied, otherwise returns 0.
    """
    requested = argv or sys.argv[1:]
    if not requested:
        usage()
        return 1
    download_hash(requested)
    return 0


if __name__ == "__main__":
    sys.exit(main())

259
scripts/download_hash.sh Executable file
View File

@@ -0,0 +1,259 @@
#!/bin/bash
# Regenerate roles/kubespray-defaults/defaults/main/checksums.yml with the
# sha256 checksums of every binary kubespray downloads.
set -o errexit
set -o pipefail
# Set DEBUG=true in the environment to trace every command.
if [[ ${DEBUG:-false} == "true" ]]; then
  set -o xtrace
fi
# Output file (inside the git checkout) and the download cache location.
checksums_file="$(git rev-parse --show-toplevel)/roles/kubespray-defaults/defaults/main/checksums.yml"
downloads_folder=/tmp/kubespray_binaries
# Fetch the newest release versions of a project, retrying on failure.
#   $1 type  -- version source; "github_tags" dispatches to _get_github_tags
#   $2 name  -- project identifier (e.g. "owner/repo")
#   $3 limit -- max number of versions to emit (default 7)
#   $4       -- python snippet filtering the raw tag list (default keeps
#               plain "vX.Y.Z"/"X.Y.Z" tags and prints the first $limit)
# Prints the space-separated versions on stdout; exits 1 after 5 failures.
function get_versions {
    local type="$1"
    local name="$2"
    # NOTE: Limit in the number of versions to be register in the checksums file
    local limit="${3:-7}"
    local python_app="${4:-"import sys,re;tags=[tag.rstrip() for tag in sys.stdin if re.match(\'^v?(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)$\',tag)];print(\' \'.join(tags[:$limit]))"}"
    local version=""
    local attempt_counter=0
    readonly max_attempts=5

    until [ "$version" ]; do
        version=$("_get_$type" "$name" "$python_app")
        if [ "$version" ]; then
            break
        elif [ ${attempt_counter} -eq ${max_attempts} ]; then
            echo "Max attempts reached"
            exit 1
        fi
        attempt_counter=$((attempt_counter + 1))
        # Back off a little longer on each retry.
        sleep $((attempt_counter * 2))
    done

    echo "${version}"
}
# List tag names of a GitHub repository (single API page, newest first).
#   $1 repo       -- "owner/repo"
#   $2 python_app -- python snippet that filters/joins the tag names
# Prints nothing when the API call fails or returns no tags (so the caller
# can retry).
function _get_github_tags {
    local repo="$1"
    local python_app="$2"

    # The number of results per page (max 100).
    tags="$(curl -s "https://api.github.com/repos/$repo/tags?per_page=100")"
    if [ "$tags" ]; then
        # Pull the "name" values out of the JSON, then apply the caller's
        # python filter to select and join the wanted versions.
        echo "$tags" | grep -Po '"name":.*?[^\\]",' | awk -F '"' '{print $4}' | python -c "$python_app"
    fi
}
# Compare two version strings.
#   Usage: _vercmp <v1> <op> <v2>   with op one of: == > < >= <=
# Returns 0 when the comparison holds, non-zero otherwise; exits 1 on an
# unknown operator.
function _vercmp {
    local left=$1
    local operator=$2
    local right=$3
    local lowest

    # sort -V orders the two versions; whichever sorts first is the smaller,
    # which is enough to decide every supported comparison.
    lowest=$(printf '%s\n%s\n' "$left" "$right" | sort -V | head -n1)

    case $operator in
    "==")
        [ "$left" = "$right" ]
        return
        ;;
    ">")
        [ "$left" != "$right" ] && [ "$lowest" = "$right" ]
        return
        ;;
    "<")
        [ "$left" != "$right" ] && [ "$lowest" = "$left" ]
        return
        ;;
    ">=")
        [ "$lowest" = "$right" ]
        return
        ;;
    "<=")
        [ "$lowest" = "$left" ]
        return
        ;;
    *)
        echo "unrecognised op: $operator"
        exit 1
        ;;
    esac
}
# Append a "<binary>_checksums:" section (arch -> version -> sha256) to the
# checksums file. $1 is the binary key; remaining args are versions.
# Unpublished os/arch combinations are recorded as checksum 0.
function get_checksums {
    local binary="$1"
    # Binaries whose recorded version key must not carry the leading "v".
    local version_exceptions="cri_dockerd_archive nerdctl_archive containerd_archive youki"
    # Architectures for which a given binary is not published upstream.
    declare -A skip_archs=(
    ["crio_archive"]="arm ppc64le"
    ["calicoctl_binary"]="arm"
    ["ciliumcli_binary"]="ppc64le"
    ["etcd_binary"]="arm"
    ["cri_dockerd_archive"]="arm ppc64le"
    ["runc"]="arm"
    ["crun"]="arm ppc64le"
    ["youki"]="arm arm64 ppc64le"
    ["kata_containers_binary"]="arm arm64 ppc64le"
    ["gvisor_runsc_binary"]="arm ppc64le"
    ["gvisor_containerd_shim_binary"]="arm ppc64le"
    ["containerd_archive"]="arm"
    ["skopeo_binary"]="arm ppc64le"
    )
    echo "${binary}_checksums:" | tee --append "$checksums_file"
    for arch in arm arm64 amd64 ppc64le; do
        # BUGFIX: nested YAML mapping levels need increasing indentation;
        # arch keys sit at 2 spaces, version keys at 4.
        echo "  $arch:" | tee --append "$checksums_file"
        for version in "${@:2}"; do
            checksum=0
            [[ "${skip_archs[$binary]}" == *"$arch"* ]] || checksum=$(_get_checksum "$binary" "$version" "$arch")
            [[ "$version_exceptions" != *"$binary"* ]] || version=${version#v}
            echo "    $version: $checksum" | tee --append "$checksums_file"
        done
    done
}
# Append krew_archive_checksums (os -> arch -> version -> sha256) to the
# checksums file. krew only ships certain os/arch pairs; the rest get 0.
function get_krew_archive_checksums {
    declare -A archs=(
    ["linux"]="arm arm64 amd64"
    ["darwin"]="arm64 amd64"
    ["windows"]="amd64"
    )
    echo "krew_archive_checksums:" | tee --append "$checksums_file"
    for os in "${!archs[@]}"; do
        # BUGFIX: nested YAML mapping levels need increasing indentation
        # (os at 2 spaces, arch at 4, version at 6).
        echo "  $os:" | tee --append "$checksums_file"
        for arch in arm arm64 amd64 ppc64le; do
            echo "    $arch:" | tee --append "$checksums_file"
            for version in "$@"; do
                checksum=0
                # Only published os/arch pairs get a real checksum.
                [[ " ${archs[$os]} " != *" $arch "* ]] || checksum=$(_get_checksum "krew_archive" "$version" "$arch" "$os")
                echo "      $version: $checksum" | tee --append "$checksums_file"
            done
        done
    done
}
# Append calico_crds_archive_checksums (flat version -> sha256 map) to the
# checksums file. Indented by 2 spaces for consistency with the other
# generated sections.
function get_calico_crds_archive_checksums {
    echo "calico_crds_archive_checksums:" | tee --append "$checksums_file"
    for version in "$@"; do
        echo "  $version: $(_get_checksum "calico_crds_archive" "$version")" | tee --append "$checksums_file"
    done
}
# Append containerd_archive_checksums to the checksums file. containerd
# publishes binaries for an arch only from a minimum version onward.
function get_containerd_archive_checksums {
    # First containerd version shipped per architecture ("2" for arm
    # effectively records 0 for every 1.x release).
    declare -A support_version_history=(
    ["arm"]="2"
    ["arm64"]="1.6.0"
    ["amd64"]="1.5.5"
    ["ppc64le"]="1.6.7"
    )
    echo "containerd_archive_checksums:" | tee --append "$checksums_file"
    for arch in arm arm64 amd64 ppc64le; do
        # BUGFIX: nested YAML mapping levels need increasing indentation.
        echo "  $arch:" | tee --append "$checksums_file"
        for version in "${@}"; do
            # Versions older than the first supported release are recorded as 0.
            _vercmp "${version#v}" '>=' "${support_version_history[$arch]}" && checksum=$(_get_checksum "containerd_archive" "$version" "$arch") || checksum=0
            echo "    ${version#v}: $checksum" | tee --append "$checksums_file"
        done
    done
}
# Append kubelet/kubectl/kubeadm checksums to the checksums file.
# linux/arm builds stopped at Kubernetes 1.27, so arm versions >= 1.27 are
# recorded as 0; arm64/amd64/ppc64le get real checksums for every version.
function get_k8s_checksums {
    local binary=$1
    echo "${binary}_checksums:" | tee --append "$checksums_file"
    # BUGFIX: nested YAML mapping levels need increasing indentation;
    # arch keys at 2 spaces, version keys at 4.
    echo "  arm:" | tee --append "$checksums_file"
    for version in "${@:2}"; do
        _vercmp "${version#v}" '<' "1.27" && checksum=$(_get_checksum "$binary" "$version" "arm") || checksum=0
        echo "    ${version}: $checksum" | tee --append "$checksums_file"
    done
    for arch in arm64 amd64 ppc64le; do
        echo "  $arch:" | tee --append "$checksums_file"
        for version in "${@:2}"; do
            echo "    ${version}: $(_get_checksum "$binary" "$version" "$arch")" | tee --append "$checksums_file"
        done
    done
}
# Download a binary (cached under $downloads_folder) and print its sha256.
#   $1 binary   -- key into the url table below
#   $2 version  -- upstream version tag (usually "vX.Y.Z")
#   $3 arch     -- target architecture (default amd64)
#   $4 os       -- target os (default linux)
function _get_checksum {
    local binary="$1"
    local version="$2"
    local arch="${3:-amd64}"
    local os="${4:-linux}"
    local target="$downloads_folder/$binary/$version-$os-$arch"
    # BUGFIX: these were 'readonly', which fails ("readonly variable") on a
    # second call within the same shell; 'local' scopes them per invocation.
    local github_url="https://github.com"
    local github_releases_url="$github_url/%s/releases/download/$version/%s"
    local github_archive_url="$github_url/%s/archive/%s"
    local google_url="https://storage.googleapis.com"
    local release_url="https://dl.k8s.io"
    local k8s_url="$release_url/release/$version/bin/$os/$arch/%s"
    # Download URLs
    declare -A urls=(
    ["crictl"]="$(printf "$github_releases_url" "kubernetes-sigs/cri-tools" "crictl-$version-$os-$arch.tar.gz")"
    ["crio_archive"]="$google_url/cri-o/artifacts/cri-o.$arch.$version.tar.gz"
    ["kubelet"]="$(printf "$k8s_url" "kubelet")"
    ["kubectl"]="$(printf "$k8s_url" "kubectl")"
    ["kubeadm"]="$(printf "$k8s_url" "kubeadm")"
    ["etcd_binary"]="$(printf "$github_releases_url" "etcd-io/etcd" "etcd-$version-$os-$arch.tar.gz")"
    ["cni_binary"]="$(printf "$github_releases_url" "containernetworking/plugins" "cni-plugins-$os-$arch-$version.tgz")"
    ["calicoctl_binary"]="$(printf "$github_releases_url" "projectcalico/calico" "calicoctl-$os-$arch")"
    ["ciliumcli_binary"]="$(printf "$github_releases_url" "cilium/cilium-cli" "cilium-$os-$arch.tar.gz")"
    ["calico_crds_archive"]="$(printf "$github_archive_url" "projectcalico/calico" "$version.tar.gz")"
    ["krew_archive"]="$(printf "$github_releases_url" "kubernetes-sigs/krew" "krew-${os}_$arch.tar.gz")"
    ["helm_archive"]="https://get.helm.sh/helm-$version-$os-$arch.tar.gz"
    ["cri_dockerd_archive"]="$(printf "$github_releases_url" "Mirantis/cri-dockerd" "cri-dockerd-${version#v}.$arch.tgz")"
    ["runc"]="$(printf "$github_releases_url" "opencontainers/runc" "runc.$arch")"
    ["crun"]="$(printf "$github_releases_url" "containers/crun" "crun-$version-$os-$arch")"
    ["youki"]="$(printf "$github_releases_url" "containers/youki" "youki_$([ "$version" == "v0.0.1" ] && echo "v0_0_1" || echo "${version#v}" | sed 's|\.|_|g')_$os.tar.gz")"
    ["kata_containers_binary"]="$(printf "$github_releases_url" "kata-containers/kata-containers" "kata-static-$version-${arch//amd64/x86_64}.tar.xz")"
    ["gvisor_runsc_binary"]="$(printf "$google_url/gvisor/releases/release/$version/%s/runsc" "$(echo "$arch" | sed -e 's/amd64/x86_64/' -e 's/arm64/aarch64/')")"
    ["gvisor_containerd_shim_binary"]="$(printf "$google_url/gvisor/releases/release/$version/%s/containerd-shim-runsc-v1" "$(echo "$arch" | sed -e 's/amd64/x86_64/' -e 's/arm64/aarch64/')")"
    ["nerdctl_archive"]="$(printf "$github_releases_url" "containerd/nerdctl" "nerdctl-${version#v}-$os-$([ "$arch" == "arm" ] && echo "arm-v7" || echo "$arch" ).tar.gz")"
    ["containerd_archive"]="$(printf "$github_releases_url" "containerd/containerd" "containerd-${version#v}-$os-$arch.tar.gz")"
    ["skopeo_binary"]="$(printf "$github_releases_url" "lework/skopeo-binary" "skopeo-$os-$arch")"
    ["yq"]="$(printf "$github_releases_url" "mikefarah/yq" "yq_${os}_$arch")"
    )
    # BUGFIX: quote expansions so paths survive word splitting/globbing.
    mkdir -p "$(dirname "$target")"
    [ -f "$target" ] || curl -LfSs -o "${target}" "${urls[$binary]}"
    sha256sum "${target}" | awk '{print $1}'
}
# Regenerate the whole checksums file from scratch, newest versions first.
# The $(get_versions ...) results are intentionally unquoted so each version
# becomes a separate argument to the get_*checksums helpers.
function main {
    mkdir -p "$(dirname "$checksums_file")"
    echo "---" | tee "$checksums_file"
    get_checksums crictl $(get_versions github_tags kubernetes-sigs/cri-tools 4)
    get_checksums crio_archive $(get_versions github_tags cri-o/cri-o)
    kubernetes_versions=$(get_versions github_tags kubernetes/kubernetes 25)
    echo "# Checksum" | tee --append "$checksums_file"
    echo "# Kubernetes versions above Kubespray's current target version are untested and should be used with caution." | tee --append "$checksums_file"
    get_k8s_checksums kubelet $kubernetes_versions
    get_checksums kubectl $kubernetes_versions
    get_k8s_checksums kubeadm $kubernetes_versions
    get_checksums etcd_binary $(get_versions github_tags etcd-io/etcd)
    get_checksums cni_binary $(get_versions github_tags containernetworking/plugins)
    calico_versions=$(get_versions github_tags projectcalico/calico 20)
    get_checksums calicoctl_binary $calico_versions
    get_checksums ciliumcli_binary $(get_versions github_tags cilium/cilium-cli 10)
    get_calico_crds_archive_checksums $calico_versions
    get_krew_archive_checksums $(get_versions github_tags kubernetes-sigs/krew 2)
    get_checksums helm_archive $(get_versions github_tags helm/helm)
    get_checksums cri_dockerd_archive $(get_versions github_tags Mirantis/cri-dockerd)
    get_checksums runc $(get_versions github_tags opencontainers/runc 5)
    get_checksums crun $(get_versions github_tags containers/crun)
    get_checksums youki $(get_versions github_tags containers/youki)
    get_checksums kata_containers_binary $(get_versions github_tags kata-containers/kata-containers 10)
    # gvisor tags look like "release-YYYYMMDD.N"; extract the date part.
    gvisor_versions=$(get_versions github_tags google/gvisor 0 "import sys,re;tags=[tag[8:16] for tag in sys.stdin if re.match('^release-?(0|[1-9]\d*)\.(0|[1-9]\d*)$',tag)];print(' '.join(tags[:9]))")
    get_checksums gvisor_runsc_binary $gvisor_versions
    get_checksums gvisor_containerd_shim_binary $gvisor_versions
    get_checksums nerdctl_archive $(get_versions github_tags containerd/nerdctl)
    get_containerd_archive_checksums $(get_versions github_tags containerd/containerd 30)
    get_checksums skopeo_binary $(get_versions github_tags lework/skopeo-binary)
    get_checksums yq $(get_versions github_tags mikefarah/yq)
}

# Python-style guard: only run main when executed, not when sourced with
# __name__ set to something else.
if [[ ${__name__:-"__main__"} == "__main__" ]]; then
    main
fi

12
scripts/gen_tags.sh Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/bash
# BUGFIX: 'set -o pipefail' is a bash feature, not POSIX sh — under dash the
# original '#!/bin/sh' shebang made 'set -eo pipefail' fail at startup.
set -eo pipefail

#Generate MD formatted tags from roles and cluster yaml files
printf "|%25s |%9s\n" "Tag name" "Used for"
echo "|--------------------------|---------"

# Collect every tag used in the repo's yaml files, one per line, deduplicated.
tags=$(grep -r tags: . | perl -ne '/tags:\s\[?(([\w\-_]+,?\s?)+)/ && printf "%s ", "$1"'|\
perl -ne 'print join "\n", split /\s|,/' | sort -u)

# Look each tag up in docs/ansible.md to recover its description (perl reads
# the file directly; the original 'cat | perl' was a useless use of cat).
for tag in $tags; do
  match=$(perl -ne "/^\|\s+${tag}\s\|\s+((\S+\s?)+)/ && printf \$1" docs/ansible.md)
  printf "|%25s |%s\n" "${tag}" " ${match}"
done

View File

@@ -0,0 +1,2 @@
openrc
venv

View File

@@ -0,0 +1,24 @@
# gitlab-branch-cleanup
Cleanup old branches in a GitLab project
## Installation
```shell
pip install -r requirements.txt
python main.py --help
```
## Usage
```console
$ export GITLAB_API_TOKEN=foobar
$ python main.py kargo-ci/kubernetes-sigs-kubespray
Deleting branch pr-5220-containerd-systemd from 2020-02-17 ...
Deleting branch pr-5561-feature/cinder_csi_fixes from 2020-02-17 ...
Deleting branch pr-5607-add-flatcar from 2020-02-17 ...
Deleting branch pr-5616-fix-typo from 2020-02-17 ...
Deleting branch pr-5634-helm_310 from 2020-02-18 ...
Deleting branch pr-5644-patch-1 from 2020-02-15 ...
Deleting branch pr-5647-master from 2020-02-17 ...
```

View File

@@ -0,0 +1,38 @@
# Delete stale merge-request mirror branches from a GitLab project.
# Requires the GITLAB_API_TOKEN environment variable; only branches older
# than --age days whose names start with --prefix are removed.
import gitlab
import argparse
import os
import sys
from datetime import timedelta, datetime, timezone

parser = argparse.ArgumentParser(
    description='Cleanup old branches in a GitLab project')
parser.add_argument('--api', default='https://gitlab.com/',
                    help='URL of GitLab API, defaults to gitlab.com')
parser.add_argument('--age', type=int, default=30,
                    help='Delete branches older than this many days')
parser.add_argument('--prefix', default='pr-',
                    help='Cleanup only branches with names matching this prefix')
parser.add_argument('--dry-run', action='store_true',
                    help='Do not delete anything')
parser.add_argument('project',
                    help='Path of the GitLab project')
args = parser.parse_args()

# Branches whose last commit predates this cutoff are deletion candidates.
limit = datetime.now(timezone.utc) - timedelta(days=args.age)

if os.getenv('GITLAB_API_TOKEN', '') == '':
    print("Environment variable GITLAB_API_TOKEN is required.")
    sys.exit(2)

gl = gitlab.Gitlab(args.api, private_token=os.getenv('GITLAB_API_TOKEN'))
gl.auth()
p = gl.projects.get(args.project)

for b in p.branches.list(all=True):
    # NOTE(review): datetime.fromisoformat() before Python 3.11 cannot parse
    # a trailing 'Z' — assumes the GitLab API returns a +HH:MM offset; verify.
    date = datetime.fromisoformat(b.commit['created_at'])
    # Never touch protected or default branches, regardless of age.
    if date < limit and not b.protected and not b.default and b.name.startswith(args.prefix):
        print("Deleting branch %s from %s ..." %
              (b.name, date.date().isoformat()))
        if not args.dry_run:
            b.delete()

View File

@@ -0,0 +1 @@
python-gitlab

22
scripts/gitlab-runner.sh Normal file
View File

@@ -0,0 +1,22 @@
#!/bin/sh
# Start a dockerized gitlab-runner with persistent cache and config volumes,
# sharing the host docker socket so CI jobs can launch sibling containers.
docker run -d --name gitlab-runner --restart always -v /srv/gitlab-runner/cache:/srv/gitlab-runner/cache -v /srv/gitlab-runner/config:/etc/gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock gitlab/gitlab-runner:v1.10.0
#
# Example runner configuration expected at /srv/gitlab-runner/config:
#/srv/gitlab-runner/config# cat config.toml
#concurrent = 10
#check_interval = 1
#[[runners]]
# name = "2edf3d71fe19"
# url = "https://gitlab.com"
# token = "THE TOKEN-CHANGEME"
# executor = "docker"
# [runners.docker]
# tls_verify = false
# image = "docker:latest"
# privileged = true
# disable_cache = false
# cache_dir = "/srv/gitlab-runner/cache"
# volumes = ["/var/run/docker.sock:/var/run/docker.sock", "/srv/gitlab-runner/cache:/cache:rw"]
# [runners.cache]

1
scripts/openstack-cleanup/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
openrc

View File

@@ -0,0 +1,21 @@
# openstack-cleanup
Tool to delete OpenStack servers older than a specified age (default 4h).
Useful to cleanup orphan servers that are left behind when CI is manually cancelled or fails unexpectedly.
## Installation
```shell
pip install -r requirements.txt
python main.py --help
```
## Usage
```console
$ python main.py
This will delete resources... (ctrl+c to cancel)
Will delete server example1
Will delete server example2
```

View File

@@ -0,0 +1,98 @@
#!/usr/bin/env python
"""Delete OpenStack resources (servers, security groups, ports, subnets,
networks) that are older than a configurable number of hours."""
import argparse
import openstack
import logging
import datetime
import time

# Timestamp format of the created_at fields returned by the OpenStack API.
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Grace period before destructive work starts (gives a chance to ctrl+c).
PAUSE_SECONDS = 5

log = logging.getLogger('openstack-cleanup')

parser = argparse.ArgumentParser(description='Cleanup OpenStack resources')

parser.add_argument('-v', '--verbose', action='store_true',
                    help='Increase verbosity')

parser.add_argument('--hours', type=int, default=4,
                    help='Age (in hours) of VMs to cleanup (default: 4h)')

parser.add_argument('--dry-run', action='store_true',
                    help='Do not delete anything')

args = parser.parse_args()

# Resources created before this moment are eligible for deletion.
# NOTE(review): naive local-time cutoff compared against API timestamps
# parsed without timezone info — assumes both are UTC; confirm.
oldest_allowed = datetime.datetime.now() - datetime.timedelta(hours=args.hours)
def main():
    """Delete stale OpenStack resources in dependency order: servers,
    security groups, ports (detaching router interfaces when needed),
    subnets, then non-external networks."""
    if args.dry_run:
        print('Running in dry-run mode')
    else:
        print('This will delete resources... (ctrl+c to cancel)')
        time.sleep(PAUSE_SECONDS)

    conn = openstack.connect()

    print('Servers...')
    map_if_old(conn.compute.delete_server,
               conn.compute.servers())

    print('Security groups...')
    map_if_old(conn.network.delete_security_group,
               conn.network.security_groups())

    print('Ports...')
    try:
        map_if_old(conn.network.delete_port,
                   conn.network.ports())
    except openstack.exceptions.ConflictException:
        # Need to find subnet-id which should be removed from a router
        for sn in conn.network.subnets():
            try:
                fn_if_old(conn.network.delete_subnet, sn)
            except openstack.exceptions.ConflictException:
                for r in conn.network.routers():
                    # BUGFIX: the original passed sn/r as extra print()
                    # arguments instead of %-interpolating them.
                    print("Deleting subnet %s from router %s" % (sn, r))
                    try:
                        conn.network.remove_interface_from_router(
                            r, subnet_id=sn.id)
                    except Exception as ex:
                        print("Failed to delete subnet from router as %s" % ex)

        for ip in conn.network.ips():
            fn_if_old(conn.network.delete_ip, ip)

        # After removing unnecessary subnet from router, retry to delete ports
        map_if_old(conn.network.delete_port,
                   conn.network.ports())

    print('Subnets...')
    map_if_old(conn.network.delete_subnet,
               conn.network.subnets())

    print('Networks...')
    for n in conn.network.networks():
        # External (provider) networks are shared infrastructure — keep them.
        if not n.is_router_external:
            fn_if_old(conn.network.delete_network, n)
def map_if_old(fn, items):
    """Apply ``fn`` to every element of ``items`` that is old enough,
    delegating the per-item age/skip checks to fn_if_old."""
    for entry in items:
        fn_if_old(fn, entry)
# run the given fn function only if the passed item is older than allowed
def fn_if_old(fn, item):
    """Apply ``fn`` to ``item`` when it predates ``oldest_allowed``.

    Honors --dry-run and skips any resource literally named "default"
    (intended for the default security group).
    """
    # NOTE(review): created_at is parsed as a naive datetime and compared to
    # the naive local-time cutoff — assumes both are UTC; confirm.
    created_at = datetime.datetime.strptime(item.created_at, DATE_FORMAT)
    if item.name == "default":  # skip default security group
        return
    if created_at < oldest_allowed:
        # Resources support dict-style %-formatting, exposing name/id keys.
        print('Will delete %(name)s (%(id)s)' % item)
        if not args.dry_run:
            fn(item)
if __name__ == '__main__':
    # execute only if run as a script
    main()

View File

@@ -0,0 +1,2 @@
openstacksdk>=0.43.0
six

51
scripts/premoderator.sh Normal file
View File

@@ -0,0 +1,51 @@
#!/bin/bash
# A naive premoderation script to allow Gitlab CI pipeline on a specific PRs' comment
# Exits with 0, if the pipeline is good to go
# Exits with 1, if the user is not allowed to start pipeline
# Exits with 2, if script is unable to get issue id from CI_COMMIT_REF_NAME variable
# Exits with 3, if missing the magic comment in the pipeline to start the pipeline

CURL_ARGS="-fs --retry 4 --retry-delay 5"
MAGIC="${MAGIC:-ci check this}"
# BUGFIX: GitHub removed authentication via the '?access_token=' query
# parameter; the token must be sent in the Authorization header instead.
AUTH_HEADER="Authorization: token ${GITHUB_TOKEN}"
exit_code=0

# Get PR number from CI_COMMIT_REF_NAME (mirror branches are named pr-<n>-...)
issue=$(echo "${CI_COMMIT_REF_NAME}" | perl -ne '/^pr-(\d+)-\S+$/ && print $1')
if [ "$issue" = "" ]; then
  echo "Unable to get issue id from: $CI_COMMIT_REF_NAME"
  exit 2
fi

echo "Fetching labels from PR $issue"
labels=$(curl ${CURL_ARGS} -H "${AUTH_HEADER}" "https://api.github.com/repos/kubernetes-sigs/kubespray/issues/${issue}" | jq '{labels: .labels}' | jq '.labels[].name' | jq -s '')
labels_to_patch=$(echo -n $labels | jq '. + ["needs-ci-auth"]' | tr -d "\n")

echo "Checking for '$MAGIC' comment in PR $issue"
# Get the user name from the PR comments with the wanted magic incantation casted
user=$(curl ${CURL_ARGS} "https://api.github.com/repos/kubernetes-sigs/kubespray/issues/${issue}/comments" | jq -M "map(select(.body | contains (\"$MAGIC\"))) | .[0] .user.login" | tr -d '"')

# Check for the required user group membership to allow (exit 0) or decline (exit >0) the pipeline
if [ "$user" = "" ] || [ "$user" = "null" ]; then
  echo "Missing '$MAGIC' comment from one of the OWNERS"
  exit_code=3
else
  echo "Found comment from user: $user"
  # Org membership check: 204/404 drives curl's -f exit status.
  curl ${CURL_ARGS} "https://api.github.com/orgs/kubernetes-sigs/members/${user}"
  if [ $? -ne 0 ]; then
    echo "User does not have permissions to start CI run"
    exit_code=1
  else
    labels_to_patch=$(echo -n $labels | jq '. - ["needs-ci-auth"]' | tr -d "\n")
    exit_code=0
    echo "$user has allowed CI to start"
  fi
fi

# Patch labels on PR
curl ${CURL_ARGS} -H "${AUTH_HEADER}" --request PATCH "https://api.github.com/repos/kubernetes-sigs/kubespray/issues/${issue}" -H "Content-Type: application/json" -d "{\"labels\": ${labels_to_patch}}"

exit $exit_code