update

kubespray/roles/kubernetes/client/defaults/main.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
kubeconfig_localhost: false
kubeconfig_localhost_ansible_host: false
kubectl_localhost: false
artifacts_dir: "{{ inventory_dir }}/artifacts"

kube_config_dir: "/etc/kubernetes"
kube_apiserver_port: "6443"
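
For context, these defaults are normally flipped on from inventory group_vars rather than edited in the role. A minimal sketch, assuming a hypothetical inventory path (the variable names are the ones defined above):

  # inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (hypothetical path)
  kubeconfig_localhost: true
  kubectl_localhost: true
  artifacts_dir: "{{ inventory_dir }}/artifacts"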

kubespray/roles/kubernetes/client/tasks/main.yml (new file, 112 lines)
@@ -0,0 +1,112 @@
---
- name: Set external kube-apiserver endpoint
  set_fact:
    external_apiserver_address: >-
      {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined -%}
      {{ loadbalancer_apiserver.address }}
      {%- elif kubeconfig_localhost_ansible_host is defined and kubeconfig_localhost_ansible_host -%}
      {{ hostvars[groups['kube_control_plane'][0]].ansible_host }}
      {%- else -%}
      {{ kube_apiserver_access_address }}
      {%- endif -%}
    external_apiserver_port: >-
      {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and loadbalancer_apiserver.port is defined -%}
      {{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
      {%- else -%}
      {{ kube_apiserver_port }}
      {%- endif -%}
  tags:
    - facts

- name: Create kube config dir for current/ansible become user
  file:
    path: "{{ ansible_env.HOME | default('/root') }}/.kube"
    mode: "0700"
    state: directory

- name: Copy admin kubeconfig to current/ansible become user home
  copy:
    src: "{{ kube_config_dir }}/admin.conf"
    dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
    remote_src: yes
    mode: "0600"
    backup: yes

- name: Create kube artifacts dir
  file:
    path: "{{ artifacts_dir }}"
    mode: "0750"
    state: directory
  delegate_to: localhost
  connection: local
  become: no
  run_once: yes
  when: kubeconfig_localhost

- name: Wait for k8s apiserver
  wait_for:
    host: "{{ kube_apiserver_access_address }}"
    port: "{{ kube_apiserver_port }}"
    timeout: 180

- name: Get admin kubeconfig from remote host
  slurp:
    src: "{{ kube_config_dir }}/admin.conf"
  run_once: yes
  register: raw_admin_kubeconfig
  when: kubeconfig_localhost

- name: Convert kubeconfig to YAML
  set_fact:
    admin_kubeconfig: "{{ raw_admin_kubeconfig.content | b64decode | from_yaml }}"
  when: kubeconfig_localhost

- name: Override username in kubeconfig
  set_fact:
    final_admin_kubeconfig: "{{ admin_kubeconfig | combine(override_cluster_name, recursive=true) | combine(override_context, recursive=true) | combine(override_user, recursive=true) }}"
  vars:
    cluster_infos: "{{ admin_kubeconfig['clusters'][0]['cluster'] }}"
    user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}"
    username: "kubernetes-admin-{{ cluster_name }}"
    context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}"
    override_cluster_name: "{{ { 'clusters': [ { 'cluster': (cluster_infos|combine({'server': 'https://'+external_apiserver_address+':'+(external_apiserver_port|string)})), 'name': cluster_name } ] } }}"
    override_context: "{{ { 'contexts': [ { 'context': { 'user': username, 'cluster': cluster_name }, 'name': context } ], 'current-context': context } }}"
    override_user: "{{ { 'users': [ { 'name': username, 'user': user_certs } ] } }}"
  when: kubeconfig_localhost

- name: Write admin kubeconfig on ansible host
  copy:
    content: "{{ final_admin_kubeconfig | to_nice_yaml(indent=2) }}"
    dest: "{{ artifacts_dir }}/admin.conf"
    mode: 0600
  delegate_to: localhost
  connection: local
  become: no
  run_once: yes
  when: kubeconfig_localhost

- name: Copy kubectl binary to ansible host
  fetch:
    src: "{{ bin_dir }}/kubectl"
    dest: "{{ artifacts_dir }}/kubectl"
    flat: yes
    validate_checksum: no
  register: copy_binary_result
  until: copy_binary_result is not failed
  retries: 20
  become: no
  run_once: yes
  when: kubectl_localhost

- name: Create helper script kubectl.sh on ansible host
  copy:
    content: |
      #!/bin/bash
      ${BASH_SOURCE%/*}/kubectl --kubeconfig=${BASH_SOURCE%/*}/admin.conf "$@"
    dest: "{{ artifacts_dir }}/kubectl.sh"
    mode: 0755
  become: no
  run_once: yes
  delegate_to: localhost
  connection: local
  when: kubectl_localhost and kubeconfig_localhost
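
Taken together, the slurp/combine/copy tasks above produce a self-contained artifacts directory. A sketch of the rewritten admin.conf, assuming cluster_name is cluster.local and a hypothetical external endpoint of 203.0.113.10:6443 — only the fields touched by the three combine() overrides change; certificate data is carried over as-is:

  clusters:
    - cluster:
        certificate-authority-data: <unchanged>
        server: https://203.0.113.10:6443
      name: cluster.local
  contexts:
    - context:
        cluster: cluster.local
        user: kubernetes-admin-cluster.local
      name: kubernetes-admin-cluster.local@cluster.local
  current-context: kubernetes-admin-cluster.local@cluster.local
  users:
    - name: kubernetes-admin-cluster.local
      user: <unchanged client certificate/key data>

With both kubectl_localhost and kubeconfig_localhost enabled, the generated kubectl.sh wraps the fetched binary with this kubeconfig.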

@@ -0,0 +1,31 @@
---
# Set etcd user/group
etcd_owner: etcd

# Note: This does not set up DNS entries. It simply adds the following DNS
# entries to the certificate
etcd_cert_alt_names:
  - "etcd.kube-system.svc.{{ dns_domain }}"
  - "etcd.kube-system.svc"
  - "etcd.kube-system"
  - "etcd"
etcd_cert_alt_ips: []

etcd_heartbeat_interval: "250"
etcd_election_timeout: "5000"

# etcd_snapshot_count: "10000"

etcd_metrics: "basic"

## A dictionary of extra environment variables to add to etcd.env, formatted like:
##  etcd_extra_vars:
##    var1: "value1"
##    var2: "value2"
## Note this is different from the etcd role, which uses the ETCD_ prefix, caps, and underscores
etcd_extra_vars: {}

# etcd_quota_backend_bytes: "2147483648"
# etcd_max_request_bytes: "1572864"

etcd_compaction_retention: "8"
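
As the comment notes, etcd_cert_alt_names/etcd_cert_alt_ips only extend the certificate's SAN list; no DNS is configured. A sketch of an inventory override adding an external name and IP (values hypothetical):

  etcd_cert_alt_names:
    - "etcd.kube-system.svc.{{ dns_domain }}"
    - "etcd.kube-system.svc"
    - "etcd.kube-system"
    - "etcd"
    - "etcd.example.com"
  etcd_cert_alt_ips:
    - "192.0.2.10"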

@@ -0,0 +1,118 @@
---
# bind address for kube-proxy
kube_proxy_bind_address: '0.0.0.0'

# acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
# default value of 'application/json'. This field will control all connections to the server used by a particular
# client.
kube_proxy_client_accept_content_types: ''

# burst allows extra queries to accumulate when a client is exceeding its rate.
kube_proxy_client_burst: 10

# contentType is the content type used when sending data to the server from this client.
kube_proxy_client_content_type: application/vnd.kubernetes.protobuf

# kubeconfig is the path to a KubeConfig file.
# Leave as empty string to generate from other fields
kube_proxy_client_kubeconfig: ''

# qps controls the number of queries per second allowed for this connection.
kube_proxy_client_qps: 5

# How often configuration from the apiserver is refreshed. Must be greater than 0.
kube_proxy_config_sync_period: 15m0s

### Conntrack
# maxPerCore is the maximum number of NAT connections to track
# per CPU core (0 to leave the limit as-is and ignore min).
kube_proxy_conntrack_max_per_core: 32768

# min is the minimum value of connect-tracking records to allocate,
# regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is).
kube_proxy_conntrack_min: 131072

# tcpCloseWaitTimeout is how long an idle conntrack entry
# in CLOSE_WAIT state will remain in the conntrack
# table. (e.g. '60s'). Must be greater than 0 to set.
kube_proxy_conntrack_tcp_close_wait_timeout: 1h0m0s

# tcpEstablishedTimeout is how long an idle TCP connection will be kept open
# (e.g. '2s'). Must be greater than 0 to set.
kube_proxy_conntrack_tcp_established_timeout: 24h0m0s

# Enables profiling via web interface on /debug/pprof handler.
# Profiling handlers will be handled by metrics server.
kube_proxy_enable_profiling: false

# bind address for kube-proxy health check
kube_proxy_healthz_bind_address: 0.0.0.0:10256

# If using the pure iptables proxy, SNAT everything. Note that it breaks any
# policy engine.
kube_proxy_masquerade_all: false

# If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with.
# Must be within the range [0, 31].
kube_proxy_masquerade_bit: 14

# The minimum interval of how often the iptables or ipvs rules can be refreshed as
# endpoints and services change (e.g. '5s', '1m', '2h22m').
kube_proxy_min_sync_period: 0s

# The maximum interval of how often iptables or ipvs rules are refreshed (e.g. '5s', '1m', '2h22m').
# Must be greater than 0.
kube_proxy_sync_period: 30s

# A comma-separated list of CIDRs which the ipvs proxier should not touch when cleaning up IPVS rules.
kube_proxy_exclude_cidrs: []

# The ipvs scheduler type when proxy mode is ipvs
# rr: round-robin
# lc: least connection
# dh: destination hashing
# sh: source hashing
# sed: shortest expected delay
# nq: never queue
kube_proxy_scheduler: rr

# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
# must be set to true for MetalLB, kube-vip (ARP enabled) to work
kube_proxy_strict_arp: false

# kube_proxy_tcp_timeout is the timeout value used for idle IPVS TCP sessions.
# The default value is 0, which preserves the current timeout value on the system.
kube_proxy_tcp_timeout: 0s

# kube_proxy_tcp_fin_timeout is the timeout value used for IPVS TCP sessions after receiving a FIN.
# The default value is 0, which preserves the current timeout value on the system.
kube_proxy_tcp_fin_timeout: 0s

# kube_proxy_udp_timeout is the timeout value used for IPVS UDP packets.
# The default value is 0, which preserves the current timeout value on the system.
kube_proxy_udp_timeout: 0s

# The IP address and port for the metrics server to serve on
# (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces)
kube_proxy_metrics_bind_address: 127.0.0.1:10249

# A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
# The default empty string slice ([]) means to use all local addresses.
kube_proxy_nodeport_addresses: >-
  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
  [{{ kube_proxy_nodeport_addresses_cidr }}]
  {%- else -%}
  []
  {%- endif -%}

# oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]
kube_proxy_oom_score_adj: -999

# portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
# in order to proxy service traffic. If unspecified, 0, or (0-0) then ports will be randomly chosen.
kube_proxy_port_range: ''

# udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
# Must be greater than 0. Only applicable for proxyMode=userspace.
kube_proxy_udp_idle_timeout: 250ms
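
A worked example of the kube_proxy_nodeport_addresses expression above (CIDR value hypothetical): when the optional CIDR variable is set, the template yields a one-element list; left unset, it yields the empty list and NodePorts listen on all local addresses:

  # kube_proxy_nodeport_addresses_cidr: 10.0.0.0/8  ->  kube_proxy_nodeport_addresses == [10.0.0.0/8]
  # (unset)                                         ->  kube_proxy_nodeport_addresses == []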

@@ -0,0 +1,33 @@
---
# Extra args passed by kubeadm
kube_kubeadm_scheduler_extra_args: {}

# Associated interface must be reachable by the rest of the cluster, and by
# CLI/web clients.
kube_scheduler_bind_address: 0.0.0.0

# ClientConnection options (e.g. Burst, QPS) except from kubeconfig.
kube_scheduler_client_conn_extra_opts: {}

# Additional KubeSchedulerConfiguration settings (e.g. metricsBindAddress).
kube_scheduler_config_extra_opts: {}

# List of scheduler extenders (dicts), each holding the values of how to
# communicate with the extender.
kube_scheduler_extenders: []

# Leader Election options (e.g. ResourceName, RetryPeriod) except for
# LeaseDuration and RenewDeadline, which are defined in the following vars.
kube_scheduler_leader_elect_extra_opts: {}

# Leader election lease duration
kube_scheduler_leader_elect_lease_duration: 15s

# Leader election renew deadline
kube_scheduler_leader_elect_renew_deadline: 10s

# List of scheduling profiles (dicts) supported by kube-scheduler
kube_scheduler_profiles: []

# Extra volume mounts
scheduler_extra_volumes: {}
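
A sketch of a kube_scheduler_profiles entry, following the upstream KubeSchedulerConfiguration profile structure (plugin and strategy values are hypothetical illustrations, not defaults of this role):

  kube_scheduler_profiles:
    - schedulerName: default-scheduler
      pluginConfig:
        - name: NodeResourcesFit
          args:
            scoringStrategy:
              type: LeastAllocated
              resources:
                - name: cpu
                  weight: 1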

kubespray/roles/kubernetes/control-plane/defaults/main/main.yml (new file, 230 lines)
@@ -0,0 +1,230 @@
---
# disable upgrade cluster
upgrade_cluster_setup: false

# By default the external API listens on all interfaces, this can be changed to
# listen on a specific address/interface.
# NOTE: If you set a specific address/interface and use loadbalancer_apiserver_localhost,
# the local load balancer (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} too.
kube_apiserver_bind_address: 0.0.0.0

# A port range to reserve for services with NodePort visibility.
# Inclusive at both ends of the range.
kube_apiserver_node_port_range: "30000-32767"

# ETCD backend for k8s data
kube_apiserver_storage_backend: etcd3

# CIS 1.2.26
# Validate that the service account token
# in the request is actually present in etcd.
kube_apiserver_service_account_lookup: true

kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem

# Associated interfaces must be reachable by the rest of the cluster, and by
# CLI/web clients.
kube_controller_manager_bind_address: 0.0.0.0

# Leader election lease durations and timeouts for controller-manager
kube_controller_manager_leader_elect_lease_duration: 15s
kube_controller_manager_leader_elect_renew_deadline: 10s

# discovery_timeout modifies the discovery timeout
discovery_timeout: 5m0s

# Instruct first master to refresh kubeadm token
kubeadm_refresh_token: true

# Scale down coredns replicas to 0 if not using coredns dns_mode
kubeadm_scale_down_coredns_enabled: true

# audit support
kubernetes_audit: false
# path to audit log file
audit_log_path: /var/log/audit/kube-apiserver-audit.log
# num days
audit_log_maxage: 30
# the num of audit logs to retain
audit_log_maxbackups: 1
# the max size in MB to retain
audit_log_maxsize: 100
# policy file
audit_policy_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"
# custom audit policy rules (to replace the default ones)
# audit_policy_custom_rules: |
#   - level: None
#     users: []
#     verbs: []
#     resources: []

# audit log hostpath
audit_log_name: audit-logs
audit_log_hostpath: /var/log/kubernetes/audit
audit_log_mountpath: "{{ audit_log_path | dirname }}"

# audit policy hostpath
audit_policy_name: audit-policy
audit_policy_hostpath: "{{ audit_policy_file | dirname }}"
audit_policy_mountpath: "{{ audit_policy_hostpath }}"

# audit webhook support
kubernetes_audit_webhook: false

# path to audit webhook config file
audit_webhook_config_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-webhook-config.yaml"
audit_webhook_server_url: "https://audit.app"
audit_webhook_server_extra_args: {}
audit_webhook_mode: batch
audit_webhook_batch_max_size: 100
audit_webhook_batch_max_wait: 1s

kube_controller_node_monitor_grace_period: 40s
kube_controller_node_monitor_period: 5s
kube_controller_terminated_pod_gc_threshold: 12500
kube_apiserver_request_timeout: "1m0s"
kube_apiserver_pod_eviction_not_ready_timeout_seconds: "300"
kube_apiserver_pod_eviction_unreachable_timeout_seconds: "300"

# 1.10+ admission plugins
kube_apiserver_enable_admission_plugins: []

# enable admission plugins configuration
kube_apiserver_admission_control_config_file: false

# data structure to configure EventRateLimit admission plugin
# this should have the following structure:
# kube_apiserver_admission_event_rate_limits:
#   <limit_name>:
#     type: <limit_type>
#     qps: <qps_value>
#     burst: <burst_value>
#     cache_size: <cache_size_value>
kube_apiserver_admission_event_rate_limits: {}

kube_pod_security_use_default: false
kube_pod_security_default_enforce: baseline
kube_pod_security_default_enforce_version: latest
kube_pod_security_default_audit: restricted
kube_pod_security_default_audit_version: latest
kube_pod_security_default_warn: restricted
kube_pod_security_default_warn_version: latest
kube_pod_security_exemptions_usernames: []
kube_pod_security_exemptions_runtime_class_names: []
kube_pod_security_exemptions_namespaces:
  - kube-system

# 1.10+ list of disabled admission plugins
kube_apiserver_disable_admission_plugins: []

# extra runtime config
kube_api_runtime_config: []

## Enable/Disable Kube API Server Authentication Methods
kube_token_auth: false
kube_oidc_auth: false

## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
kube_webhook_token_auth: false
kube_webhook_token_auth_url_skip_tls_verify: false
# kube_webhook_token_auth_url: https://...
## base64-encoded string of the webhook's CA certificate
# kube_webhook_token_auth_ca_data: "LS0t..."

## Variables for webhook token authz https://kubernetes.io/docs/reference/access-authn-authz/webhook/
# kube_webhook_authorization_url: https://...
kube_webhook_authorization: false
kube_webhook_authorization_url_skip_tls_verify: false

## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)

# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: 'oidc:'
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: 'oidc:'
# Copy oidc CA file to the following path if needed
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# Optionally include a base64-encoded oidc CA cert
# kube_oidc_ca_cert: c3RhY2thYnVzZS5jb20...

# List of the preferred NodeAddressTypes to use for kubelet connections.
kubelet_preferred_address_types: 'InternalDNS,InternalIP,Hostname,ExternalDNS,ExternalIP'

## Extra args for k8s components passed by kubeadm
kube_kubeadm_apiserver_extra_args: {}
kube_kubeadm_controller_extra_args: {}

## Extra control plane host volume mounts
## Example:
# apiserver_extra_volumes:
#  - name: name
#    hostPath: /host/path
#    mountPath: /mount/path
#    readOnly: true
apiserver_extra_volumes: {}
controller_manager_extra_volumes: {}

## Encrypting Secret Data at Rest
kube_encrypt_secret_data: false
kube_encrypt_token: "{{ lookup('password', credentials_dir + '/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}"
# Must be either: aescbc, secretbox or aesgcm
kube_encryption_algorithm: "secretbox"
# Which kubernetes resources to encrypt
kube_encryption_resources: [secrets]

# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
  {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
  {%- else -%}
  {{ inventory_hostname }}
  {%- endif -%}

secrets_encryption_query: "resources[*].providers[0].{{kube_encryption_algorithm}}.keys[0].secret"

## Supported TLS minimum version. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
# tls_min_version: ""

## Supported TLS cipher suites.
# tls_cipher_suites:
#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
#   - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
#   - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
#   - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
#   - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
#   - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
#   - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
#   - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
#   - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
#   - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
#   - TLS_ECDHE_RSA_WITH_RC4_128_SHA
#   - TLS_RSA_WITH_3DES_EDE_CBC_SHA
#   - TLS_RSA_WITH_AES_128_CBC_SHA
#   - TLS_RSA_WITH_AES_128_CBC_SHA256
#   - TLS_RSA_WITH_AES_128_GCM_SHA256
#   - TLS_RSA_WITH_AES_256_CBC_SHA
#   - TLS_RSA_WITH_AES_256_GCM_SHA384
#   - TLS_RSA_WITH_RC4_128_SHA

## Amount of time to retain events. (default 1h0m0s)
event_ttl_duration: "1h0m0s"

## Automatically renew K8S control plane certificates on first Monday of each month
auto_renew_certificates: false
# First Monday of each month
auto_renew_certificates_systemd_calendar: "{{ 'Mon *-*-1,2,3,4,5,6,7 03:' ~
  groups['kube_control_plane'].index(inventory_hostname) ~ '0:00' }}"
# kubeadm renews all the certificates during control plane upgrade.
# If you need to upgrade the cluster without renewing certificates,
# opt out of the default behavior by setting kubeadm_upgrade_auto_cert_renewal to false.
kubeadm_upgrade_auto_cert_renewal: true
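
A sketch tying several of the knobs above together: enabling the EventRateLimit admission plugin with the data structure documented in the comments (limit values hypothetical):

  kube_apiserver_enable_admission_plugins:
    - EventRateLimit
  kube_apiserver_admission_control_config_file: true
  kube_apiserver_admission_event_rate_limits:
    limit_1:
      type: Namespace
      qps: 50
      burst: 100
      cache_size: 2000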

kubespray/roles/kubernetes/control-plane/handlers/main.yml (new file, 123 lines)
@@ -0,0 +1,123 @@
---
- name: Master | restart kubelet
  command: /bin/true
  notify:
    - Master | reload systemd
    - Master | reload kubelet
    - Master | wait for master static pods

- name: Master | wait for master static pods
  command: /bin/true
  notify:
    - Master | wait for the apiserver to be running
    - Master | wait for kube-scheduler
    - Master | wait for kube-controller-manager

- name: Master | Restart apiserver
  command: /bin/true
  notify:
    - Master | Remove apiserver container docker
    - Master | Remove apiserver container containerd/crio
    - Master | wait for the apiserver to be running

- name: Master | Restart kube-scheduler
  command: /bin/true
  notify:
    - Master | Remove scheduler container docker
    - Master | Remove scheduler container containerd/crio
    - Master | wait for kube-scheduler

- name: Master | Restart kube-controller-manager
  command: /bin/true
  notify:
    - Master | Remove controller manager container docker
    - Master | Remove controller manager container containerd/crio
    - Master | wait for kube-controller-manager

- name: Master | reload systemd
  systemd:
    daemon_reload: true

- name: Master | reload kubelet
  service:
    name: kubelet
    state: restarted

- name: Master | Remove apiserver container docker
  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
  register: remove_apiserver_container
  retries: 10
  until: remove_apiserver_container.rc == 0
  delay: 1
  when: container_manager == "docker"

- name: Master | Remove apiserver container containerd/crio
  shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
  register: remove_apiserver_container
  retries: 10
  until: remove_apiserver_container.rc == 0
  delay: 1
  when: container_manager in ['containerd', 'crio']

- name: Master | Remove scheduler container docker
  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
  register: remove_scheduler_container
  retries: 10
  until: remove_scheduler_container.rc == 0
  delay: 1
  when: container_manager == "docker"

- name: Master | Remove scheduler container containerd/crio
  shell: "{{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
  register: remove_scheduler_container
  retries: 10
  until: remove_scheduler_container.rc == 0
  delay: 1
  when: container_manager in ['containerd', 'crio']

- name: Master | Remove controller manager container docker
  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
  register: remove_cm_container
  retries: 10
  until: remove_cm_container.rc == 0
  delay: 1
  when: container_manager == "docker"

- name: Master | Remove controller manager container containerd/crio
  shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
  register: remove_cm_container
  retries: 10
  until: remove_cm_container.rc == 0
  delay: 1
  when: container_manager in ['containerd', 'crio']

- name: Master | wait for kube-scheduler
  vars:
    endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
  uri:
    url: https://{{ endpoint }}:10259/healthz
    validate_certs: no
  register: scheduler_result
  until: scheduler_result.status == 200
  retries: 60
  delay: 1

- name: Master | wait for kube-controller-manager
  vars:
    endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
  uri:
    url: https://{{ endpoint }}:10257/healthz
    validate_certs: no
  register: controller_manager_result
  until: controller_manager_result.status == 200
  retries: 60
  delay: 1

- name: Master | wait for the apiserver to be running
  uri:
    url: "{{ kube_apiserver_endpoint }}/healthz"
    validate_certs: no
  register: result
  until: result.status == 200
  retries: 60
  delay: 1
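
These handlers chain through notify rather than acting directly: "Master | Restart apiserver" only removes the old static-pod container and then waits for the kubelet to bring it back and pass /healthz. A sketch of a task that would trigger the chain (template name hypothetical):

  - name: Update kube-apiserver manifest
    template:
      src: kube-apiserver.yaml.j2
      dest: "{{ kube_config_dir }}/manifests/kube-apiserver.yaml"
    notify: Master | Restart apiserver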

kubespray/roles/kubernetes/control-plane/meta/main.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
---
dependencies:
  - role: kubernetes/tokens
    when: kube_token_auth
    tags:
      - k8s-secrets
  - role: adduser
    user: "{{ addusers.etcd }}"
    when:
      - etcd_deployment_type == "kubeadm"
      - not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos)

@@ -0,0 +1,19 @@
---

- name: Check which kube-control nodes are already members of the cluster
  command: "{{ bin_dir }}/kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o json"
  register: kube_control_planes_raw
  ignore_errors: yes
  changed_when: false

- name: Set fact joined_control_planes
  set_fact:
    joined_control_planes: "{{ ((kube_control_planes_raw.stdout | from_json)['items']) | default([]) | map(attribute='metadata') | map(attribute='name') | list }}"
  delegate_to: "{{ item }}"
  loop: "{{ groups['kube_control_plane'] }}"
  when: kube_control_planes_raw is succeeded
  run_once: yes

- name: Set fact first_kube_control_plane
  set_fact:
    first_kube_control_plane: "{{ joined_control_planes|default([]) | first | default(groups['kube_control_plane']|first) }}"
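
A worked example of the filter chain above (node names hypothetical):

  # kubectl output: {"items": [{"metadata": {"name": "cp-0"}}, {"metadata": {"name": "cp-1"}}]}
  # joined_control_planes    -> ['cp-0', 'cp-1']
  # first_kube_control_plane -> 'cp-0' (falls back to groups['kube_control_plane'] | first when the list is empty)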

@@ -0,0 +1,42 @@
---
- name: Check if secret for encrypting data at rest already exists
  stat:
    path: "{{ kube_cert_dir }}/secrets_encryption.yaml"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: secrets_encryption_file

- name: Slurp secrets_encryption file if it exists
  slurp:
    src: "{{ kube_cert_dir }}/secrets_encryption.yaml"
  register: secret_file_encoded
  when: secrets_encryption_file.stat.exists

- name: Base 64 decode slurped secrets_encryption.yaml file
  set_fact:
    secret_file_decoded: "{{ secret_file_encoded['content'] | b64decode | from_yaml }}"
  when: secrets_encryption_file.stat.exists

- name: Extract secret value from secrets_encryption.yaml
  set_fact:
    kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}"
  when: secrets_encryption_file.stat.exists

- name: Set kube_encrypt_token across master nodes
  set_fact:
    kube_encrypt_token: "{{ kube_encrypt_token_extracted }}"
  delegate_to: "{{ item }}"
  delegate_facts: true
  with_inventory_hostnames: kube_control_plane
  when: kube_encrypt_token_extracted is defined

- name: Write secrets for encrypting secret data at rest
  template:
    src: secrets_encryption.yaml.j2
    dest: "{{ kube_cert_dir }}/secrets_encryption.yaml"
    owner: root
    group: "{{ kube_cert_group }}"
    mode: 0640
  tags:
    - kube-apiserver
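
For reference, a sketch of the shape the json_query above expects from secrets_encryption.yaml (query: resources[*].providers[0].<algorithm>.keys[0].secret; the rendered template itself is not part of this diff, and the key value below is a hypothetical placeholder):

  apiVersion: apiserver.config.k8s.io/v1
  kind: EncryptionConfiguration
  resources:
    - resources:
        - secrets
      providers:
        - secretbox:
            keys:
              - name: key
                secret: <base64-encoded 32-byte key>
        - identity: {}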

@@ -0,0 +1,28 @@
---
- name: Backup old certs and keys
  copy:
    src: "{{ kube_cert_dir }}/{{ item }}"
    dest: "{{ kube_cert_dir }}/{{ item }}.old"
    mode: preserve
    remote_src: yes
  with_items:
    - apiserver.crt
    - apiserver.key
    - apiserver-kubelet-client.crt
    - apiserver-kubelet-client.key
    - front-proxy-client.crt
    - front-proxy-client.key
  ignore_errors: true  # noqa ignore-errors

- name: Backup old confs
  copy:
    src: "{{ kube_config_dir }}/{{ item }}"
    dest: "{{ kube_config_dir }}/{{ item }}.old"
    mode: preserve
    remote_src: yes
  with_items:
    - admin.conf
    - controller-manager.conf
    - kubelet.conf
    - scheduler.conf
  ignore_errors: true  # noqa ignore-errors

@@ -0,0 +1,26 @@
---
- name: Calculate etcd cert serial
  command: "openssl x509 -in {{ kube_cert_dir }}/apiserver-etcd-client.crt -noout -serial"
  register: "etcd_client_cert_serial_result"
  changed_when: false
  tags:
    - network

- name: Set etcd_client_cert_serial
  set_fact:
    etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}"
  tags:
    - network

- name: Ensure etcdctl script is installed
  import_role:
    name: etcdctl
  when: etcd_deployment_type == "kubeadm"

- name: Set ownership for etcd data directory
  file:
    path: "{{ etcd_data_dir }}"
    owner: "{{ etcd_owner }}"
    group: "{{ etcd_owner }}"
    mode: 0700
  when: etcd_deployment_type == "kubeadm"
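
A worked example of the serial extraction above (serial hypothetical): openssl prints a single line of the form serial=<hex>, so splitting on '=' and taking index 1 leaves just the serial:

  # stdout: "serial=2A8F03D4C1"
  # etcd_client_cert_serial -> "2A8F03D4C1"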

@@ -0,0 +1,24 @@
---

- name: Update server field in component kubeconfigs
  lineinfile:
    dest: "{{ kube_config_dir }}/{{ item }}"
    regexp: '^    server: https'
    line: '    server: {{ kube_apiserver_endpoint }}'
    backup: yes
  with_items:
    - admin.conf
    - controller-manager.conf
    - kubelet.conf
    - scheduler.conf
  notify:
    - "Master | Restart kube-controller-manager"
    - "Master | Restart kube-scheduler"
    - "Master | reload kubelet"

- name: Update etcd-servers for apiserver
  lineinfile:
    dest: "{{ kube_config_dir }}/manifests/kube-apiserver.yaml"
    regexp: '^    - --etcd-servers='
    line: '    - --etcd-servers={{ etcd_access_addresses }}'
  when: etcd_deployment_type != "kubeadm"
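
A before/after sketch of the first lineinfile above, assuming a hypothetical endpoint of https://10.0.0.10:6443 (the leading spaces match the nesting of the server field inside a kubeconfig's cluster entry):

  # before:  "    server: https://127.0.0.1:6443"
  # after:   "    server: https://10.0.0.10:6443"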

@@ -0,0 +1,79 @@
---
- name: Set kubeadm_discovery_address
  set_fact:
    kubeadm_discovery_address: >-
      {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
      {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
      {%- else -%}
      {{ kube_apiserver_endpoint | regex_replace('https://', '') }}
      {%- endif %}
  tags:
    - facts

- name: Upload certificates so they are fresh and not expired
  command: >-
    {{ bin_dir }}/kubeadm init phase
    --config {{ kube_config_dir }}/kubeadm-config.yaml
    upload-certs
    --upload-certs
  register: kubeadm_upload_cert
  when:
    - inventory_hostname == first_kube_control_plane
    - not kube_external_ca_mode

- name: Parse certificate key if not set
  set_fact:
    kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
  run_once: yes
  when:
    - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined
    - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped

- name: Create kubeadm ControlPlane config
  template:
    src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml"
    mode: 0640
    backup: yes
  when:
    - inventory_hostname != first_kube_control_plane
    - not kubeadm_already_run.stat.exists

- name: Wait for k8s apiserver
  wait_for:
    host: "{{ kubeadm_discovery_address.split(':')[0] }}"
    port: "{{ kubeadm_discovery_address.split(':')[1] }}"
    timeout: 180

- name: Check already run
  debug:
    msg: "{{ kubeadm_already_run.stat.exists }}"

- name: Reset cert directory
  shell: >-
    if [ -f /etc/kubernetes/manifests/kube-apiserver.yaml ]; then
    {{ bin_dir }}/kubeadm reset -f --cert-dir {{ kube_cert_dir }};
    fi
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  when:
    - inventory_hostname != first_kube_control_plane
    - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
    - not kube_external_ca_mode

- name: Joining control plane node to the cluster.
  command: >-
    {{ bin_dir }}/kubeadm join
    --config {{ kube_config_dir }}/kubeadm-controlplane.yaml
    --ignore-preflight-errors=all
    --skip-phases={{ kubeadm_join_phases_skip | join(',') }}
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  register: kubeadm_join_control_plane
  retries: 3
  throttle: 1
  until: kubeadm_join_control_plane is succeeded
  when:
    - inventory_hostname != first_kube_control_plane
    - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
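
A worked example of the kubeadm_discovery_address resolution above (addresses hypothetical):

  # kube_apiserver_endpoint: https://127.0.0.1:6443 (local LB)  ->  {{ first_kube_control_plane_address }}:6443
  # kube_apiserver_endpoint: https://10.0.0.10:6443             ->  10.0.0.10:6443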

kubespray/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml (new file, 248 lines)
@@ -0,0 +1,248 @@
---
- name: Install OIDC certificate
  copy:
    content: "{{ kube_oidc_ca_cert | b64decode }}"
    dest: "{{ kube_oidc_ca_file }}"
    owner: root
    group: root
    mode: "0644"
  when:
    - kube_oidc_auth
    - kube_oidc_ca_cert is defined

- name: kubeadm | Check if kubeadm has already run
  stat:
    path: "/var/lib/kubelet/config.yaml"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubeadm_already_run

- name: kubeadm | Backup kubeadm certs / kubeconfig
  import_tasks: kubeadm-backup.yml
  when:
    - kubeadm_already_run.stat.exists

- name: kubeadm | aggregate all SANs
  set_fact:
    apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}"
  vars:
    sans_base:
      - "kubernetes"
      - "kubernetes.default"
      - "kubernetes.default.svc"
      - "kubernetes.default.svc.{{ dns_domain }}"
      - "{{ kube_apiserver_ip }}"
      - "localhost"
      - "127.0.0.1"
    sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}"
    sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}"
    sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}"
    sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}"
    sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}"
    sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
    sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}"
    sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
    sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
    sans_kube_vip_address: "{{ [kube_vip_address] if kube_vip_address is defined and kube_vip_address else [] }}"
  tags: facts

- name: Create audit-policy directory
  file:
    path: "{{ audit_policy_file | dirname }}"
    state: directory
    mode: 0640
  when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)

- name: Write api audit policy yaml
  template:
    src: apiserver-audit-policy.yaml.j2
    dest: "{{ audit_policy_file }}"
    mode: 0640
  when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)

- name: Write api audit webhook config yaml
  template:
    src: apiserver-audit-webhook-config.yaml.j2
    dest: "{{ audit_webhook_config_file }}"
    mode: 0640
  when: kubernetes_audit_webhook|default(false)

# Nginx LB (default). If kubeadm_config_api_fqdn is defined, another LB is used via kubeadm's controlPlaneEndpoint.
- name: Set kubeadm_config_api_fqdn
  set_fact:
    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}"
  when: loadbalancer_apiserver is defined

- name: Set kubeadm api version to v1beta3
  set_fact:
    kubeadmConfig_api_version: v1beta3

- name: kubeadm | Create kubeadm config
  template:
    src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
    mode: 0640

- name: kubeadm | Create directory to store admission control configurations
  file:
    path: "{{ kube_config_dir }}/admission-controls"
    state: directory
    mode: 0640
  when: kube_apiserver_admission_control_config_file

- name: kubeadm | Push admission control config file
  template:
    src: "admission-controls.yaml.j2"
    dest: "{{ kube_config_dir }}/admission-controls/admission-controls.yaml"
    mode: 0640
  when: kube_apiserver_admission_control_config_file

- name: kubeadm | Push admission control config files
  template:
    src: "{{ item|lower }}.yaml.j2"
    dest: "{{ kube_config_dir }}/admission-controls/{{ item|lower }}.yaml"
    mode: 0640
  when:
    - kube_apiserver_admission_control_config_file
    - item in kube_apiserver_admission_plugins_needs_configuration
  loop: "{{ kube_apiserver_enable_admission_plugins }}"

- name: kubeadm | Check if apiserver.crt contains all needed SANs
  shell: |
    set -o pipefail
    for IP in {{ apiserver_ips | join(' ') }}; do
      openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkip $IP | grep -q 'does match certificate' || echo 'NEED-RENEW'
    done
    for HOST in {{ apiserver_hosts | join(' ') }}; do
      openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkhost $HOST | grep -q 'does match certificate' || echo 'NEED-RENEW'
    done
  vars:
    apiserver_ips: "{{ apiserver_sans|map('ipaddr')|reject('equalto', False)|list }}"
    apiserver_hosts: "{{ apiserver_sans|difference(apiserver_ips) }}"
  args:
    executable: /bin/bash
  register: apiserver_sans_check
  changed_when: "'NEED-RENEW' in apiserver_sans_check.stdout"
  when:
    - kubeadm_already_run.stat.exists
    - not kube_external_ca_mode

- name: kubeadm | regenerate apiserver cert 1/2
  file:
    state: absent
    path: "{{ kube_cert_dir }}/{{ item }}"
  with_items:
    - apiserver.crt
    - apiserver.key
  when:
    - kubeadm_already_run.stat.exists
    - apiserver_sans_check.changed
    - not kube_external_ca_mode

- name: kubeadm | regenerate apiserver cert 2/2
  command: >-
    {{ bin_dir }}/kubeadm
    init phase certs apiserver
    --config={{ kube_config_dir }}/kubeadm-config.yaml
  when:
    - kubeadm_already_run.stat.exists
    - apiserver_sans_check.changed
    - not kube_external_ca_mode

- name: kubeadm | Create directory to store kubeadm patches
  file:
    path: "{{ kubeadm_patches.dest_dir }}"
    state: directory
    mode: 0640
  when: kubeadm_patches is defined and kubeadm_patches.enabled

- name: kubeadm | Copy kubeadm patches from inventory files
  copy:
    src: "{{ kubeadm_patches.source_dir }}/"
    dest: "{{ kubeadm_patches.dest_dir }}"
    owner: "root"
    mode: 0644
  when: kubeadm_patches is defined and kubeadm_patches.enabled

- name: kubeadm | Initialize first master
  command: >-
    timeout -k 300s 300s
    {{ bin_dir }}/kubeadm init
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
    --skip-phases={{ kubeadm_init_phases_skip | join(',') }}
    {{ kube_external_ca_mode | ternary('', '--upload-certs') }}
  register: kubeadm_init
  # Retry is because upload config sometimes fails
  retries: 3
  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
  when: inventory_hostname == first_kube_control_plane and not kubeadm_already_run.stat.exists
  failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Master | restart kubelet

- name: Set kubeadm certificate key
  set_fact:
    kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"
  with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
  when:
    - kubeadm_certificate_key is not defined
    - (item | trim) is match('.*--certificate-key.*')

- name: Create hardcoded kubeadm token for joining nodes with 24h expiration (if defined)
  shell: >-
    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token delete {{ kubeadm_token }} || :;
    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create {{ kubeadm_token }}
  changed_when: false
  when:
    - inventory_hostname == first_kube_control_plane
    - kubeadm_token is defined
    - kubeadm_refresh_token
  tags:
    - kubeadm_token

- name: Create kubeadm token for joining nodes with 24h expiration (default)
  command: "{{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create"
  changed_when: false
  register: temp_token
  retries: 5
  delay: 5
  until: temp_token is succeeded
  delegate_to: "{{ first_kube_control_plane }}"
  when: kubeadm_token is not defined
  tags:
    - kubeadm_token

- name: Set kubeadm_token
  set_fact:
    kubeadm_token: "{{ temp_token.stdout }}"
  when: temp_token.stdout is defined
  tags:
    - kubeadm_token

- name: PodSecurityPolicy | install PodSecurityPolicy
  include_tasks: psp-install.yml
  when:
    - podsecuritypolicy_enabled
    - inventory_hostname == first_kube_control_plane

- name: kubeadm | Join other masters
  include_tasks: kubeadm-secondary.yml

- name: kubeadm | upgrade kubernetes cluster
  include_tasks: kubeadm-upgrade.yml
  when:
    - upgrade_cluster_setup
    - kubeadm_already_run.stat.exists

# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
- name: kubeadm | Remove taint for master with node role
  command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
  delegate_to: "{{ first_kube_control_plane }}"
  with_items:
    - "node-role.kubernetes.io/master:NoSchedule-"
    - "node-role.kubernetes.io/control-plane:NoSchedule-"
  when: inventory_hostname in groups['kube_node']
  failed_when: false
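
A worked example of the certificate-key parsing above (key value hypothetical): kubeadm init prints a join command for additional control plane nodes, and the regex pulls out the key:

  # item: "  kubeadm join 10.0.0.10:6443 --token <t> --control-plane --certificate-key 9555b74008f24687eb96..."
  # item | regex_search('--certificate-key ([^ ]+)','\\1')  ->  ['9555b74008f24687eb96...']
  # ... | first                                             ->  '9555b74008f24687eb96...'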

@@ -0,0 +1,75 @@
---
- name: kubeadm | Check api is up
  uri:
    url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
    validate_certs: false
  when: inventory_hostname in groups['kube_control_plane']
  register: _result
  retries: 60
  delay: 5
  until: _result.status == 200

- name: kubeadm | Upgrade first master
  command: >-
    timeout -k 600s 600s
    {{ bin_dir }}/kubeadm
    upgrade apply -y {{ kube_version }}
    --certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }}
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
    --allow-experimental-upgrades
    --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }}
    --force
  register: kubeadm_upgrade
  # Retry is because upload config sometimes fails
  retries: 3
  until: kubeadm_upgrade.rc == 0
  when: inventory_hostname == first_kube_control_plane
  failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Master | restart kubelet

- name: kubeadm | Upgrade other masters
  command: >-
    timeout -k 600s 600s
    {{ bin_dir }}/kubeadm
    upgrade apply -y {{ kube_version }}
    --certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }}
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
    --allow-experimental-upgrades
    --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }}
    --force
  register: kubeadm_upgrade
  when: inventory_hostname != first_kube_control_plane
  failed_when:
    - kubeadm_upgrade.rc != 0
    - '"field is immutable" not in kubeadm_upgrade.stderr'
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Master | restart kubelet

- name: kubeadm | clean kubectl cache to refresh api types
  file:
    path: "{{ item }}"
    state: absent
  with_items:
    - /root/.kube/cache
    - /root/.kube/http-cache

# FIXME: https://github.com/kubernetes/kubeadm/issues/1318
- name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
  command: >-
    {{ kubectl }}
    -n kube-system
    scale deployment/coredns --replicas 0
  register: scale_down_coredns
  retries: 6
  delay: 5
  until: scale_down_coredns is succeeded
  run_once: yes
  when:
    - kubeadm_scale_down_coredns_enabled
    - dns_mode not in ['coredns', 'coredns_dual']
  changed_when: false

@@ -0,0 +1,18 @@
---
- name: Fixup kubelet client cert rotation 1/2
  lineinfile:
    path: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '^    client-certificate-data: '
    line: '    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
    backup: yes
  notify:
    - "Master | reload kubelet"

- name: Fixup kubelet client cert rotation 2/2
  lineinfile:
    path: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '^    client-key-data: '
    line: '    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
    backup: yes
  notify:
    - "Master | reload kubelet"
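
A before/after sketch of the two edits above: the embedded, non-rotating credentials in kubelet.conf are replaced by file references that follow kubelet's rotated-cert symlink:

  # before:  "    client-certificate-data: <base64>"  and  "    client-key-data: <base64>"
  # after:   "    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem"
  #          "    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem"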

kubespray/roles/kubernetes/control-plane/tasks/main.yml (new file, 104 lines)
@@ -0,0 +1,104 @@
---
- import_tasks: pre-upgrade.yml
  tags:
    - k8s-pre-upgrade

- name: Create webhook token auth config
  template:
    src: webhook-token-auth-config.yaml.j2
    dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
    mode: 0640
  when: kube_webhook_token_auth|default(false)

- name: Create webhook authorization config
  template:
    src: webhook-authorization-config.yaml.j2
    dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
    mode: 0640
  when: kube_webhook_authorization|default(false)

- name: Create kube-scheduler config
  template:
    src: kubescheduler-config.yaml.j2
    dest: "{{ kube_config_dir }}/kubescheduler-config.yaml"
    mode: 0644

- import_tasks: encrypt-at-rest.yml
  when:
    - kube_encrypt_secret_data

- name: Install | Copy kubectl binary from download dir
  copy:
    src: "{{ local_release_dir }}/kubectl-{{ kube_version }}-{{ image_arch }}"
    dest: "{{ bin_dir }}/kubectl"
    mode: 0755
    remote_src: true
  tags:
    - kubectl
    - upgrade

- name: Install kubectl bash completion
  shell: "{{ bin_dir }}/kubectl completion bash >/etc/bash_completion.d/kubectl.sh"
  when: ansible_os_family in ["Debian","RedHat"]
  tags:
    - kubectl
  ignore_errors: true  # noqa ignore-errors

- name: Set kubectl bash completion file permissions
  file:
    path: /etc/bash_completion.d/kubectl.sh
    owner: root
    group: root
    mode: 0755
  when: ansible_os_family in ["Debian","RedHat"]
  tags:
    - kubectl
    - upgrade
  ignore_errors: true  # noqa ignore-errors

- name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy
  set_fact:
    kube_apiserver_enable_admission_plugins: "{{ kube_apiserver_enable_admission_plugins | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}"
  when: podsecuritypolicy_enabled

- name: Define nodes already joined to existing cluster and first_kube_control_plane
  import_tasks: define-first-kube-control.yml

- name: Include kubeadm setup
  import_tasks: kubeadm-setup.yml

- name: Include kubeadm etcd extra tasks
  include_tasks: kubeadm-etcd.yml
  when: etcd_deployment_type == "kubeadm"

- name: Include kubeadm secondary server apiserver fixes
  include_tasks: kubeadm-fix-apiserver.yml

- name: Include kubelet client cert rotation fixes
  include_tasks: kubelet-fix-client-cert-rotation.yml
  when: kubelet_rotate_certificates

- name: Install script to renew K8S control plane certificates
  template:
    src: k8s-certs-renew.sh.j2
    dest: "{{ bin_dir }}/k8s-certs-renew.sh"
    mode: 0755

- name: Renew K8S control plane certificates monthly 1/2
  template:
    src: "{{ item }}.j2"
    dest: "/etc/systemd/system/{{ item }}"
    mode: 0644
  with_items:
    - k8s-certs-renew.service
    - k8s-certs-renew.timer
  register: k8s_certs_units
  when: auto_renew_certificates

- name: Renew K8S control plane certificates monthly 2/2
  systemd:
    name: k8s-certs-renew.timer
    enabled: yes
    state: started
    daemon_reload: "{{ k8s_certs_units is changed }}"
  when: auto_renew_certificates

@@ -0,0 +1,21 @@
---
- name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
  file:
    path: "/etc/kubernetes/manifests/{{ item }}.manifest"
    state: absent
  with_items:
    - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
  register: kube_apiserver_manifest_replaced
  when: etcd_secret_changed|default(false)

- name: "Pre-upgrade | Delete master containers forcefully"  # noqa 503
  shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
  args:
    executable: /bin/bash
  with_items:
    - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
  when: kube_apiserver_manifest_replaced.changed
  register: remove_master_container
  retries: 10
  until: remove_master_container.rc == 0
  delay: 1

@@ -0,0 +1,38 @@
---
- name: Check AppArmor status
  command: which apparmor_parser
  register: apparmor_status
  failed_when: false
  changed_when: apparmor_status.rc != 0

- name: Set apparmor_enabled
  set_fact:
    apparmor_enabled: "{{ apparmor_status.rc == 0 }}"

- name: Render templates for PodSecurityPolicy
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: 0640
  register: psp_manifests
  with_items:
    - {file: psp.yml, type: psp, name: psp}
    - {file: psp-cr.yml, type: clusterrole, name: psp-cr}
    - {file: psp-crb.yml, type: rolebinding, name: psp-crb}

- name: Add policies, roles, bindings for PodSecurityPolicy
  kube:
    name: "{{ item.item.name }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  register: result
  until: result is succeeded
  retries: 10
  delay: 6
  with_items: "{{ psp_manifests.results }}"
  environment:
    KUBECONFIG: "{{ kube_config_dir }}/admin.conf"
  loop_control:
    label: "{{ item.item.file }}"
@@ -0,0 +1,9 @@
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
{% for plugin in kube_apiserver_enable_admission_plugins %}
{% if plugin in kube_apiserver_admission_plugins_needs_configuration %}
- name: {{ plugin }}
  path: {{ kube_config_dir }}/{{ plugin|lower }}.yaml
{% endif %}
{% endfor %}
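For reference, a sketch of what this template renders to when kube_apiserver_enable_admission_plugins contains EventRateLimit and kube_config_dir keeps its default (values assumed, not taken from a real run):

apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: EventRateLimit
  path: /etc/kubernetes/eventratelimit.yaml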
@@ -0,0 +1,129 @@
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
{% if audit_policy_custom_rules is defined and audit_policy_custom_rules != "" %}
{{ audit_policy_custom_rules | indent(2, true) }}
{% else %}
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads `configmaps/ingress-uid` through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"] # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics.k8s.io"
  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*
  # Don't log events requests.
  - level: None
    resources:
      - group: "" # core
        resources: ["events"]
  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core
        resources: ["secrets", "configmaps", "serviceaccounts/token"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "settings.k8s.io"
      - group: "storage.k8s.io"
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "settings.k8s.io"
      - group: "storage.k8s.io"
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
{% endif %}
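The audit_policy_custom_rules escape hatch replaces the entire default rule set; the template indents it two spaces, so the variable should hold top-level rule entries. A minimal sketch (the monitoring service account is an assumed example, not a role default):

audit_policy_custom_rules: |
  - level: None
    users: ["system:serviceaccount:monitoring:prometheus"]
  - level: Metadata
    omitStages:
      - "RequestReceived"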
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Config
clusters:
  - cluster:
      server: {{ audit_webhook_server_url }}
{% for key in audit_webhook_server_extra_args %}
      {{ key }}: "{{ audit_webhook_server_extra_args[key] }}"
{% endfor %}
    name: auditsink
contexts:
  - context:
      cluster: auditsink
      user: ""
    name: default-context
current-context: default-context
preferences: {}
users: []
@@ -0,0 +1,11 @@
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
kind: Configuration
limits:
{% for limit in kube_apiserver_admission_event_rate_limits.values() %}
- type: {{ limit.type }}
  qps: {{ limit.qps }}
  burst: {{ limit.burst }}
{% if limit.cache_size is defined %}
  cacheSize: {{ limit.cache_size }}
{% endif %}
{% endfor %}
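This template iterates over the values of a dict, so the keys only serve as labels. A sketch of the expected inventory shape (limit names and numbers are assumptions):

kube_apiserver_admission_event_rate_limits:
  limit_1:
    type: Namespace
    qps: 50
    burst: 100
    cache_size: 2000
  limit_2:
    type: User
    qps: 50
    burst: 100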
@@ -0,0 +1,6 @@
[Unit]
Description=Renew K8S control plane certificates

[Service]
Type=oneshot
ExecStart={{ bin_dir }}/k8s-certs-renew.sh
@@ -0,0 +1,23 @@
#!/bin/bash

echo "## Expiration before renewal ##"
{{ bin_dir }}/kubeadm certs check-expiration

echo "## Renewing certificates managed by kubeadm ##"
{{ bin_dir }}/kubeadm certs renew all

echo "## Restarting control plane pods managed by kubeadm ##"
{% if container_manager == "docker" %}
{{ docker_bin_dir }}/docker ps -af 'name=k8s_POD_(kube-apiserver|kube-controller-manager|kube-scheduler|etcd)-*' -q | /usr/bin/xargs {{ docker_bin_dir }}/docker rm -f
{% else %}
{{ bin_dir }}/crictl pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs {{ bin_dir }}/crictl rmp -f
{% endif %}

echo "## Updating /root/.kube/config ##"
cp {{ kube_config_dir }}/admin.conf /root/.kube/config

echo "## Waiting for apiserver to be up again ##"
until printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done

echo "## Expiration after renewal ##"
{{ bin_dir }}/kubeadm certs check-expiration
@@ -0,0 +1,8 @@
[Unit]
Description=Timer to renew K8S control plane certificates

[Timer]
OnCalendar={{ auto_renew_certificates_systemd_calendar }}

[Install]
WantedBy=multi-user.target
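OnCalendar takes a systemd calendar expression. One workable value staggers each control plane node by ten minutes so renewals do not all fire at the same instant; this is a sketch, not necessarily the role's default:

auto_renew_certificates_systemd_calendar: >-
  Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00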
@@ -0,0 +1,453 @@
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
{% if kubeadm_token is defined %}
bootstrapTokens:
- token: "{{ kubeadm_token }}"
  description: "kubespray kubeadm bootstrap token"
  ttl: "24h"
{% endif %}
localAPIEndpoint:
  advertiseAddress: {{ ip | default(fallback_ips[inventory_hostname]) }}
  bindPort: {{ kube_apiserver_port }}
{% if kubeadm_certificate_key is defined %}
certificateKey: {{ kubeadm_certificate_key }}
{% endif %}
nodeRegistration:
{% if kube_override_hostname|default('') %}
  name: {{ kube_override_hostname }}
{% endif %}
{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
{% else %}
  taints: []
{% endif %}
  criSocket: {{ cri_socket }}
{% if cloud_provider is defined and cloud_provider in ["external"] %}
  kubeletExtraArgs:
    cloud-provider: external
{% endif %}
{% if kubeadm_patches is defined and kubeadm_patches.enabled %}
patches:
  directory: {{ kubeadm_patches.dest_dir }}
{% endif %}
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
clusterName: {{ cluster_name }}
etcd:
{% if etcd_deployment_type != "kubeadm" %}
  external:
    endpoints:
{% for endpoint in etcd_access_addresses.split(',') %}
    - {{ endpoint }}
{% endfor %}
    caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
    certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
    keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
{% elif etcd_deployment_type == "kubeadm" %}
  local:
    imageRepository: "{{ etcd_image_repo | regex_replace("/etcd$","") }}"
    imageTag: "{{ etcd_image_tag }}"
    dataDir: "{{ etcd_data_dir }}"
    extraArgs:
      metrics: {{ etcd_metrics }}
      election-timeout: "{{ etcd_election_timeout }}"
      heartbeat-interval: "{{ etcd_heartbeat_interval }}"
      auto-compaction-retention: "{{ etcd_compaction_retention }}"
{% if etcd_snapshot_count is defined %}
      snapshot-count: "{{ etcd_snapshot_count }}"
{% endif %}
{% if etcd_quota_backend_bytes is defined %}
      quota-backend-bytes: "{{ etcd_quota_backend_bytes }}"
{% endif %}
{% if etcd_max_request_bytes is defined %}
      max-request-bytes: "{{ etcd_max_request_bytes }}"
{% endif %}
{% if etcd_log_level is defined %}
      log-level: "{{ etcd_log_level }}"
{% endif %}
{% for key, value in etcd_extra_vars.items() %}
      {{ key }}: "{{ value }}"
{% endfor %}
    serverCertSANs:
{% for san in etcd_cert_alt_names %}
    - {{ san }}
{% endfor %}
{% for san in etcd_cert_alt_ips %}
    - {{ san }}
{% endfor %}
    peerCertSANs:
{% for san in etcd_cert_alt_names %}
    - {{ san }}
{% endfor %}
{% for san in etcd_cert_alt_ips %}
    - {{ san }}
{% endfor %}
{% endif %}
dns:
  imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }}
  imageTag: {{ coredns_image_tag }}
networking:
  dnsDomain: {{ dns_domain }}
  serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
  podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
{% if kubeadm_feature_gates %}
featureGates:
{% for feature in kubeadm_feature_gates %}
  {{ feature|replace("=", ": ") }}
{% endfor %}
{% endif %}
kubernetesVersion: {{ kube_version }}
{% if kubeadm_config_api_fqdn is defined %}
controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
controlPlaneEndpoint: {{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}
{% endif %}
certificatesDir: {{ kube_cert_dir }}
imageRepository: {{ kube_image_repo }}
apiServer:
  extraArgs:
{% if kube_apiserver_pod_eviction_not_ready_timeout_seconds is defined %}
    default-not-ready-toleration-seconds: "{{ kube_apiserver_pod_eviction_not_ready_timeout_seconds }}"
{% endif %}
{% if kube_apiserver_pod_eviction_unreachable_timeout_seconds is defined %}
    default-unreachable-toleration-seconds: "{{ kube_apiserver_pod_eviction_unreachable_timeout_seconds }}"
{% endif %}
{% if kube_api_anonymous_auth is defined %}
    anonymous-auth: "{{ kube_api_anonymous_auth }}"
{% endif %}
    authorization-mode: {{ authorization_modes | join(',') }}
    bind-address: {{ kube_apiserver_bind_address }}
{% if kube_apiserver_enable_admission_plugins|length > 0 %}
    enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
{% endif %}
{% if kube_apiserver_admission_control_config_file %}
    admission-control-config-file: {{ kube_config_dir }}/admission-controls.yaml
{% endif %}
{% if kube_apiserver_disable_admission_plugins|length > 0 %}
    disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
{% endif %}
    apiserver-count: "{{ kube_apiserver_count }}"
    endpoint-reconciler-type: lease
{% if etcd_events_cluster_enabled %}
    etcd-servers-overrides: "/events#{{ etcd_events_access_addresses_semicolon }}"
{% endif %}
    service-node-port-range: {{ kube_apiserver_node_port_range }}
    service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
    kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
    profiling: "{{ kube_profiling }}"
    request-timeout: "{{ kube_apiserver_request_timeout }}"
    enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
{% if kube_token_auth|default(true) %}
    token-auth-file: {{ kube_token_dir }}/known_tokens.csv
{% endif %}
{% if kube_apiserver_service_account_lookup %}
    service-account-lookup: "{{ kube_apiserver_service_account_lookup }}"
{% endif %}
{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
    oidc-issuer-url: "{{ kube_oidc_url }}"
    oidc-client-id: "{{ kube_oidc_client_id }}"
{% if kube_oidc_ca_file is defined %}
    oidc-ca-file: "{{ kube_oidc_ca_file }}"
{% endif %}
{% if kube_oidc_username_claim is defined %}
    oidc-username-claim: "{{ kube_oidc_username_claim }}"
{% endif %}
{% if kube_oidc_groups_claim is defined %}
    oidc-groups-claim: "{{ kube_oidc_groups_claim }}"
{% endif %}
{% if kube_oidc_username_prefix is defined %}
    oidc-username-prefix: "{{ kube_oidc_username_prefix }}"
{% endif %}
{% if kube_oidc_groups_prefix is defined %}
    oidc-groups-prefix: "{{ kube_oidc_groups_prefix }}"
{% endif %}
{% endif %}
{% if kube_webhook_token_auth|default(false) %}
    authentication-token-webhook-config-file: {{ kube_config_dir }}/webhook-token-auth-config.yaml
{% endif %}
{% if kube_webhook_authorization|default(false) %}
    authorization-webhook-config-file: {{ kube_config_dir }}/webhook-authorization-config.yaml
{% endif %}
{% if kube_encrypt_secret_data %}
    encryption-provider-config: {{ kube_cert_dir }}/secrets_encryption.yaml
{% endif %}
    storage-backend: {{ kube_apiserver_storage_backend }}
{% if kube_api_runtime_config|length > 0 %}
    runtime-config: {{ kube_api_runtime_config | join(',') }}
{% endif %}
    allow-privileged: "true"
{% if kubernetes_audit or kubernetes_audit_webhook %}
    audit-policy-file: {{ audit_policy_file }}
{% endif %}
{% if kubernetes_audit %}
    audit-log-path: "{{ audit_log_path }}"
    audit-log-maxage: "{{ audit_log_maxage }}"
    audit-log-maxbackup: "{{ audit_log_maxbackups }}"
    audit-log-maxsize: "{{ audit_log_maxsize }}"
{% endif %}
{% if kubernetes_audit_webhook %}
    audit-webhook-config-file: {{ audit_webhook_config_file }}
    audit-webhook-mode: {{ audit_webhook_mode }}
{% if audit_webhook_mode == "batch" %}
    audit-webhook-batch-max-size: "{{ audit_webhook_batch_max_size }}"
    audit-webhook-batch-max-wait: "{{ audit_webhook_batch_max_wait }}"
{% endif %}
{% endif %}
{% for key in kube_kubeadm_apiserver_extra_args %}
    {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
{% endfor %}
{% if kube_apiserver_feature_gates or kube_feature_gates %}
    feature-gates: "{{ kube_apiserver_feature_gates | default(kube_feature_gates, true) | join(',') }}"
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
    cloud-provider: {{ cloud_provider }}
    cloud-config: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if tls_min_version is defined %}
    tls-min-version: {{ tls_min_version }}
{% endif %}
{% if tls_cipher_suites is defined %}
    tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %}

{% endif %}
{% if event_ttl_duration is defined %}
    event-ttl: {{ event_ttl_duration }}
{% endif %}
{% if kubelet_rotate_server_certificates %}
    kubelet-certificate-authority: {{ kube_cert_dir }}/ca.crt
{% endif %}
{% if kubernetes_audit or kube_token_auth|default(true) or kube_webhook_token_auth|default(false) or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] ) or apiserver_extra_volumes or ssl_ca_dirs|length %}
  extraVolumes:
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
  - name: cloud-config
    hostPath: {{ kube_config_dir }}/cloud_config
    mountPath: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if kube_token_auth|default(true) %}
  - name: token-auth-config
    hostPath: {{ kube_token_dir }}
    mountPath: {{ kube_token_dir }}
{% endif %}
{% if kube_webhook_token_auth|default(false) %}
  - name: webhook-token-auth-config
    hostPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
    mountPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
{% endif %}
{% if kube_webhook_authorization|default(false) %}
  - name: webhook-authorization-config
    hostPath: {{ kube_config_dir }}/webhook-authorization-config.yaml
    mountPath: {{ kube_config_dir }}/webhook-authorization-config.yaml
{% endif %}
{% if kubernetes_audit or kubernetes_audit_webhook %}
  - name: {{ audit_policy_name }}
    hostPath: {{ audit_policy_hostpath }}
    mountPath: {{ audit_policy_mountpath }}
{% if audit_log_path != "-" %}
  - name: {{ audit_log_name }}
    hostPath: {{ audit_log_hostpath }}
    mountPath: {{ audit_log_mountpath }}
    readOnly: false
{% endif %}
{% endif %}
{% if kube_apiserver_admission_control_config_file %}
  - name: admission-control-configs
    hostPath: {{ kube_config_dir }}/admission-controls
    mountPath: {{ kube_config_dir }}
    readOnly: false
    pathType: DirectoryOrCreate
{% endif %}
{% for volume in apiserver_extra_volumes %}
  - name: {{ volume.name }}
    hostPath: {{ volume.hostPath }}
    mountPath: {{ volume.mountPath }}
    readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }}
{% endfor %}
{% if ssl_ca_dirs|length %}
{% for dir in ssl_ca_dirs %}
  - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
    hostPath: {{ dir }}
    mountPath: {{ dir }}
    readOnly: true
{% endfor %}
{% endif %}
{% endif %}
  certSANs:
{% for san in apiserver_sans %}
  - {{ san }}
{% endfor %}
  timeoutForControlPlane: 5m0s
controllerManager:
  extraArgs:
    node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
    node-monitor-period: {{ kube_controller_node_monitor_period }}
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
    cluster-cidr: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
    service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
{% if enable_dual_stack_networks %}
    node-cidr-mask-size-ipv4: "{{ kube_network_node_prefix }}"
    node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}"
{% else %}
    node-cidr-mask-size: "{{ kube_network_node_prefix }}"
{% endif %}
    profiling: "{{ kube_profiling }}"
    terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}"
    bind-address: {{ kube_controller_manager_bind_address }}
    leader-elect-lease-duration: {{ kube_controller_manager_leader_elect_lease_duration }}
    leader-elect-renew-deadline: {{ kube_controller_manager_leader_elect_renew_deadline }}
{% if kube_controller_feature_gates or kube_feature_gates %}
    feature-gates: "{{ kube_controller_feature_gates | default(kube_feature_gates, true) | join(',') }}"
{% endif %}
{% for key in kube_kubeadm_controller_extra_args %}
    {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
{% endfor %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
    cloud-provider: {{ cloud_provider }}
    cloud-config: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin not in ["cloud"] %}
    configure-cloud-routes: "false"
{% endif %}
{% if kubelet_flexvolumes_plugins_dir is defined %}
    flex-volume-plugin-dir: {{ kubelet_flexvolumes_plugins_dir }}
{% endif %}
{% if tls_min_version is defined %}
    tls-min-version: {{ tls_min_version }}
{% endif %}
{% if tls_cipher_suites is defined %}
    tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %}

{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] or controller_manager_extra_volumes %}
  extraVolumes:
{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
  - name: openstackcacert
    hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
    mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
  - name: cloud-config
    hostPath: {{ kube_config_dir }}/cloud_config
    mountPath: {{ kube_config_dir }}/cloud_config
{% endif %}
{% for volume in controller_manager_extra_volumes %}
  - name: {{ volume.name }}
    hostPath: {{ volume.hostPath }}
    mountPath: {{ volume.mountPath }}
    readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }}
{% endfor %}
{% endif %}
scheduler:
  extraArgs:
    bind-address: {{ kube_scheduler_bind_address }}
    config: {{ kube_config_dir }}/kubescheduler-config.yaml
{% if kube_scheduler_feature_gates or kube_feature_gates %}
    feature-gates: "{{ kube_scheduler_feature_gates | default(kube_feature_gates, true) | join(',') }}"
{% endif %}
{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
{% for key in kube_kubeadm_scheduler_extra_args %}
    {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
{% endfor %}
{% endif %}
{% if tls_min_version is defined %}
    tls-min-version: {{ tls_min_version }}
{% endif %}
{% if tls_cipher_suites is defined %}
    tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %}

{% endif %}
  extraVolumes:
  - name: kubescheduler-config
    hostPath: {{ kube_config_dir }}/kubescheduler-config.yaml
    mountPath: {{ kube_config_dir }}/kubescheduler-config.yaml
    readOnly: true
{% if scheduler_extra_volumes %}
{% for volume in scheduler_extra_volumes %}
  - name: {{ volume.name }}
    hostPath: {{ volume.hostPath }}
    mountPath: {{ volume.mountPath }}
    readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }}
{% endfor %}
{% endif %}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: {{ kube_proxy_bind_address }}
clientConnection:
  acceptContentTypes: {{ kube_proxy_client_accept_content_types }}
  burst: {{ kube_proxy_client_burst }}
  contentType: {{ kube_proxy_client_content_type }}
  kubeconfig: {{ kube_proxy_client_kubeconfig }}
  qps: {{ kube_proxy_client_qps }}
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
configSyncPeriod: {{ kube_proxy_config_sync_period }}
conntrack:
  maxPerCore: {{ kube_proxy_conntrack_max_per_core }}
  min: {{ kube_proxy_conntrack_min }}
  tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
  tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
enableProfiling: {{ kube_proxy_enable_profiling }}
healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
hostnameOverride: {{ kube_override_hostname }}
iptables:
  masqueradeAll: {{ kube_proxy_masquerade_all }}
  masqueradeBit: {{ kube_proxy_masquerade_bit }}
  minSyncPeriod: {{ kube_proxy_min_sync_period }}
  syncPeriod: {{ kube_proxy_sync_period }}
ipvs:
  excludeCIDRs: {{ kube_proxy_exclude_cidrs }}
  minSyncPeriod: {{ kube_proxy_min_sync_period }}
  scheduler: {{ kube_proxy_scheduler }}
  syncPeriod: {{ kube_proxy_sync_period }}
  strictARP: {{ kube_proxy_strict_arp }}
  tcpTimeout: {{ kube_proxy_tcp_timeout }}
  tcpFinTimeout: {{ kube_proxy_tcp_fin_timeout }}
  udpTimeout: {{ kube_proxy_udp_timeout }}
metricsBindAddress: {{ kube_proxy_metrics_bind_address }}
mode: {{ kube_proxy_mode }}
nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
oomScoreAdj: {{ kube_proxy_oom_score_adj }}
portRange: {{ kube_proxy_port_range }}
udpIdleTimeout: {{ kube_proxy_udp_idle_timeout }}
{% if kube_proxy_feature_gates or kube_feature_gates %}
{% set feature_gates = ( kube_proxy_feature_gates | default(kube_feature_gates, true) ) %}
featureGates:
{% for feature in feature_gates %}
  {{ feature|replace("=", ": ") }}
{% endfor %}
{% endif %}
{# DNS settings for kubelet #}
{% if enable_nodelocaldns %}
{% set kubelet_cluster_dns = [nodelocaldns_ip] %}
{% elif dns_mode in ['coredns'] %}
{% set kubelet_cluster_dns = [skydns_server] %}
{% elif dns_mode == 'coredns_dual' %}
{% set kubelet_cluster_dns = [skydns_server,skydns_server_secondary] %}
{% elif dns_mode == 'manual' %}
{% set kubelet_cluster_dns = [manual_dns_server] %}
{% else %}
{% set kubelet_cluster_dns = [] %}
{% endif %}
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
clusterDNS:
{% for dns_address in kubelet_cluster_dns %}
- {{ dns_address }}
{% endfor %}
{% if kubelet_feature_gates or kube_feature_gates %}
{% set feature_gates = ( kubelet_feature_gates | default(kube_feature_gates, true) ) %}
featureGates:
{% for feature in feature_gates %}
  {{ feature|replace("=", ": ") }}
{% endfor %}
{% endif %}
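Most component flags above have dedicated variables, while arbitrary flags flow in through the kube_kubeadm_*_extra_args dicts rendered by the for-loops. A sketch of inventory overrides (the flag values here are assumptions, not recommendations):

kube_kubeadm_apiserver_extra_args:
  max-requests-inflight: "800"
kube_kubeadm_controller_extra_args:
  concurrent-deployment-syncs: "10"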
@@ -0,0 +1,34 @@
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
{% if kubeadm_config_api_fqdn is defined %}
    apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
    apiServerEndpoint: {{ kubeadm_discovery_address }}
{% endif %}
    token: {{ kubeadm_token }}
    unsafeSkipCAVerification: true
  timeout: {{ discovery_timeout }}
  tlsBootstrapToken: {{ kubeadm_token }}
controlPlane:
  localAPIEndpoint:
    advertiseAddress: {{ kube_apiserver_address }}
    bindPort: {{ kube_apiserver_port }}
  certificateKey: {{ kubeadm_certificate_key }}
nodeRegistration:
  name: {{ kube_override_hostname|default(inventory_hostname) }}
  criSocket: {{ cri_socket }}
{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
{% else %}
  taints: []
{% endif %}
{% if kubeadm_patches is defined and kubeadm_patches.enabled %}
patches:
  directory: {{ kubeadm_patches.dest_dir }}
{% endif %}
@@ -0,0 +1,25 @@
{% set kubescheduler_config_api_version = "v1beta3" %}
apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version|d('v1') }}
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "{{ kube_config_dir }}/scheduler.conf"
{% for key in kube_scheduler_client_conn_extra_opts %}
  {{ key }}: {{ kube_scheduler_client_conn_extra_opts[key] }}
{% endfor %}
{% if kube_scheduler_extenders %}
extenders:
{{ kube_scheduler_extenders | to_nice_yaml(indent=2, width=256) }}
{% endif %}
leaderElection:
  leaseDuration: {{ kube_scheduler_leader_elect_lease_duration }}
  renewDeadline: {{ kube_scheduler_leader_elect_renew_deadline }}
{% for key in kube_scheduler_leader_elect_extra_opts %}
  {{ key }}: {{ kube_scheduler_leader_elect_extra_opts[key] }}
{% endfor %}
{% if kube_scheduler_profiles %}
profiles:
{{ kube_scheduler_profiles | to_nice_yaml(indent=2, width=256) }}
{% endif %}
{% for key in kube_scheduler_config_extra_opts %}
{{ key }}: {{ kube_scheduler_config_extra_opts[key] }}
{% endfor %}
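The kube_scheduler_profiles list is dumped straight through to_nice_yaml, so it takes the upstream KubeSchedulerConfiguration shape verbatim. A sketch of one profile (the plugin choice is an assumption, not a role default):

kube_scheduler_profiles:
  - schedulerName: default-scheduler
    pluginConfig:
      - name: PodTopologySpread
        args:
          defaultingType: List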
@@ -0,0 +1,17 @@
{% if kube_pod_security_use_default %}
apiVersion: pod-security.admission.config.k8s.io/v1beta1
kind: PodSecurityConfiguration
defaults:
  enforce: "{{ kube_pod_security_default_enforce }}"
  enforce-version: "{{ kube_pod_security_default_enforce_version }}"
  audit: "{{ kube_pod_security_default_audit }}"
  audit-version: "{{ kube_pod_security_default_audit_version }}"
  warn: "{{ kube_pod_security_default_warn }}"
  warn-version: "{{ kube_pod_security_default_warn_version }}"
exemptions:
  usernames: {{ kube_pod_security_exemptions_usernames|to_json }}
  runtimeClasses: {{ kube_pod_security_exemptions_runtime_class_names|to_json }}
  namespaces: {{ kube_pod_security_exemptions_namespaces|to_json }}
{% else %}
# This file is intentionally left empty as kube_pod_security_use_default={{ kube_pod_security_use_default }}
{% endif %}
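A sketch of the inventory variables this template consumes, for a cluster-wide baseline with kube-system exempted (the level choice is an assumption, not necessarily the role default):

kube_pod_security_use_default: true
kube_pod_security_default_enforce: baseline
kube_pod_security_exemptions_namespaces:
  - kube-system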
@@ -0,0 +1,32 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - policy
    resourceNames:
      - privileged
    resources:
      - podsecuritypolicies
    verbs:
      - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - policy
    resourceNames:
      - restricted
    resources:
      - podsecuritypolicies
    verbs:
      - use
@@ -0,0 +1,54 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: psp:any:restricted
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:restricted
subjects:
  - kind: Group
    name: system:authenticated
    apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: psp:kube-system:privileged
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:privileged
subjects:
  - kind: Group
    name: system:masters
    apiGroup: rbac.authorization.k8s.io
  - kind: Group
    name: system:serviceaccounts:kube-system
    apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: psp:nodes:privileged
  namespace: kube-system
  annotations:
    kubernetes.io/description: 'Allow nodes to create privileged pods. Should
      be used in combination with the NodeRestriction admission plugin to limit
      nodes to mirror pods bound to themselves.'
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:privileged
subjects:
  - kind: Group
    apiGroup: rbac.authorization.k8s.io
    name: system:nodes
  - kind: User
    apiGroup: rbac.authorization.k8s.io
    # Legacy node ID
    name: kubelet
@@ -0,0 +1,27 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted
  annotations:
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
{% if apparmor_enabled %}
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  {{ podsecuritypolicy_restricted_spec | to_yaml(indent=2, width=1337) | indent(width=2) }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  {{ podsecuritypolicy_privileged_spec | to_yaml(indent=2, width=1337) | indent(width=2) }}
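The actual policy body comes from the podsecuritypolicy_restricted_spec dict, serialized by to_yaml and indented under spec. A truncated sketch of its expected shape (the exact field selection here is an assumption):

podsecuritypolicy_restricted_spec:
  privileged: false
  allowPrivilegeEscalation: false
  requiredDropCapabilities:
    - ALL
  runAsUser:
    rule: MustRunAsNonRoot
  fsGroup:
    rule: RunAsAny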
@@ -0,0 +1,11 @@
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
{{ kube_encryption_resources|to_nice_yaml|indent(4, True) }}
    providers:
    - {{ kube_encryption_algorithm }}:
        keys:
        - name: key
          secret: {{ kube_encrypt_token | b64encode }}
    - identity: {}
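Encryption at rest is switched on by kube_encrypt_secret_data and parameterized by the two variables this template references. A sketch of the related inventory settings (the algorithm choice is an assumption; aescbc, secretbox, and aesgcm are the usual candidates):

kube_encrypt_secret_data: true
kube_encryption_algorithm: secretbox
kube_encryption_resources:
  - secrets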
@@ -0,0 +1,18 @@
# clusters refers to the remote service.
clusters:
  - name: webhook-token-authz-cluster
    cluster:
      server: {{ kube_webhook_authorization_url }}
      insecure-skip-tls-verify: {{ kube_webhook_authorization_url_skip_tls_verify }}

# users refers to the API server's webhook configuration.
users:
  - name: webhook-token-authz-user

# kubeconfig files require a context. Provide one for the API server.
current-context: webhook-token-authz
contexts:
- context:
    cluster: webhook-token-authz-cluster
    user: webhook-token-authz-user
  name: webhook-token-authz
@@ -0,0 +1,21 @@
# clusters refers to the remote service.
clusters:
  - name: webhook-token-auth-cluster
    cluster:
      server: {{ kube_webhook_token_auth_url }}
      insecure-skip-tls-verify: {{ kube_webhook_token_auth_url_skip_tls_verify }}
{% if kube_webhook_token_auth_ca_data is defined %}
      certificate-authority-data: {{ kube_webhook_token_auth_ca_data }}
{% endif %}

# users refers to the API server's webhook configuration.
users:
  - name: webhook-token-auth-user

# kubeconfig files require a context. Provide one for the API server.
current-context: webhook-token-auth
contexts:
- context:
    cluster: webhook-token-auth-cluster
    user: webhook-token-auth-user
  name: webhook-token-auth
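A sketch of the inventory variables that drive this kubeconfig (the endpoint URL is hypothetical and only for illustration):

kube_webhook_token_auth: true
kube_webhook_token_auth_url: https://auth.example.com/authenticate  # hypothetical endpoint
kube_webhook_token_auth_url_skip_tls_verify: false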
3
kubespray/roles/kubernetes/control-plane/vars/main.yaml
Normal file
@@ -0,0 +1,3 @@
---
# list of admission plugins that need to be configured
kube_apiserver_admission_plugins_needs_configuration: [EventRateLimit, PodSecurity]
12
kubespray/roles/kubernetes/kubeadm/defaults/main.yml
Normal file
@@ -0,0 +1,12 @@
---
# discovery_timeout sets how long kubeadm waits during bootstrap-token discovery.
# This value must be smaller than kubeadm_join_timeout
discovery_timeout: 60s
kubeadm_join_timeout: 120s

# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
  {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
  {%- else -%}
  {{ inventory_hostname }}
  {%- endif -%}
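An example override that keeps the required ordering between the two timeouts (the values are illustrative):

# discovery must still finish before the overall join deadline
discovery_timeout: 5m0s
kubeadm_join_timeout: 10m0s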
15
kubespray/roles/kubernetes/kubeadm/handlers/main.yml
Normal file
@@ -0,0 +1,15 @@
---
- name: Kubeadm | restart kubelet
  command: /bin/true
  notify:
    - Kubeadm | reload systemd
    - Kubeadm | reload kubelet

- name: Kubeadm | reload systemd
  systemd:
    daemon_reload: true

- name: Kubeadm | reload kubelet
  service:
    name: kubelet
    state: restarted
@@ -0,0 +1,61 @@
---
- name: Parse certificate key if not set
  set_fact:
    kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_certificate_key'] }}"
  when: kubeadm_certificate_key is undefined

- name: Pull control plane certs down
  shell: >-
    {{ bin_dir }}/kubeadm join phase
    control-plane-prepare download-certs
    --certificate-key {{ kubeadm_certificate_key }}
    --control-plane
    --token {{ kubeadm_token }}
    --discovery-token-unsafe-skip-ca-verification
    {{ kubeadm_discovery_address }}
    &&
    {{ bin_dir }}/kubeadm join phase
    control-plane-prepare certs
    --control-plane
    --token {{ kubeadm_token }}
    --discovery-token-unsafe-skip-ca-verification
    {{ kubeadm_discovery_address }}
  args:
    creates: "{{ kube_cert_dir }}/apiserver-etcd-client.key"

- name: Delete unneeded certificates
  file:
    path: "{{ item }}"
    state: absent
  with_items:
    - "{{ kube_cert_dir }}/apiserver.crt"
    - "{{ kube_cert_dir }}/apiserver.key"
    - "{{ kube_cert_dir }}/ca.key"
    - "{{ kube_cert_dir }}/etcd/ca.key"
    - "{{ kube_cert_dir }}/etcd/healthcheck-client.crt"
    - "{{ kube_cert_dir }}/etcd/healthcheck-client.key"
    - "{{ kube_cert_dir }}/etcd/peer.crt"
    - "{{ kube_cert_dir }}/etcd/peer.key"
    - "{{ kube_cert_dir }}/etcd/server.crt"
    - "{{ kube_cert_dir }}/etcd/server.key"
    - "{{ kube_cert_dir }}/front-proxy-ca.crt"
    - "{{ kube_cert_dir }}/front-proxy-ca.key"
    - "{{ kube_cert_dir }}/front-proxy-client.crt"
    - "{{ kube_cert_dir }}/front-proxy-client.key"
    - "{{ kube_cert_dir }}/sa.key"
    - "{{ kube_cert_dir }}/sa.pub"

- name: Calculate etcd cert serial
  command: "openssl x509 -in {{ kube_cert_dir }}/apiserver-etcd-client.crt -noout -serial"
  register: "etcd_client_cert_serial_result"
  changed_when: false
  when:
    - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
  tags:
    - network

- name: Set etcd_client_cert_serial
  set_fact:
    etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}"
  tags:
    - network
176
kubespray/roles/kubernetes/kubeadm/tasks/main.yml
Normal file
@@ -0,0 +1,176 @@
---
- name: Set kubeadm_discovery_address
  set_fact:
    kubeadm_discovery_address: >-
      {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
      {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
      {%- else -%}
      {{ kube_apiserver_endpoint | replace("https://", "") }}
      {%- endif %}
  tags:
    - facts

- name: Check if kubelet.conf exists
  stat:
    path: "{{ kube_config_dir }}/kubelet.conf"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubelet_conf

- name: Check if kubeadm CA cert is accessible
  stat:
    path: "{{ kube_cert_dir }}/ca.crt"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubeadm_ca_stat
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  run_once: true

- name: Calculate kubeadm CA cert hash
  shell: set -o pipefail && openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
  args:
    executable: /bin/bash
  register: kubeadm_ca_hash
  when:
    - kubeadm_ca_stat.stat is defined
    - kubeadm_ca_stat.stat.exists
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  run_once: true
  changed_when: false

- name: Create kubeadm token for joining nodes with 24h expiration (default)
  command: "{{ bin_dir }}/kubeadm token create"
  register: temp_token
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  when: kubeadm_token is not defined
  changed_when: false

- name: Set kubeadm_token to generated token
  set_fact:
    kubeadm_token: "{{ temp_token.stdout }}"
  when: kubeadm_token is not defined

- name: Set kubeadm api version to v1beta3
  set_fact:
    kubeadmConfig_api_version: v1beta3

- name: Create kubeadm client config
  template:
    src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
    dest: "{{ kube_config_dir }}/kubeadm-client.conf"
    backup: yes
    mode: 0640
  when: not is_kube_master

- name: kubeadm | Create directory to store kubeadm patches
  file:
    path: "{{ kubeadm_patches.dest_dir }}"
    state: directory
    mode: 0640
  when: kubeadm_patches is defined and kubeadm_patches.enabled

- name: kubeadm | Copy kubeadm patches from inventory files
  copy:
    src: "{{ kubeadm_patches.source_dir }}/"
    dest: "{{ kubeadm_patches.dest_dir }}"
    owner: "root"
    mode: 0644
  when: kubeadm_patches is defined and kubeadm_patches.enabled

- name: Join to cluster if needed
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin"
  when: not is_kube_master and (not kubelet_conf.stat.exists)
  block:

    - name: Join to cluster
      command: >-
        timeout -k {{ kubeadm_join_timeout }} {{ kubeadm_join_timeout }}
        {{ bin_dir }}/kubeadm join
        --config {{ kube_config_dir }}/kubeadm-client.conf
        --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests
        --skip-phases={{ kubeadm_join_phases_skip | join(',') }}
      register: kubeadm_join
      changed_when: kubeadm_join is success

  rescue:

    - name: Join to cluster with ignores
      command: >-
        timeout -k {{ kubeadm_join_timeout }} {{ kubeadm_join_timeout }}
        {{ bin_dir }}/kubeadm join
        --config {{ kube_config_dir }}/kubeadm-client.conf
        --ignore-preflight-errors=all
        --skip-phases={{ kubeadm_join_phases_skip | join(',') }}
      register: kubeadm_join
      changed_when: kubeadm_join is success

  always:

    - name: Display kubeadm join stderr if any
      when: kubeadm_join is failed
      debug:
        msg: |
          Joined with warnings
          {{ kubeadm_join.stderr_lines }}

- name: Update server field in kubelet kubeconfig
  lineinfile:
    dest: "{{ kube_config_dir }}/kubelet.conf"
    regexp: 'server:'
    line: '    server: {{ kube_apiserver_endpoint }}'
    backup: yes
  when:
    - kubeadm_config_api_fqdn is not defined
    - not is_kube_master
    - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
  notify: Kubeadm | restart kubelet

# FIXME(mattymo): Need to point to localhost, otherwise masters will all point
# incorrectly to first master, creating SPoF.
- name: Update server field in kube-proxy kubeconfig
  shell: >-
    set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
    | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'
    | {{ kubectl }} replace -f -
  args:
    executable: /bin/bash
  run_once: true
  delegate_to: "{{ groups['kube_control_plane']|first }}"
  delegate_facts: false
  when:
    - kubeadm_config_api_fqdn is not defined
    - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
    - kube_proxy_deployed
    - loadbalancer_apiserver_localhost
  tags:
    - kube-proxy

- name: Set ca.crt file permission
  file:
    path: "{{ kube_cert_dir }}/ca.crt"
    owner: root
    group: root
    mode: "0644"

- name: Restart all kube-proxy pods to ensure that they load the new configmap
  command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
  run_once: true
  delegate_to: "{{ groups['kube_control_plane']|first }}"
  delegate_facts: false
  when:
    - kubeadm_config_api_fqdn is not defined
    - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
    - kube_proxy_deployed
  tags:
    - kube-proxy

- name: Extract etcd certs from control plane if using etcd kubeadm mode
  include_tasks: kubeadm_etcd_node.yml
  when:
    - etcd_deployment_type == "kubeadm"
    - inventory_hostname not in groups['kube_control_plane']
    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
    - kube_network_plugin != "calico" or calico_datastore == "etcd"
@@ -0,0 +1,32 @@
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
{% if kubeadm_config_api_fqdn is defined %}
    apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
    apiServerEndpoint: {{ kubeadm_discovery_address }}
{% endif %}
    token: {{ kubeadm_token }}
{% if kubeadm_ca_hash.stdout is defined %}
    caCertHashes:
    - sha256:{{ kubeadm_ca_hash.stdout }}
{% else %}
    unsafeSkipCAVerification: true
{% endif %}
  timeout: {{ discovery_timeout }}
  tlsBootstrapToken: {{ kubeadm_token }}
caCertPath: {{ kube_cert_dir }}/ca.crt
nodeRegistration:
  name: '{{ kube_override_hostname }}'
  criSocket: {{ cri_socket }}
{% if 'calico_rr' in group_names and 'kube_node' not in group_names %}
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/calico-rr
{% endif %}
{% if kubeadm_patches is defined and kubeadm_patches.enabled %}
patches:
  directory: {{ kubeadm_patches.dest_dir }}
{% endif %}
49
kubespray/roles/kubernetes/node-label/tasks/main.yml
Normal file
@@ -0,0 +1,49 @@
---
- name: Kubernetes Apps | Wait for kube-apiserver
  uri:
    url: "{{ kube_apiserver_endpoint }}/healthz"
    validate_certs: no
    client_cert: "{{ kube_apiserver_client_cert }}"
    client_key: "{{ kube_apiserver_client_key }}"
  register: result
  until: result.status == 200
  retries: 10
  delay: 6
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: Set role node label to empty list
  set_fact:
    role_node_labels: []

- name: Node label for nvidia GPU nodes
  set_fact:
    role_node_labels: "{{ role_node_labels + [ 'nvidia.com/gpu=true' ] }}"
  when:
    - nvidia_gpu_nodes is defined
    - nvidia_accelerator_enabled|bool
    - inventory_hostname in nvidia_gpu_nodes

- name: Set inventory node label to empty list
  set_fact:
    inventory_node_labels: []

- name: Populate inventory node label
  set_fact:
    inventory_node_labels: "{{ inventory_node_labels + [ '%s=%s'|format(item.key, item.value) ] }}"
  loop: "{{ node_labels|d({})|dict2items }}"
  when:
    - node_labels is defined
    - node_labels is mapping

- debug:  # noqa unnamed-task
    var: role_node_labels
- debug:  # noqa unnamed-task
    var: inventory_node_labels

- name: Set label to node
  command: >-
    {{ kubectl }} label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
  loop: "{{ role_node_labels + inventory_node_labels }}"
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  changed_when: false
...
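The node_labels inventory variable must be a mapping; each key/value pair becomes one key=value argument to kubectl label. A sketch of an inventory entry (the label keys are assumptions):

node_labels:
  topology.kubernetes.io/zone: zone-a
  node-role.kubernetes.io/ingress: ""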
235
kubespray/roles/kubernetes/node/defaults/main.yml
Normal file
235
kubespray/roles/kubernetes/node/defaults/main.yml
Normal file
@@ -0,0 +1,235 @@
|
||||
---
|
||||
# advertised host IP for kubelet. This affects network plugin config. Take caution
|
||||
kubelet_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}{{ (',' + ip6) if enable_dual_stack_networks and ip6 is defined else '' }}"
|
||||
|
||||
# bind address for kubelet. Set to 0.0.0.0 to listen on all interfaces
|
||||
kubelet_bind_address: "{{ ip | default('0.0.0.0') }}"
|
||||
|
||||
# resolv.conf to base dns config
|
||||
kube_resolv_conf: "/etc/resolv.conf"
|
||||
|
||||
# Set to empty to avoid cgroup creation
|
||||
kubelet_enforce_node_allocatable: "\"\""
|
||||
|
||||
# Set runtime and kubelet cgroups when using systemd as cgroup driver (default)
|
||||
kubelet_runtime_cgroups: "/systemd/system.slice"
|
||||
kubelet_kubelet_cgroups: "/systemd/system.slice"
|
||||
|
||||
# Set runtime and kubelet cgroups when using cgroupfs as cgroup driver
|
||||
kubelet_runtime_cgroups_cgroupfs: "/system.slice/containerd.service"
|
||||
kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service"
|
||||
|
||||
### fail with swap on (default true)
|
||||
kubelet_fail_swap_on: true
|
||||
|
||||
# Set systemd service hardening features
|
||||
kubelet_systemd_hardening: false
|
||||
|
||||
# List of secure IPs for kubelet
|
||||
kubelet_secure_addresses: >-
|
||||
{%- for host in groups['kube_control_plane'] -%}
|
||||
{{ hostvars[host]['ip'] | default(fallback_ips[host]) }}{{ ' ' if not loop.last else '' }}
|
||||
{%- endfor -%}
|
||||
|
||||
# Reserve this space for kube resources
|
||||
kube_memory_reserved: 256Mi
|
||||
kube_cpu_reserved: 100m
|
||||
# kube_ephemeral_storage_reserved: 2Gi
|
||||
# kube_pid_reserved: "1000"
|
||||
# Reservation for master hosts
|
||||
kube_master_memory_reserved: 512Mi
|
||||
kube_master_cpu_reserved: 200m
|
||||
# kube_master_ephemeral_storage_reserved: 2Gi
|
||||
# kube_master_pid_reserved: "1000"
|
||||
|
||||
# Set to true to reserve resources for system daemons
|
||||
system_reserved: false
|
||||
system_memory_reserved: 512Mi
|
||||
system_cpu_reserved: 500m
|
||||
# system_ephemeral_storage_reserved: 2Gi
|
||||
# system_pid_reserved: "1000"
|
||||
# Reservation for master hosts
|
||||
system_master_memory_reserved: 256Mi
|
||||
system_master_cpu_reserved: 250m
|
||||
# system_master_ephemeral_storage_reserved: 2Gi
|
||||
# system_master_pid_reserved: "1000"
|
||||
|
||||
## Eviction Thresholds to avoid system OOMs
|
||||
# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds
|
||||
eviction_hard: {}
|
||||
eviction_hard_control_plane: {}
|
||||
|
||||
kubelet_status_update_frequency: 10s
|
||||
|
||||
# kube-vip
|
||||
kube_vip_version: v0.5.5
|
||||
|
||||
kube_vip_arp_enabled: false
|
||||
kube_vip_interface:
|
||||
kube_vip_services_interface:
|
||||
kube_vip_cidr: 32
|
||||
kube_vip_controlplane_enabled: false
|
||||
kube_vip_ddns_enabled: false
|
||||
kube_vip_services_enabled: false
|
||||
kube_vip_leader_election_enabled: "{{ kube_vip_arp_enabled }}"
|
||||
kube_vip_bgp_enabled: false
|
||||
kube_vip_bgp_routerid:
|
||||
kube_vip_local_as: 65000
|
||||
kube_vip_bgp_peeraddress:
|
||||
kube_vip_bgp_peerpass:
|
||||
kube_vip_bgp_peeras: 65000
|
||||
kube_vip_bgppeers:
|
||||
kube_vip_address:
|
||||
|
||||
# Requests for load balancer app
|
||||
loadbalancer_apiserver_memory_requests: 32M
|
||||
loadbalancer_apiserver_cpu_requests: 25m
|
||||
|
||||
loadbalancer_apiserver_keepalive_timeout: 5m
|
||||
|
||||
# Uncomment if you need to enable deprecated runtimes
|
||||
# kube_api_runtime_config:
|
||||
# - apps/v1beta1=true
|
||||
# - apps/v1beta2=true
|
||||
# - extensions/v1beta1/daemonsets=true
|
||||
# - extensions/v1beta1/deployments=true
|
||||
# - extensions/v1beta1/replicasets=true
|
||||
# - extensions/v1beta1/networkpolicies=true
|
||||
# - extensions/v1beta1/podsecuritypolicies=true
|
||||
|
||||
# A port range to reserve for services with NodePort visibility.
|
||||
# Inclusive at both ends of the range.
|
||||
kube_apiserver_node_port_range: "30000-32767"
|
||||
|
||||
# Configure the amount of pods able to run on single node
|
||||
# default is equal to application default
|
||||
kubelet_max_pods: 110
|
||||
|
||||
# Sets the maximum number of processes running per Pod
|
||||
# Default value -1 = unlimited
|
||||
kubelet_pod_pids_limit: -1
|
||||
|
||||
## Support parameters to be passed to kubelet via kubelet-config.yaml
|
||||
kubelet_config_extra_args: {}
|
||||
|
||||
## Parameters to be passed to kubelet via kubelet-config.yaml when cgroupfs is used as cgroup driver
|
||||
kubelet_config_extra_args_cgroupfs:
|
||||
systemCgroups: /system.slice
|
||||
cgroupRoot: /
|
||||
|
||||
## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not masters
|
||||
kubelet_node_config_extra_args: {}
|
||||
|
||||
# Maximum number of container log files that can be present for a container.
|
||||
kubelet_logfiles_max_nr: 5
|
||||
|
||||
# Maximum size of the container log file before it is rotated
|
||||
kubelet_logfiles_max_size: 10Mi
|
||||
|
||||
## Support custom flags to be passed to kubelet
|
||||
kubelet_custom_flags: []
|
||||
|
||||
## Support custom flags to be passed to kubelet only on nodes, not masters
|
||||
kubelet_node_custom_flags: []
|
||||
|
||||
# If non-empty, will use this string as identification instead of the actual hostname
|
||||
kube_override_hostname: >-
|
||||
{%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
|
||||
{%- else -%}
|
||||
{{ inventory_hostname }}
|
||||
{%- endif -%}
|

# The read-only port for the Kubelet to serve on with no authentication/authorization.
kube_read_only_port: 0

# Port for healthz for Kubelet
kubelet_healthz_port: 10248

# Bind address for healthz for Kubelet
kubelet_healthz_bind_address: 127.0.0.1

# sysctl_file_path to add sysctl conf to
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"

# For the openstack integration kubelet will need credentials to access
# openstack apis like nova and cinder. By default these values will be
# read from the environment.
openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true),true) }}"
openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"

# For the vsphere integration, kubelet will need credentials to access
# vsphere apis
# Documentation regarding these values can be found
# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"

vsphere_scsi_controller_type: pvscsi
# vsphere_public_network is the name of the network the VMs are joined to
vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"

## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
# azure_tenant_id:
# azure_subscription_id:
# azure_aad_client_id:
# azure_aad_client_secret:
# azure_resource_group:
# azure_location:
# azure_subnet_name:
# azure_security_group_name:
# azure_vnet_name:
# azure_route_table_name:
# supported values are 'standard' or 'vmss'
# azure_vmtype: standard
# Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
azure_loadbalancer_sku: basic
# excludes master nodes from standard load balancer.
azure_exclude_master_from_standard_lb: true
# disables the outbound SNAT for public load balancer rules
azure_disable_outbound_snat: false
# use instance metadata service where possible
azure_use_instance_metadata: true
# use specific Azure API endpoints
azure_cloud: AzurePublicCloud

## Support tls min version. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
# tls_min_version: ""

## Support tls cipher suites.
# tls_cipher_suites:
# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA256
# - TLS_RSA_WITH_AES_128_GCM_SHA256
# - TLS_RSA_WITH_AES_256_CBC_SHA
# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_RSA_WITH_RC4_128_SHA
15
kubespray/roles/kubernetes/node/handlers/main.yml
Normal file
@@ -0,0 +1,15 @@
---
- name: Node | restart kubelet
  command: /bin/true
  notify:
    - Kubelet | reload systemd
    - Kubelet | restart kubelet

- name: Kubelet | reload systemd
  systemd:
    daemon_reload: true

- name: Kubelet | restart kubelet
  service:
    name: kubelet
    state: restarted
@@ -0,0 +1,82 @@
---
- name: check azure_tenant_id value
  fail:
    msg: "azure_tenant_id is missing"
  when: azure_tenant_id is not defined or not azure_tenant_id

- name: check azure_subscription_id value
  fail:
    msg: "azure_subscription_id is missing"
  when: azure_subscription_id is not defined or not azure_subscription_id

- name: check azure_aad_client_id value
  fail:
    msg: "azure_aad_client_id is missing"
  when: azure_aad_client_id is not defined or not azure_aad_client_id

- name: check azure_aad_client_secret value
  fail:
    msg: "azure_aad_client_secret is missing"
  when: azure_aad_client_secret is not defined or not azure_aad_client_secret

- name: check azure_resource_group value
  fail:
    msg: "azure_resource_group is missing"
  when: azure_resource_group is not defined or not azure_resource_group

- name: check azure_location value
  fail:
    msg: "azure_location is missing"
  when: azure_location is not defined or not azure_location

- name: check azure_subnet_name value
  fail:
    msg: "azure_subnet_name is missing"
  when: azure_subnet_name is not defined or not azure_subnet_name

- name: check azure_security_group_name value
  fail:
    msg: "azure_security_group_name is missing"
  when: azure_security_group_name is not defined or not azure_security_group_name

- name: check azure_vnet_name value
  fail:
    msg: "azure_vnet_name is missing"
  when: azure_vnet_name is not defined or not azure_vnet_name

- name: check azure_vnet_resource_group value
  fail:
    msg: "azure_vnet_resource_group is missing"
  when: azure_vnet_resource_group is not defined or not azure_vnet_resource_group

- name: check azure_route_table_name value
  fail:
    msg: "azure_route_table_name is missing"
  when: azure_route_table_name is not defined or not azure_route_table_name

- name: check azure_loadbalancer_sku value
  fail:
    msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. Supported values are 'basic', 'standard'"
  when: azure_loadbalancer_sku not in ["basic", "standard"]

- name: "check azure_exclude_master_from_standard_lb is a bool"
  assert:
    that: azure_exclude_master_from_standard_lb | type_debug == 'bool'

- name: "check azure_disable_outbound_snat is a bool"
  assert:
    that: azure_disable_outbound_snat | type_debug == 'bool'

- name: "check azure_use_instance_metadata is a bool"
  assert:
    that: azure_use_instance_metadata | type_debug == 'bool'

- name: check azure_vmtype value
  fail:
    msg: "azure_vmtype is missing. Supported values are 'standard' or 'vmss'"
  when: azure_vmtype is not defined or not azure_vmtype

- name: check azure_cloud value
  fail:
    msg: "azure_cloud has an invalid value '{{ azure_cloud }}'. Supported values are 'AzureChinaCloud', 'AzureGermanCloud', 'AzurePublicCloud', 'AzureUSGovernmentCloud'."
  when: azure_cloud not in ["AzureChinaCloud", "AzureGermanCloud", "AzurePublicCloud", "AzureUSGovernmentCloud"]
@@ -0,0 +1,34 @@
---
- name: check openstack_auth_url value
  fail:
    msg: "openstack_auth_url is missing"
  when: openstack_auth_url is not defined or not openstack_auth_url

- name: check openstack_username value
  fail:
    msg: "openstack_username is missing"
  when: openstack_username is not defined or not openstack_username

- name: check openstack_password value
  fail:
    msg: "openstack_password is missing"
  when: openstack_password is not defined or not openstack_password

- name: check openstack_region value
  fail:
    msg: "openstack_region is missing"
  when: openstack_region is not defined or not openstack_region

- name: check openstack_tenant_id value
  fail:
    msg: "one of openstack_tenant_id or openstack_trust_id must be specified"
  when:
    - openstack_tenant_id is not defined or not openstack_tenant_id
    - openstack_trust_id is not defined

- name: check openstack_trust_id value
  fail:
    msg: "one of openstack_tenant_id or openstack_trust_id must be specified"
  when:
    - openstack_trust_id is not defined or not openstack_trust_id
    - openstack_tenant_id is not defined
@@ -0,0 +1,22 @@
---
- name: check vsphere environment variables
  fail:
    msg: "{{ item.name }} is missing"
  when: item.value is not defined or not item.value
  with_items:
    - name: vsphere_vcenter_ip
      value: "{{ vsphere_vcenter_ip }}"
    - name: vsphere_vcenter_port
      value: "{{ vsphere_vcenter_port }}"
    - name: vsphere_user
      value: "{{ vsphere_user }}"
    - name: vsphere_password
      value: "{{ vsphere_password }}"
    - name: vsphere_datacenter
      value: "{{ vsphere_datacenter }}"
    - name: vsphere_datastore
      value: "{{ vsphere_datastore }}"
    - name: vsphere_working_dir
      value: "{{ vsphere_working_dir }}"
    - name: vsphere_insecure
      value: "{{ vsphere_insecure }}"
57
kubespray/roles/kubernetes/node/tasks/facts.yml
Normal file
@@ -0,0 +1,57 @@
---
- block:
    - name: look up docker cgroup driver
      shell: "docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
      register: docker_cgroup_driver_result
      changed_when: false
      check_mode: no

    - name: set kubelet_cgroup_driver_detected fact for docker
      set_fact:
        kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}"
  when: container_manager == 'docker'

- block:
    - name: look up crio cgroup driver
      shell: "{{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
      register: crio_cgroup_driver_result
      changed_when: false

    - name: set kubelet_cgroup_driver_detected fact for crio
      set_fact:
        kubelet_cgroup_driver_detected: "{{ crio_cgroup_driver_result.stdout }}"
  when: container_manager == 'crio'

- name: set kubelet_cgroup_driver_detected fact for containerd
  set_fact:
    kubelet_cgroup_driver_detected: >-
      {%- if containerd_use_systemd_cgroup -%}systemd{%- else -%}cgroupfs{%- endif -%}
  when: container_manager == 'containerd'

- name: set kubelet_cgroup_driver
  set_fact:
    kubelet_cgroup_driver: "{{ kubelet_cgroup_driver_detected }}"
  when: kubelet_cgroup_driver is undefined

- name: set kubelet_cgroups options when cgroupfs is used
  set_fact:
    kubelet_runtime_cgroups: "{{ kubelet_runtime_cgroups_cgroupfs }}"
    kubelet_kubelet_cgroups: "{{ kubelet_kubelet_cgroups_cgroupfs }}"
  when: kubelet_cgroup_driver == 'cgroupfs'

- name: set kubelet_config_extra_args options when cgroupfs is used
  set_fact:
    kubelet_config_extra_args: "{{ kubelet_config_extra_args | combine(kubelet_config_extra_args_cgroupfs) }}"
  when: kubelet_cgroup_driver == 'cgroupfs'

- name: os specific vars
  include_vars: "{{ item }}"
  with_first_found:
    - files:
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}.yml"
        - "{{ ansible_os_family|lower }}.yml"
      skip: true
22
kubespray/roles/kubernetes/node/tasks/install.yml
Normal file
@@ -0,0 +1,22 @@
---
- name: install | Copy kubeadm binary from download dir
  copy:
    src: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}"
    dest: "{{ bin_dir }}/kubeadm"
    mode: 0755
    remote_src: true
  tags:
    - kubeadm
  when:
    - not inventory_hostname in groups['kube_control_plane']

- name: install | Copy kubelet binary from download dir
  copy:
    src: "{{ local_release_dir }}/kubelet-{{ kube_version }}-{{ image_arch }}"
    dest: "{{ bin_dir }}/kubelet"
    mode: 0755
    remote_src: true
  tags:
    - kubelet
    - upgrade
  notify: Node | restart kubelet
52
kubespray/roles/kubernetes/node/tasks/kubelet.yml
Normal file
@@ -0,0 +1,52 @@
---
- name: Set kubelet api version to v1beta1
  set_fact:
    kubeletConfig_api_version: v1beta1
  tags:
    - kubelet
    - kubeadm

- name: Write kubelet environment config file (kubeadm)
  template:
    src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
    dest: "{{ kube_config_dir }}/kubelet.env"
    setype: "{{ (preinstall_selinux_state != 'disabled') | ternary('etc_t', omit) }}"
    backup: yes
    mode: 0640
  notify: Node | restart kubelet
  tags:
    - kubelet
    - kubeadm

- name: Write kubelet config file
  template:
    src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubelet-config.yaml"
    mode: 0640
  notify: Kubelet | restart kubelet
  tags:
    - kubelet
    - kubeadm

- name: Write kubelet systemd init file
  template:
    src: "kubelet.service.j2"
    dest: "/etc/systemd/system/kubelet.service"
    backup: "yes"
    mode: 0644
  notify: Node | restart kubelet
  tags:
    - kubelet
    - kubeadm

- name: flush_handlers and reload-systemd
  meta: flush_handlers

- name: Enable kubelet
  service:
    name: kubelet
    enabled: yes
    state: started
  tags:
    - kubelet
  notify: Kubelet | restart kubelet
@@ -0,0 +1,34 @@
---
- name: haproxy | Cleanup potentially deployed nginx-proxy
  file:
    path: "{{ kube_manifest_dir }}/nginx-proxy.yml"
    state: absent

- name: haproxy | Make haproxy directory
  file:
    path: "{{ haproxy_config_dir }}"
    state: directory
    mode: 0755
    owner: root

- name: haproxy | Write haproxy configuration
  template:
    src: "loadbalancer/haproxy.cfg.j2"
    dest: "{{ haproxy_config_dir }}/haproxy.cfg"
    owner: root
    mode: 0755
    backup: yes

- name: haproxy | Get checksum from config
  stat:
    path: "{{ haproxy_config_dir }}/haproxy.cfg"
    get_attributes: no
    get_checksum: yes
    get_mime: no
  register: haproxy_stat

- name: haproxy | Write static pod
  template:
    src: manifests/haproxy.manifest.j2
    dest: "{{ kube_manifest_dir }}/haproxy.yml"
    mode: 0640
@@ -0,0 +1,13 @@
---
- name: kube-vip | Check cluster settings for kube-vip
  fail:
    msg: "kube-vip requires kube_proxy_strict_arp = true, see https://github.com/kube-vip/kube-vip/blob/main/docs/kubernetes/arp/index.md"
  when:
    - kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp
    - kube_vip_arp_enabled

- name: kube-vip | Write static pod
  template:
    src: manifests/kube-vip.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-vip.yml"
    mode: 0640
@@ -0,0 +1,34 @@
---
- name: nginx-proxy | Cleanup potentially deployed haproxy
  file:
    path: "{{ kube_manifest_dir }}/haproxy.yml"
    state: absent

- name: nginx-proxy | Make nginx directory
  file:
    path: "{{ nginx_config_dir }}"
    state: directory
    mode: 0700
    owner: root

- name: nginx-proxy | Write nginx-proxy configuration
  template:
    src: "loadbalancer/nginx.conf.j2"
    dest: "{{ nginx_config_dir }}/nginx.conf"
    owner: root
    mode: 0755
    backup: yes

- name: nginx-proxy | Get checksum from config
  stat:
    path: "{{ nginx_config_dir }}/nginx.conf"
    get_attributes: no
    get_checksum: yes
    get_mime: no
  register: nginx_stat

- name: nginx-proxy | Write static pod
  template:
    src: manifests/nginx-proxy.manifest.j2
    dest: "{{ kube_manifest_dir }}/nginx-proxy.yml"
    mode: 0640
193
kubespray/roles/kubernetes/node/tasks/main.yml
Normal file
@@ -0,0 +1,193 @@
---
- import_tasks: facts.yml
  tags:
    - facts

- import_tasks: pre_upgrade.yml
  tags:
    - kubelet

- name: Ensure /var/lib/cni exists
  file:
    path: /var/lib/cni
    state: directory
    mode: 0755

- import_tasks: install.yml
  tags:
    - kubelet

- import_tasks: loadbalancer/kube-vip.yml
  when:
    - is_kube_master
    - kube_vip_enabled
  tags:
    - kube-vip

- import_tasks: loadbalancer/nginx-proxy.yml
  when:
    - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'nginx'
  tags:
    - nginx

- import_tasks: loadbalancer/haproxy.yml
  when:
    - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'haproxy'
  tags:
    - haproxy

- name: Ensure nodePort range is reserved
  sysctl:
    name: net.ipv4.ip_local_reserved_ports
    value: "{{ kube_apiserver_node_port_range }}"
    sysctl_set: yes
    sysctl_file: "{{ sysctl_file_path }}"
    state: present
    reload: yes
  when: kube_apiserver_node_port_range is defined
  tags:
    - kube-proxy

- name: Verify if br_netfilter module exists
  command: "modinfo br_netfilter"
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can work around RH's conservative path management
  register: modinfo_br_netfilter
  failed_when: modinfo_br_netfilter.rc not in [0, 1]
  changed_when: false
  check_mode: no

- name: Verify br_netfilter module path exists
  file:
    path: /etc/modules-load.d
    state: directory
    mode: 0755

- name: Enable br_netfilter module
  modprobe:
    name: br_netfilter
    state: present
  when: modinfo_br_netfilter.rc == 0

- name: Persist br_netfilter module
  copy:
    dest: /etc/modules-load.d/kubespray-br_netfilter.conf
    content: br_netfilter
    mode: 0644
  when: modinfo_br_netfilter.rc == 0

# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled; when br_netfilter
# is not available as a module, check whether the sysctl key exists at all
- name: Check if bridge-nf-call-iptables key exists
  command: "sysctl net.bridge.bridge-nf-call-iptables"
  failed_when: false
  changed_when: false
  check_mode: no
  register: sysctl_bridge_nf_call_iptables

- name: Enable bridge-nf-call tables
  sysctl:
    name: "{{ item }}"
    state: present
    sysctl_file: "{{ sysctl_file_path }}"
    value: "1"
    reload: yes
  when: sysctl_bridge_nf_call_iptables.rc == 0
  with_items:
    - net.bridge.bridge-nf-call-iptables
    - net.bridge.bridge-nf-call-arptables
    - net.bridge.bridge-nf-call-ip6tables

- name: Modprobe Kernel Module for IPVS
  modprobe:
    name: "{{ item }}"
    state: present
  with_items:
    - ip_vs
    - ip_vs_rr
    - ip_vs_wrr
    - ip_vs_sh
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

- name: Modprobe nf_conntrack_ipv4
  modprobe:
    name: nf_conntrack_ipv4
    state: present
  register: modprobe_nf_conntrack_ipv4
  ignore_errors: true  # noqa ignore-errors
  when:
    - kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

- name: Persist ip_vs modules
  copy:
    dest: /etc/modules-load.d/kube_proxy-ipvs.conf
    mode: 0644
    content: |
      ip_vs
      ip_vs_rr
      ip_vs_wrr
      ip_vs_sh
      {% if modprobe_nf_conntrack_ipv4 is success -%}
      nf_conntrack_ipv4
      {%- endif -%}
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

- include_tasks: "cloud-credentials/{{ cloud_provider }}-credential-check.yml"
  when:
    - cloud_provider is defined
    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
  tags:
    - cloud-provider
    - facts

- name: Test if openstack_cacert is a base64 string
  set_fact:
    openstack_cacert_is_base64: "{% if openstack_cacert is search ('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}"
  when:
    - cloud_provider is defined
    - cloud_provider == 'openstack'
    - openstack_cacert is defined
    - openstack_cacert | length > 0
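# Example (descriptive): a PEM file path such as "/etc/ssl/certs/ca.pem" does
# not match the base64 regex above and is copied as a file; a raw base64 blob
# matches and is decoded into the destination file by the task below.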

- name: Write cacert file
  copy:
    src: "{{ openstack_cacert if not openstack_cacert_is_base64 else omit }}"
    content: "{{ openstack_cacert | b64decode if openstack_cacert_is_base64 else omit }}"
    dest: "{{ kube_config_dir }}/openstack-cacert.pem"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - cloud_provider is defined
    - cloud_provider == 'openstack'
    - openstack_cacert is defined
    - openstack_cacert | length > 0
  tags:
    - cloud-provider

- name: Write cloud-config
  template:
    src: "cloud-configs/{{ cloud_provider }}-cloud-config.j2"
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - cloud_provider is defined
    - cloud_provider in [ 'openstack', 'azure', 'vsphere', 'aws', 'gce' ]
  notify: Node | restart kubelet
  tags:
    - cloud-provider

- import_tasks: kubelet.yml
  tags:
    - kubelet
    - kubeadm
48
kubespray/roles/kubernetes/node/tasks/pre_upgrade.yml
Normal file
@@ -0,0 +1,48 @@
---
- name: "Pre-upgrade | check if kubelet container exists"
  shell: >-
    set -o pipefail &&
    {% if container_manager in ['crio', 'docker'] %}
    {{ docker_bin_dir }}/docker ps -af name=kubelet | grep kubelet
    {% elif container_manager == 'containerd' %}
    {{ bin_dir }}/crictl ps --all --name kubelet | grep kubelet
    {% endif %}
  args:
    executable: /bin/bash
  failed_when: false
  changed_when: false
  check_mode: no
  register: kubelet_container_check
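# kubelet_container_check.rc == 0 means a kubelet container was found (the grep
# matched); on hosts that already run kubelet directly on the host the command
# fails, and the migration tasks below are skipped.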

- name: "Pre-upgrade | copy /var/lib/cni from kubelet"
  command: >-
    {% if container_manager in ['crio', 'docker'] %}
    docker cp kubelet:/var/lib/cni /var/lib/cni
    {% elif container_manager == 'containerd' %}
    ctr run --rm --mount type=bind,src=/var/lib/cni,dst=/cnilibdir,options=rbind:rw kubelet kubelet-tmp sh -c 'cp /var/lib/cni/* /cnilibdir/'
    {% endif %}
  args:
    creates: "/var/lib/cni"
  failed_when: false
  when: kubelet_container_check.rc == 0

- name: "Pre-upgrade | ensure kubelet container service is stopped if using host deployment"
  service:
    name: kubelet
    state: stopped
  when: kubelet_container_check.rc == 0

- name: "Pre-upgrade | ensure kubelet container is removed if using host deployment"
  shell: >-
    {% if container_manager in ['crio', 'docker'] %}
    {{ docker_bin_dir }}/docker rm -fv kubelet
    {% elif container_manager == 'containerd' %}
    {{ bin_dir }}/crictl stop kubelet && {{ bin_dir }}/crictl rm kubelet
    {% endif %}
  failed_when: false
  changed_when: false
  register: remove_kubelet_container
  retries: 4
  until: remove_kubelet_container.rc == 0
  delay: 5
  when: kubelet_container_check.rc == 0
@@ -0,0 +1,11 @@
[Global]
zone={{ aws_zone|default("") }}
vpc={{ aws_vpc|default("") }}
subnetId={{ aws_subnet_id|default("") }}
routeTableId={{ aws_route_table_id|default("") }}
roleArn={{ aws_role_arn|default("") }}
kubernetesClusterTag={{ aws_kubernetes_cluster_tag|default("") }}
kubernetesClusterId={{ aws_kubernetes_cluster_id|default("") }}
disableSecurityGroupIngress={{ "true" if aws_disable_security_group_ingress|default(False) else "false" }}
disableStrictZoneCheck={{ "true" if aws_disable_strict_zone_check|default(False) else "false" }}
elbSecurityGroup={{ aws_elb_security_group|default("") }}
@@ -0,0 +1,26 @@
{
  "cloud": "{{ azure_cloud }}",
  "tenantId": "{{ azure_tenant_id }}",
  "subscriptionId": "{{ azure_subscription_id }}",
  "aadClientId": "{{ azure_aad_client_id }}",
  "aadClientSecret": "{{ azure_aad_client_secret }}",
  "resourceGroup": "{{ azure_resource_group }}",
  "location": "{{ azure_location }}",
  "subnetName": "{{ azure_subnet_name }}",
  "securityGroupName": "{{ azure_security_group_name }}",
  "securityGroupResourceGroup": "{{ azure_security_group_resource_group | default(azure_vnet_resource_group) }}",
  "vnetName": "{{ azure_vnet_name }}",
  "vnetResourceGroup": "{{ azure_vnet_resource_group }}",
  "routeTableName": "{{ azure_route_table_name }}",
  "routeTableResourceGroup": "{{ azure_route_table_resource_group | default(azure_vnet_resource_group) }}",
  "vmType": "{{ azure_vmtype }}",
{% if azure_primary_availability_set_name is defined %}
  "primaryAvailabilitySetName": "{{ azure_primary_availability_set_name }}",
{% endif %}
  "useInstanceMetadata": {{ azure_use_instance_metadata | lower }},
{% if azure_loadbalancer_sku == "standard" %}
  "excludeMasterFromStandardLB": {{ azure_exclude_master_from_standard_lb | lower }},
  "disableOutboundSNAT": {{ azure_disable_outbound_snat | lower }},
{% endif %}
  "loadBalancerSku": "{{ azure_loadbalancer_sku }}"
}
@@ -0,0 +1,3 @@
[global]
node-tags = {{ gce_node_tags }}
@@ -0,0 +1,54 @@
[Global]
auth-url="{{ openstack_auth_url }}"
username="{{ openstack_username }}"
password="{{ openstack_password }}"
region="{{ openstack_region }}"
{% if openstack_trust_id is defined and openstack_trust_id != "" %}
trust-id="{{ openstack_trust_id }}"
{% else %}
tenant-id="{{ openstack_tenant_id }}"
{% endif %}
{% if openstack_tenant_name is defined and openstack_tenant_name != "" %}
tenant-name="{{ openstack_tenant_name }}"
{% endif %}
{% if openstack_domain_name is defined and openstack_domain_name != "" %}
domain-name="{{ openstack_domain_name }}"
{% elif openstack_domain_id is defined and openstack_domain_id != "" %}
domain-id="{{ openstack_domain_id }}"
{% endif %}
{% if openstack_cacert is defined and openstack_cacert != "" %}
ca-file="{{ kube_config_dir }}/openstack-cacert.pem"
{% endif %}

[BlockStorage]
{% if openstack_blockstorage_version is defined %}
bs-version={{ openstack_blockstorage_version }}
{% endif %}
{% if openstack_blockstorage_ignore_volume_az is defined and openstack_blockstorage_ignore_volume_az|bool %}
ignore-volume-az={{ openstack_blockstorage_ignore_volume_az }}
{% endif %}
{% if node_volume_attach_limit is defined and node_volume_attach_limit != "" %}
node-volume-attach-limit="{{ node_volume_attach_limit }}"
{% endif %}

{% if openstack_lbaas_enabled and openstack_lbaas_subnet_id is defined %}
[LoadBalancer]
subnet-id={{ openstack_lbaas_subnet_id }}
{% if openstack_lbaas_floating_network_id is defined %}
floating-network-id={{ openstack_lbaas_floating_network_id }}
{% endif %}
{% if openstack_lbaas_use_octavia is defined %}
use-octavia={{ openstack_lbaas_use_octavia }}
{% endif %}
{% if openstack_lbaas_method is defined %}
lb-method={{ openstack_lbaas_method }}
{% endif %}
{% if openstack_lbaas_provider is defined %}
lb-provider={{ openstack_lbaas_provider }}
{% endif %}

create-monitor={{ openstack_lbaas_create_monitor }}
monitor-delay={{ openstack_lbaas_monitor_delay }}
monitor-timeout={{ openstack_lbaas_monitor_timeout }}
monitor-max-retries={{ openstack_lbaas_monitor_max_retries }}
{% endif %}
@@ -0,0 +1,36 @@
[Global]
user = "{{ vsphere_user }}"
password = "{{ vsphere_password }}"
port = {{ vsphere_vcenter_port }}
insecure-flag = {{ vsphere_insecure }}

datacenters = "{{ vsphere_datacenter }}"

[VirtualCenter "{{ vsphere_vcenter_ip }}"]

[Workspace]
server = "{{ vsphere_vcenter_ip }}"
datacenter = "{{ vsphere_datacenter }}"
folder = "{{ vsphere_working_dir }}"
default-datastore = "{{ vsphere_datastore }}"
{% if vsphere_resource_pool is defined and vsphere_resource_pool != "" %}
resourcepool-path = "{{ vsphere_resource_pool }}"
{% endif %}

[Disk]
scsicontrollertype = {{ vsphere_scsi_controller_type }}

{% if vsphere_public_network is defined and vsphere_public_network != "" %}
[Network]
public-network = {{ vsphere_public_network }}
{% endif %}

[Labels]
{% if vsphere_zone_category is defined and vsphere_zone_category != "" %}
zone = {{ vsphere_zone_category }}
{% endif %}
{% if vsphere_region_category is defined and vsphere_region_category != "" %}
region = {{ vsphere_region_category }}
{% endif %}
@@ -0,0 +1,2 @@
[Service]
Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %}
@@ -0,0 +1,151 @@
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
nodeStatusUpdateFrequency: "{{ kubelet_status_update_frequency }}"
failSwapOn: {{ kubelet_fail_swap_on|default(true) }}
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: {{ kubelet_authentication_token_webhook }}
  x509:
    clientCAFile: {{ kube_cert_dir }}/ca.crt
authorization:
{% if kubelet_authorization_mode_webhook %}
  mode: Webhook
{% else %}
  mode: AlwaysAllow
{% endif %}
{% if kubelet_enforce_node_allocatable is defined and kubelet_enforce_node_allocatable != "\"\"" %}
{% set kubelet_enforce_node_allocatable_list = kubelet_enforce_node_allocatable.split() %}
enforceNodeAllocatable:
{% for item in kubelet_enforce_node_allocatable_list %}
- {{ item }}
{% endfor %}
{% endif %}
staticPodPath: {{ kube_manifest_dir }}
cgroupDriver: {{ kubelet_cgroup_driver | default('systemd') }}
containerLogMaxFiles: {{ kubelet_logfiles_max_nr }}
containerLogMaxSize: {{ kubelet_logfiles_max_size }}
maxPods: {{ kubelet_max_pods }}
podPidsLimit: {{ kubelet_pod_pids_limit }}
address: {{ kubelet_bind_address }}
readOnlyPort: {{ kube_read_only_port }}
healthzPort: {{ kubelet_healthz_port }}
healthzBindAddress: {{ kubelet_healthz_bind_address }}
kubeletCgroups: {{ kubelet_kubelet_cgroups }}
clusterDomain: {{ dns_domain }}
{% if kubelet_protect_kernel_defaults|bool %}
protectKernelDefaults: true
{% endif %}
{% if kubelet_rotate_certificates|bool %}
rotateCertificates: true
{% endif %}
{% if kubelet_rotate_server_certificates|bool %}
serverTLSBootstrap: true
{% endif %}
{# DNS settings for kubelet #}
{% if enable_nodelocaldns %}
{% set kubelet_cluster_dns = [nodelocaldns_ip] %}
{% elif dns_mode in ['coredns'] %}
{% set kubelet_cluster_dns = [skydns_server] %}
{% elif dns_mode == 'coredns_dual' %}
{% set kubelet_cluster_dns = [skydns_server,skydns_server_secondary] %}
{% elif dns_mode == 'manual' %}
{% set kubelet_cluster_dns = [manual_dns_server] %}
{% else %}
{% set kubelet_cluster_dns = [] %}
{% endif %}
clusterDNS:
{% for dns_address in kubelet_cluster_dns %}
- {{ dns_address }}
{% endfor %}
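{# Example (illustrative): with nodelocaldns enabled and its default IP this
renders as:
clusterDNS:
- 169.254.25.10
#}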
{# Node reserved CPU/memory #}
kubeReserved:
{% if is_kube_master|bool %}
  cpu: {{ kube_master_cpu_reserved }}
  memory: {{ kube_master_memory_reserved }}
{% if kube_master_ephemeral_storage_reserved is defined %}
  ephemeral-storage: {{ kube_master_ephemeral_storage_reserved }}
{% endif %}
{% if kube_master_pid_reserved is defined %}
  pid: "{{ kube_master_pid_reserved }}"
{% endif %}
{% else %}
  cpu: {{ kube_cpu_reserved }}
  memory: {{ kube_memory_reserved }}
{% if kube_ephemeral_storage_reserved is defined %}
  ephemeral-storage: {{ kube_ephemeral_storage_reserved }}
{% endif %}
{% if kube_pid_reserved is defined %}
  pid: "{{ kube_pid_reserved }}"
{% endif %}
{% endif %}
{% if system_reserved is defined and system_reserved %}
systemReserved:
{% if is_kube_master|bool %}
  cpu: {{ system_master_cpu_reserved }}
  memory: {{ system_master_memory_reserved }}
{% if system_master_ephemeral_storage_reserved is defined %}
  ephemeral-storage: {{ system_master_ephemeral_storage_reserved }}
{% endif %}
{% if system_master_pid_reserved is defined %}
  pid: "{{ system_master_pid_reserved }}"
{% endif %}
{% else %}
  cpu: {{ system_cpu_reserved }}
  memory: {{ system_memory_reserved }}
{% if system_ephemeral_storage_reserved is defined %}
  ephemeral-storage: {{ system_ephemeral_storage_reserved }}
{% endif %}
{% if system_pid_reserved is defined %}
  pid: "{{ system_pid_reserved }}"
{% endif %}
{% endif %}
{% endif %}
{% if is_kube_master|bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %}
evictionHard:
  {{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }}
{% elif not is_kube_master|bool and eviction_hard is defined and eviction_hard %}
evictionHard:
  {{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }}
{% endif %}
resolvConf: "{{ kube_resolv_conf }}"
{% if kubelet_config_extra_args %}
{{ kubelet_config_extra_args | to_nice_yaml(indent=2) }}
{% endif %}
{% if inventory_hostname in groups['kube_node'] and kubelet_node_config_extra_args %}
{{ kubelet_node_config_extra_args | to_nice_yaml(indent=2) }}
{% endif %}
{% if kubelet_feature_gates or kube_feature_gates %}
featureGates:
{% for feature in (kubelet_feature_gates | default(kube_feature_gates, true)) %}
  {{ feature|replace("=", ": ") }}
{% endfor %}
{% endif %}
{% if tls_min_version is defined %}
tlsMinVersion: {{ tls_min_version }}
{% endif %}
{% if tls_cipher_suites is defined %}
tlsCipherSuites:
{% for tls in tls_cipher_suites %}
- {{ tls }}
{% endfor %}
{% endif %}
{% if kubelet_event_record_qps %}
eventRecordQPS: {{ kubelet_event_record_qps }}
{% endif %}
shutdownGracePeriod: {{ kubelet_shutdown_grace_period }}
shutdownGracePeriodCriticalPods: {{ kubelet_shutdown_grace_period_critical_pods }}
{% if not kubelet_fail_swap_on|default(true) %}
memorySwap:
  swapBehavior: {{ kubelet_swap_behavior|default("LimitedSwap") }}
{% endif %}
{% if kubelet_streaming_connection_idle_timeout is defined %}
streamingConnectionIdleTimeout: {{ kubelet_streaming_connection_idle_timeout }}
{% endif %}
{% if kubelet_make_iptables_util_chains is defined %}
makeIPTablesUtilChains: {{ kubelet_make_iptables_util_chains | bool }}
{% endif %}
{% if kubelet_seccomp_default is defined %}
seccompDefault: {{ kubelet_seccomp_default | bool }}
{% endif %}
@@ -0,0 +1,43 @@
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
KUBELET_ADDRESS="--node-ip={{ kubelet_address }}"
{% if kube_override_hostname|default('') %}
KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% endif %}

{# Base kubelet args #}
{% set kubelet_args_base -%}
{# start kubeadm specific settings #}
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \
--config={{ kube_config_dir }}/kubelet-config.yaml \
--kubeconfig={{ kube_config_dir }}/kubelet.conf \
{# end kubeadm specific settings #}
--container-runtime=remote \
--container-runtime-endpoint={{ cri_socket }} \
--runtime-cgroups={{ kubelet_runtime_cgroups }} \
{% endset %}

{# Kubelet node taints for gpu #}
{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
{% if inventory_hostname in nvidia_gpu_nodes and node_taints is defined %}
{% set dummy = node_taints.append('nvidia.com/gpu=:NoSchedule') %}
{% elif inventory_hostname in nvidia_gpu_nodes and node_taints is not defined %}
{% set node_taints = [] %}
{% set dummy = node_taints.append('nvidia.com/gpu=:NoSchedule') %}
{% endif %}
{% endif %}

KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube_node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
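{# Note: kubelet_custom_flags and kubelet_node_custom_flags accept either a
   plain string or a list of flags; KUBELET_ARGS above flattens both forms,
   and the node-only flags are appended only for hosts in the kube_node group. #}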
{% if kubelet_flexvolumes_plugins_dir is defined %}
KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin == "cloud" %}
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce", "external"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
{% else %}
KUBELET_CLOUDPROVIDER=""
{% endif %}

PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
34
kubespray/roles/kubernetes/node/templates/kubelet.service.j2
Normal file
@@ -0,0 +1,34 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After={{ container_manager }}.service
{% if container_manager == 'docker' %}
Wants=docker.socket
{% else %}
Wants={{ container_manager }}.service
{% endif %}

[Service]
EnvironmentFile=-{{ kube_config_dir }}/kubelet.env
ExecStart={{ bin_dir }}/kubelet \
    $KUBE_LOGTOSTDERR \
    $KUBE_LOG_LEVEL \
    $KUBELET_API_SERVER \
    $KUBELET_ADDRESS \
    $KUBELET_PORT \
    $KUBELET_HOSTNAME \
    $KUBELET_ARGS \
    $DOCKER_SOCKET \
    $KUBELET_NETWORK_PLUGIN \
    $KUBELET_VOLUME_PLUGIN \
    $KUBELET_CLOUDPROVIDER
Restart=always
RestartSec=10s
{% if kubelet_systemd_hardening %}
# Hardening setup
IPAddressDeny=any
IPAddressAllow={{ kubelet_secure_addresses }}
{% endif %}

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,43 @@
global
  maxconn 4000
  log 127.0.0.1 local0

defaults
  mode http
  log global
  option httplog
  option dontlognull
  option http-server-close
  option redispatch
  retries 5
  timeout http-request 5m
  timeout queue 5m
  timeout connect 30s
  timeout client {{ loadbalancer_apiserver_keepalive_timeout }}
  timeout server 15m
  timeout http-keep-alive 30s
  timeout check 30s
  maxconn 4000

{% if loadbalancer_apiserver_healthcheck_port is defined -%}
frontend healthz
  bind *:{{ loadbalancer_apiserver_healthcheck_port }}
  mode http
  monitor-uri /healthz
{% endif %}

frontend kube_api_frontend
  bind 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
  mode tcp
  option tcplog
  default_backend kube_api_backend

backend kube_api_backend
  mode tcp
  balance leastconn
  default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100
  option httpchk GET /healthz
  http-check expect status 200
  {% for host in groups['kube_control_plane'] -%}
  server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }} check check-ssl verify none
  {% endfor -%}
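# Note on the backend above (descriptive): health probes use "check-ssl verify
# none" because the apiserver endpoint is TLS but backends are addressed by IP,
# so the probe speaks TLS without validating the serving certificate.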
@@ -0,0 +1,60 @@
error_log stderr notice;

worker_processes 2;
worker_rlimit_nofile 130048;
worker_shutdown_timeout 10s;

events {
  multi_accept on;
  use epoll;
  worker_connections 16384;
}

stream {
  upstream kube_apiserver {
    least_conn;
    {% for host in groups['kube_control_plane'] -%}
    server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }};
    {% endfor -%}
  }

  server {
    listen 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
    {% if enable_dual_stack_networks -%}
    listen [::]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
    {% endif -%}
    proxy_pass kube_apiserver;
    proxy_timeout 10m;
    proxy_connect_timeout 1s;
  }
}
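# Note (descriptive): the stream{} block above proxies the apiserver at L4 as a
# plain TCP passthrough, so TLS stays end-to-end between kubelet and apiserver;
# the http{} block below only serves the optional local health endpoints.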

http {
  aio threads;
  aio_write on;
  tcp_nopush on;
  tcp_nodelay on;

  keepalive_timeout {{ loadbalancer_apiserver_keepalive_timeout }};
  keepalive_requests 100;
  reset_timedout_connection on;
  server_tokens off;
  autoindex off;

  {% if loadbalancer_apiserver_healthcheck_port is defined -%}
  server {
    listen {{ loadbalancer_apiserver_healthcheck_port }};
    {% if enable_dual_stack_networks -%}
    listen [::]:{{ loadbalancer_apiserver_healthcheck_port }};
    {% endif -%}
    location /healthz {
      access_log off;
      return 200;
    }
    location /stub_status {
      stub_status on;
      access_log off;
    }
  }
  {% endif %}
}
@@ -0,0 +1,42 @@
apiVersion: v1
kind: Pod
metadata:
  name: haproxy
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: kube-haproxy
  annotations:
    haproxy-cfg-checksum: "{{ haproxy_stat.stat.checksum }}"
spec:
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
  nodeSelector:
    kubernetes.io/os: linux
  priorityClassName: system-node-critical
  containers:
    - name: haproxy
      image: {{ haproxy_image_repo }}:{{ haproxy_image_tag }}
      imagePullPolicy: {{ k8s_image_pull_policy }}
      resources:
        requests:
          cpu: {{ loadbalancer_apiserver_cpu_requests }}
          memory: {{ loadbalancer_apiserver_memory_requests }}
{% if loadbalancer_apiserver_healthcheck_port is defined -%}
      livenessProbe:
        httpGet:
          path: /healthz
          port: {{ loadbalancer_apiserver_healthcheck_port }}
      readinessProbe:
        httpGet:
          path: /healthz
          port: {{ loadbalancer_apiserver_healthcheck_port }}
{% endif -%}
      volumeMounts:
        - mountPath: /usr/local/etc/haproxy/
          name: etc-haproxy
          readOnly: true
  volumes:
    - name: etc-haproxy
      hostPath:
        path: {{ haproxy_config_dir }}
@@ -0,0 +1,93 @@
# Inspired by https://github.com/kube-vip/kube-vip/blob/v0.5.5/pkg/kubevip/config_generator.go#L13
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  name: kube-vip
  namespace: kube-system
spec:
  containers:
    - args:
        - manager
      env:
        - name: vip_arp
          value: {{ kube_vip_arp_enabled | string | to_json }}
        - name: port
          value: {{ kube_apiserver_port | string | to_json }}
{% if kube_vip_interface %}
        - name: vip_interface
          value: {{ kube_vip_interface | string | to_json }}
{% endif %}
{% if kube_vip_services_interface %}
        - name: vip_servicesinterface
          value: {{ kube_vip_services_interface | string | to_json }}
{% endif %}
{% if kube_vip_cidr %}
        - name: vip_cidr
          value: {{ kube_vip_cidr | string | to_json }}
{% endif %}
{% if kube_vip_controlplane_enabled %}
        - name: cp_enable
          value: "true"
        - name: cp_namespace
          value: kube-system
        - name: vip_ddns
          value: {{ kube_vip_ddns_enabled | string | to_json }}
{% endif %}
{% if kube_vip_services_enabled %}
        - name: svc_enable
          value: "true"
{% endif %}
{% if kube_vip_leader_election_enabled %}
        - name: vip_leaderelection
          value: "true"
        - name: vip_leaseduration
          value: "5"
        - name: vip_renewdeadline
          value: "3"
        - name: vip_retryperiod
          value: "1"
{% endif %}
{% if kube_vip_bgp_enabled %}
        - name: bgp_enable
          value: "true"
        - name: bgp_routerid
          value: {{ kube_vip_bgp_routerid | string | to_json }}
        - name: bgp_as
          value: {{ kube_vip_local_as | string | to_json }}
        - name: bgp_peeraddress
          value: {{ kube_vip_bgp_peeraddress | to_json }}
        - name: bgp_peerpass
          value: {{ kube_vip_bgp_peerpass | to_json }}
        - name: bgp_peeras
          value: {{ kube_vip_bgp_peeras | string | to_json }}
{% if kube_vip_bgppeers %}
        - name: bgp_peers
          value: {{ kube_vip_bgppeers | join(',') | to_json }}
{% endif %}
{% endif %}
        - name: address
          value: {{ kube_vip_address | to_json }}
      image: {{ kube_vip_image_repo }}:{{ kube_vip_image_tag }}
      imagePullPolicy: {{ k8s_image_pull_policy }}
      name: kube-vip
      resources: {}
      securityContext:
        capabilities:
          add:
            - NET_ADMIN
            - NET_RAW
      volumeMounts:
        - mountPath: /etc/kubernetes/admin.conf
          name: kubeconfig
  hostAliases:
    - hostnames:
        - kubernetes
      ip: 127.0.0.1
  hostNetwork: true
  volumes:
    - hostPath:
        path: /etc/kubernetes/admin.conf
      name: kubeconfig
status: {}
@@ -0,0 +1,42 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-proxy
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: kube-nginx
  annotations:
    nginx-cfg-checksum: "{{ nginx_stat.stat.checksum }}"
spec:
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
  nodeSelector:
    kubernetes.io/os: linux
  priorityClassName: system-node-critical
  containers:
    - name: nginx-proxy
      image: {{ nginx_image_repo }}:{{ nginx_image_tag }}
      imagePullPolicy: {{ k8s_image_pull_policy }}
      resources:
        requests:
          cpu: {{ loadbalancer_apiserver_cpu_requests }}
          memory: {{ loadbalancer_apiserver_memory_requests }}
{% if loadbalancer_apiserver_healthcheck_port is defined -%}
      livenessProbe:
        httpGet:
          path: /healthz
          port: {{ loadbalancer_apiserver_healthcheck_port }}
      readinessProbe:
        httpGet:
          path: /healthz
          port: {{ loadbalancer_apiserver_healthcheck_port }}
{% endif -%}
      volumeMounts:
        - mountPath: /etc/nginx
          name: etc-nginx
          readOnly: true
  volumes:
    - name: etc-nginx
      hostPath:
        path: {{ nginx_config_dir }}
@@ -0,0 +1,19 @@
---
apiVersion: v1
kind: Config
clusters:
  - name: local
    cluster:
      certificate-authority: {{ kube_cert_dir }}/ca.pem
      server: {{ kube_apiserver_endpoint }}
users:
  - name: kubelet
    user:
      client-certificate: {{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem
      client-key: {{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem
contexts:
  - context:
      cluster: local
      user: kubelet
    name: kubelet-{{ cluster_name }}
current-context: kubelet-{{ cluster_name }}
2
kubespray/roles/kubernetes/node/vars/fedora.yml
Normal file
@@ -0,0 +1,2 @@
---
kube_resolv_conf: "/run/systemd/resolve/resolv.conf"
2
kubespray/roles/kubernetes/node/vars/ubuntu-18.yml
Normal file
@@ -0,0 +1,2 @@
---
kube_resolv_conf: "/run/systemd/resolve/resolv.conf"
2
kubespray/roles/kubernetes/node/vars/ubuntu-20.yml
Normal file
@@ -0,0 +1,2 @@
---
kube_resolv_conf: "/run/systemd/resolve/resolv.conf"
2
kubespray/roles/kubernetes/node/vars/ubuntu-22.yml
Normal file
@@ -0,0 +1,2 @@
---
kube_resolv_conf: "/run/systemd/resolve/resolv.conf"
107
kubespray/roles/kubernetes/preinstall/defaults/main.yml
Normal file
@@ -0,0 +1,107 @@
---
# Set to true to allow pre-checks to fail and continue deployment
ignore_assert_errors: false

epel_enabled: false
# Kubespray sets this to true after clusterDNS is running to apply changes to the host resolv.conf
dns_late: false

common_required_pkgs:
  - "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1', 'openssl') }}"
  - curl
  - rsync
  - socat
  - unzip
  - e2fsprogs
  - xfsprogs
  - ebtables
  - bash-completion
  - tar

# Set to true if your network does not support IPv6
# This may be necessary for pulling Docker images from
# the GCE docker repository
disable_ipv6_dns: false

kube_owner: kube
kube_cert_group: kube-cert
kube_config_dir: /etc/kubernetes
kube_cert_dir: "{{ kube_config_dir }}/ssl"
kube_cert_compat_dir: /etc/kubernetes/pki
kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec

# Flatcar Container Linux by Kinvolk cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf

# All inventory hostnames will be written into each /etc/hosts file.
populate_inventory_to_hosts_file: true
# K8S Api FQDN will be written into /etc/hosts file.
populate_loadbalancer_apiserver_to_hosts_file: true

sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"

etc_hosts_localhost_entries:
  127.0.0.1:
    expected:
      - localhost
      - localhost.localdomain
  ::1:
    expected:
      - localhost6
      - localhost6.localdomain
    unexpected:
      - localhost
      - localhost.localdomain

# Minimal memory requirement in MB for safety checks
minimal_node_memory_mb: 1024
minimal_master_memory_mb: 1500

yum_repo_dir: /etc/yum.repos.d

# Number of times the package install task should be retried
pkg_install_retries: 4

# Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
ping_access_ip: true

## NTP Settings
# Start the ntpd or chrony service and enable it at system boot.
ntp_enabled: false
# The package to install which provides NTP functionality.
# The default is ntp for most platforms, or chrony on RHEL/CentOS 7 and later.
# The ntp_package can be one of ['ntp', 'chrony']
ntp_package: >-
  {% if ansible_os_family == "RedHat" -%}
  chrony
  {%- else -%}
  ntp
  {%- endif -%}

# Manage the NTP configuration file.
ntp_manage_config: false
# Specify the NTP servers
# Only takes effect when ntp_manage_config is true.
ntp_servers:
  - "0.pool.ntp.org iburst"
  - "1.pool.ntp.org iburst"
  - "2.pool.ntp.org iburst"
  - "3.pool.ntp.org iburst"
# Restrict NTP access to these hosts.
# Only takes effect when ntp_manage_config is true.
ntp_restrict:
  - "127.0.0.1"
  - "::1"
# The NTP driftfile path
# Only takes effect when ntp_manage_config is true.
ntp_driftfile: /var/lib/ntp/ntp.drift
# Enabling tinker panic is useful when running NTP in a VM environment.
# Only takes effect when ntp_manage_config is true.
ntp_tinker_panic: false

# Force time sync immediately after NTP is installed, which is useful on a newly installed system.
ntp_force_sync_immediately: false

# Set the timezone for your server. eg: "Etc/UTC","Etc/GMT-8". If not set, the timezone will not change.
ntp_timezone: ""
@@ -0,0 +1,4 @@
#!/bin/sh
make_resolv_conf() {
    :
}
73
kubespray/roles/kubernetes/preinstall/gen-gitinfos.sh
Executable file
@@ -0,0 +1,73 @@
#!/bin/sh
set -e

# Text color variables
txtbld=$(tput bold)             # Bold
bldred=${txtbld}$(tput setaf 1) # red
bldgre=${txtbld}$(tput setaf 2) # green
bldylw=${txtbld}$(tput setaf 3) # yellow
txtrst=$(tput sgr0)             # Reset
err=${bldred}ERROR${txtrst}
info=${bldgre}INFO${txtrst}
warn=${bldylw}WARNING${txtrst}

usage()
{
    cat << EOF
Generates a file which contains useful git information

Usage : $(basename $0) [global|diff]
ex :
    Generate git information
        $(basename $0) global
    Generate diff from latest tag
        $(basename $0) diff
EOF
}

if [ $# != 1 ]; then
    printf "\n$err : Needs 1 argument\n"
    usage
    exit 2
fi;

current_commit=$(git rev-parse HEAD)
latest_tag=$(git describe --abbrev=0 --tags)
latest_tag_commit=$(git show-ref -s ${latest_tag})
tags_list=$(git tag --points-at "${latest_tag}")

case ${1} in
    "global")
        cat<<EOF
deployment date="$(date '+%d-%m-%Y %Hh%M')"
deployment_timestamp=$(date '+%s')
user="$USER"
current commit (HEAD)="${current_commit}"
current_commit_timestamp=$(git log -1 --pretty=format:%ct)
latest tag(s) (current branch)="${tags_list}"
latest tag commit="${latest_tag_commit}"
current branch="$(git rev-parse --abbrev-ref HEAD)"
branches list="$(git describe --contains --all HEAD)"
git root directory="$(git rev-parse --show-toplevel)"
EOF
        if ! git diff-index --quiet HEAD --; then
            printf "unstaged changes=\"/etc/.git-ansible.diff\""
        fi

        if [ "${current_commit}" = "${latest_tag_commit}" ]; then
            printf "\ncurrent_commit_tag=\"${latest_tag}\""
        else
            printf "\nlast tag was "$(git describe --tags | awk -F- '{print $2}')" commits ago =\""
            printf "$(git log --pretty=format:" %h - %s" ${latest_tag}..HEAD)\""
        fi
        ;;

    "diff")
        git diff
        ;;
    *)
        usage
        printf "$err: Unknown argument ${1}"
        exit 1;
        ;;
esac
132
kubespray/roles/kubernetes/preinstall/handlers/main.yml
Normal file
@@ -0,0 +1,132 @@
---
- name: Preinstall | propagate resolvconf to k8s components
  command: /bin/true
  notify:
    - Preinstall | reload kubelet
    - Preinstall | kube-controller configured
    - Preinstall | kube-apiserver configured
    - Preinstall | restart kube-controller-manager docker
    - Preinstall | restart kube-controller-manager crio/containerd
    - Preinstall | restart kube-apiserver docker
    - Preinstall | restart kube-apiserver crio/containerd
    - Preinstall | wait for the apiserver to be running
  when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos

- name: Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
  command: /bin/true
  notify:
    - Preinstall | apply resolvconf cloud-init
    - Preinstall | reload kubelet
  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

- name: Preinstall | apply resolvconf cloud-init
  command: /usr/bin/coreos-cloudinit --from-file {{ resolveconf_cloud_init_conf }}
  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

- name: Preinstall | update resolvconf for networkmanager
  command: /bin/true
  notify:
    - Preinstall | reload NetworkManager
    - Preinstall | reload kubelet

- name: Preinstall | reload NetworkManager
  service:
    name: NetworkManager.service
    state: restarted

- name: Preinstall | reload kubelet
  service:
    name: kubelet
    state: restarted
  notify:
    - Preinstall | kube-controller configured
    - Preinstall | kube-apiserver configured
    - Preinstall | restart kube-controller-manager docker
    - Preinstall | restart kube-controller-manager crio/containerd
    - Preinstall | restart kube-apiserver docker
    - Preinstall | restart kube-apiserver crio/containerd
  when: not dns_early|bool

# FIXME(mattymo): Also restart for kubeadm mode
- name: Preinstall | kube-apiserver configured
  stat:
    path: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kube_apiserver_set
  when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'

# FIXME(mattymo): Also restart for kubeadm mode
- name: Preinstall | kube-controller configured
  stat:
    path: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kube_controller_set
  when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'

- name: Preinstall | restart kube-controller-manager docker
  shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
  when:
    - container_manager == "docker"
    - inventory_hostname in groups['kube_control_plane']
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - kube_controller_set.stat.exists

- name: Preinstall | restart kube-controller-manager crio/containerd
  shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
  when:
    - container_manager in ['crio', 'containerd']
    - inventory_hostname in groups['kube_control_plane']
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - kube_controller_set.stat.exists

- name: Preinstall | restart kube-apiserver docker
  shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
  when:
    - container_manager == "docker"
    - inventory_hostname in groups['kube_control_plane']
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'

- name: Preinstall | restart kube-apiserver crio/containerd
  shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
  register: preinstall_restart_apiserver
  retries: 10
  until: preinstall_restart_apiserver.rc == 0
  delay: 1
  when:
    - container_manager in ['crio', 'containerd']
    - inventory_hostname in groups['kube_control_plane']
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'

# When running this as the last phase ensure we wait for kube-apiserver to come up
- name: Preinstall | wait for the apiserver to be running
  uri:
    url: "{{ kube_apiserver_endpoint }}/healthz"
    validate_certs: no
  register: result
  until: result.status == 200
  retries: 60
  delay: 1
  when:
    - dns_late
    - inventory_hostname in groups['kube_control_plane']
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'

- name: Preinstall | Restart systemd-resolved
  service:
    name: systemd-resolved
    state: restarted

- name: Preinstall | restart ntp
  service:
    name: "{{ ntp_service_name }}"
    state: restarted
  when: ntp_enabled
8
kubespray/roles/kubernetes/preinstall/meta/main.yml
Normal file
@@ -0,0 +1,8 @@
---
dependencies:
  - role: adduser
    user: "{{ addusers.kube }}"
    when:
      - not is_fedora_coreos
    tags:
      - kubelet
29
kubespray/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
Normal file
@@ -0,0 +1,29 @@
---
- name: Remove swapfile from /etc/fstab
  mount:
    name: "{{ item }}"
    fstype: swap
    state: absent
  with_items:
    - swap
    - none

# kubelet fails even if ansible_swaptotal_mb = 0
- name: check swap
  command: /sbin/swapon -s
  register: swapon
  changed_when: no

- name: Disable swap
  command: /sbin/swapoff -a
  when:
    - swapon.stdout
    - kubelet_fail_swap_on | default(True)
  ignore_errors: "{{ ansible_check_mode }}"  # noqa ignore-errors

- name: Disable swapOnZram for Fedora
  command: touch /etc/systemd/zram-generator.conf
  when:
    - swapon.stdout
    - ansible_distribution in ['Fedora']
    - kubelet_fail_swap_on | default(True)
@@ -0,0 +1,316 @@
---
- name: Stop if either kube_control_plane or kube_node group is empty
  assert:
    that: "groups.get('{{ item }}')"
  with_items:
    - kube_control_plane
    - kube_node
  run_once: true
  when: not ignore_assert_errors

- name: Stop if etcd group is empty in external etcd mode
  assert:
    that: groups.get('etcd')
    fail_msg: "Group 'etcd' cannot be empty in external etcd mode"
  run_once: true
  when:
    - not ignore_assert_errors
    - etcd_deployment_type != "kubeadm"

- name: Stop if non systemd OS type
  assert:
    that: ansible_service_mgr == "systemd"
  when: not ignore_assert_errors

- name: Stop if unknown OS
  assert:
    that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'Suse', 'openSUSE Leap', 'openSUSE Tumbleweed', 'ClearLinux', 'OracleLinux', 'AlmaLinux', 'Rocky', 'Amazon', 'Kylin Linux Advanced Server', 'UnionTech', 'openEuler']
    msg: "{{ ansible_distribution }} is not a known OS"
  when: not ignore_assert_errors

- name: Stop if unknown network plugin
  assert:
    that: kube_network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud', 'cilium', 'cni', 'kube-ovn', 'kube-router', 'macvlan']
    msg: "{{ kube_network_plugin }} is not supported"
  when:
    - kube_network_plugin is defined
    - not ignore_assert_errors

- name: Stop if unsupported version of Kubernetes
  assert:
    that: kube_version is version(kube_version_min_required, '>=')
    msg: "The current release of Kubespray only supports versions of Kubernetes newer than {{ kube_version_min_required }} - you are trying to apply {{ kube_version }}"
  when: not ignore_assert_errors

# simplify this items-list when https://github.com/ansible/ansible/issues/15753 is resolved
- name: "Stop if known booleans are set as strings (Use JSON format on CLI: -e \"{'key': true }\")"
  assert:
    that: item.value|type_debug == 'bool'
    msg: "{{ item.value }} isn't a bool"
  run_once: yes
  with_items:
    - { name: download_run_once, value: "{{ download_run_once }}" }
    - { name: deploy_netchecker, value: "{{ deploy_netchecker }}" }
    - { name: download_always_pull, value: "{{ download_always_pull }}" }
    - { name: helm_enabled, value: "{{ helm_enabled }}" }
    - { name: openstack_lbaas_enabled, value: "{{ openstack_lbaas_enabled }}" }
  when: not ignore_assert_errors

- name: Stop if even number of etcd hosts
  assert:
    that: groups.etcd|length is not divisibleby 2
  when:
    - not ignore_assert_errors
    - inventory_hostname in groups.get('etcd',[])

- name: Stop if memory is too small for masters
  assert:
    that: ansible_memtotal_mb >= minimal_master_memory_mb
  when:
    - not ignore_assert_errors
    - inventory_hostname in groups['kube_control_plane']

- name: Stop if memory is too small for nodes
  assert:
    that: ansible_memtotal_mb >= minimal_node_memory_mb
  when:
    - not ignore_assert_errors
    - inventory_hostname in groups['kube_node']

# This assertion will fail on the safe side: One can indeed schedule more pods
# on a node than the CIDR-range has space for when additional pods use the host
# network namespace. It is impossible to ascertain the number of such pods at
# provisioning time, so to establish a guarantee, we factor these out.
# NOTICE: the check blatantly ignores the inet6-case
- name: Guarantee that enough network address space is available for all pods
  assert:
    that: "{{ (kubelet_max_pods | default(110)) | int <= (2 ** (32 - kube_network_node_prefix | int)) - 2 }}"
    msg: "Do not schedule more pods on a node than there are inet addresses available."
  when:
    - not ignore_assert_errors
    - inventory_hostname in groups['k8s_cluster']
    - kube_network_node_prefix is defined
    - kube_network_plugin != 'calico'

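As a quick illustration of the arithmetic behind the assertion above (a minimal standalone sketch with assumed example values, not taken from this commit): a kube_network_node_prefix of 24 gives each node 2 ** (32 - 24) - 2 = 254 usable pod addresses, so a kubelet_max_pods of 110 passes, while a /26 prefix (62 usable addresses) would fail.

# Standalone sketch of the capacity check; both values are assumed examples.
- hosts: localhost
  gather_facts: false
  vars:
    kube_network_node_prefix: 24  # assumed example value
    kubelet_max_pods: 110         # assumed example value
  tasks:
    - name: Show per-node pod address capacity versus max pods
      debug:
        msg: "capacity={{ (2 ** (32 - kube_network_node_prefix | int)) - 2 }}, max_pods={{ kubelet_max_pods }}"
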
- name: Stop if ip var does not match local ips
  assert:
    that: (ip in ansible_all_ipv4_addresses) or (ip in ansible_all_ipv6_addresses)
    msg: "IPv4: '{{ ansible_all_ipv4_addresses }}' and IPv6: '{{ ansible_all_ipv6_addresses }}' do not contain '{{ ip }}'"
  when:
    - not ignore_assert_errors
    - ip is defined

- name: Ensure ping package
  package:
    name: >-
      {%- if ansible_os_family == 'Debian' -%}
      iputils-ping
      {%- else -%}
      iputils
      {%- endif -%}
    state: present
  when:
    - access_ip is defined
    - not ignore_assert_errors
    - ping_access_ip
    - not is_fedora_coreos
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

- name: Stop if access_ip is not pingable
  command: ping -c1 {{ access_ip }}
  when:
    - access_ip is defined
    - not ignore_assert_errors
    - ping_access_ip

- name: Stop if RBAC is not enabled when dashboard is enabled
  assert:
    that: rbac_enabled
  when:
    - dashboard_enabled
    - not ignore_assert_errors

- name: Stop if RBAC is not enabled when OCI cloud controller is enabled
  assert:
    that: rbac_enabled
  when:
    - cloud_provider is defined and cloud_provider == "oci"
    - not ignore_assert_errors

- name: Stop if kernel version is too low
  assert:
    that: ansible_kernel.split('-')[0] is version('4.9.17', '>=')
  when:
    - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool
    - not ignore_assert_errors

- name: Stop if bad hostname
  assert:
    that: inventory_hostname is match("[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
    msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character"
  when: not ignore_assert_errors

- name: check cloud_provider value
  assert:
    that: cloud_provider in ['gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', 'external']
    msg: "If set, the 'cloud_provider' var must be set either to 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci' or 'external'"
  when:
    - cloud_provider is defined
    - not ignore_assert_errors
  tags:
    - cloud-provider
    - facts

- name: "Check that kube_service_addresses is a network range"
  assert:
    that:
      - kube_service_addresses | ipaddr('net')
    msg: "kube_service_addresses = '{{ kube_service_addresses }}' is not a valid network range"
  run_once: yes

- name: "Check that kube_pods_subnet is a network range"
  assert:
    that:
      - kube_pods_subnet | ipaddr('net')
    msg: "kube_pods_subnet = '{{ kube_pods_subnet }}' is not a valid network range"
  run_once: yes

- name: "Check that kube_pods_subnet does not collide with kube_service_addresses"
  assert:
    that:
      - kube_pods_subnet | ipaddr(kube_service_addresses) | string == 'None'
    msg: "kube_pods_subnet cannot be the same network segment as kube_service_addresses"
  run_once: yes

- name: "Check that IP range is enough for the nodes"
  assert:
    that:
      - 2 ** (kube_network_node_prefix - kube_pods_subnet | ipaddr('prefix')) >= groups['k8s_cluster'] | length
    msg: "Not enough IPs are available for the desired node count."
  when: kube_network_plugin != 'calico'
  run_once: yes

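For intuition on this node-count check (assumed example values, not from the commit): a kube_pods_subnet of 10.233.64.0/18 carved into /24 per-node blocks yields 2 ** (24 - 18) = 64 blocks, so inventories with up to 64 k8s_cluster hosts pass. A minimal sketch, assuming the same netaddr-backed ipaddr filter the role itself relies on:

# Standalone sketch of the node-count check; values are assumed examples.
- hosts: localhost
  gather_facts: false
  vars:
    kube_pods_subnet: 10.233.64.0/18  # assumed example value
    kube_network_node_prefix: 24      # assumed example value
  tasks:
    - name: Show how many per-node pod CIDRs fit into the pods subnet
      debug:
        msg: "{{ 2 ** (kube_network_node_prefix - kube_pods_subnet | ipaddr('prefix')) }} node blocks available"
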
- name: Stop if unknown dns mode
  assert:
    that: dns_mode in ['coredns', 'coredns_dual', 'manual', 'none']
    msg: "dns_mode can only be 'coredns', 'coredns_dual', 'manual' or 'none'"
  when: dns_mode is defined
  run_once: true

- name: Stop if unknown kube proxy mode
  assert:
    that: kube_proxy_mode in ['iptables', 'ipvs']
    msg: "kube_proxy_mode can only be 'iptables' or 'ipvs'"
  when: kube_proxy_mode is defined
  run_once: true

- name: Stop if unknown cert_management
  assert:
    that: cert_management|d('script') in ['script', 'none']
    msg: "cert_management can only be 'script' or 'none'"
  run_once: true

- name: Stop if unknown resolvconf_mode
  assert:
    that: resolvconf_mode in ['docker_dns', 'host_resolvconf', 'none']
    msg: "resolvconf_mode can only be 'docker_dns', 'host_resolvconf' or 'none'"
  when: resolvconf_mode is defined
  run_once: true

- name: Stop if etcd deployment type is not host, docker or kubeadm
  assert:
    that: etcd_deployment_type in ['host', 'docker', 'kubeadm']
    msg: "The etcd deployment type, 'etcd_deployment_type', must be host, docker or kubeadm"
  when:
    - inventory_hostname in groups.get('etcd',[])

- name: Stop if container manager is not docker, crio or containerd
  assert:
    that: container_manager in ['docker', 'crio', 'containerd']
    msg: "The container manager, 'container_manager', must be docker, crio or containerd"
  run_once: true

- name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker
  assert:
    that: etcd_deployment_type in ['host', 'kubeadm']
    msg: "The etcd deployment type, 'etcd_deployment_type', must be host or kubeadm when container_manager is not docker"
  when:
    - inventory_hostname in groups.get('etcd',[])
    - container_manager != 'docker'

# TODO: Clean this task up when we drop backward compatibility support for `etcd_kubeadm_enabled`
- name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker and etcd_kubeadm_enabled is not defined
  block:
    - name: Warn the user if they are still using `etcd_kubeadm_enabled`
      debug:
        msg: >
          "WARNING! => `etcd_kubeadm_enabled` is deprecated and will be removed in a future release.
          You can set `etcd_deployment_type` to `kubeadm` instead of setting `etcd_kubeadm_enabled` to `true`."
      changed_when: true

    - name: Stop if `etcd_kubeadm_enabled` is defined and `etcd_deployment_type` is not `kubeadm` or `host`
      assert:
        that: etcd_deployment_type == 'kubeadm'
        msg: >
          It is not possible to use `etcd_kubeadm_enabled` when `etcd_deployment_type` is set to {{ etcd_deployment_type }}.
          Unset the `etcd_kubeadm_enabled` variable and set `etcd_deployment_type` to desired deployment type (`host`, `kubeadm`, `docker`) instead."
      when: etcd_kubeadm_enabled
      run_once: yes
  when: etcd_kubeadm_enabled is defined

- name: Stop if download_localhost is enabled but download_run_once is not
  assert:
    that: download_run_once
    msg: "download_localhost requires download_run_once to be enabled"
  when: download_localhost

- name: Stop if kata_containers_enabled is enabled when container_manager is docker
  assert:
    that: container_manager != 'docker'
    msg: "kata_containers_enabled is supported only with containerd and CRI-O. See https://github.com/kata-containers/documentation/blob/1.11.4/how-to/run-kata-with-k8s.md#install-a-cri-implementation for details"
  when: kata_containers_enabled

- name: Stop if gvisor_enabled is enabled when container_manager is not containerd
  assert:
    that: container_manager == 'containerd'
    msg: "gvisor_enabled is only compatible with containerd. See https://github.com/kubernetes-sigs/kubespray/issues/7650 for details"
  when: gvisor_enabled

- name: Stop if download_localhost is enabled for Flatcar Container Linux
  assert:
    that: ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
    msg: "download_run_once is not supported for Flatcar Container Linux"
  when: download_run_once or download_force_cache

- name: Ensure minimum containerd version
  assert:
    that: containerd_version is version(containerd_min_version_required, '>=')
    msg: "containerd_version is too low. Minimum version {{ containerd_min_version_required }}"
  run_once: yes
  when:
    - containerd_version not in ['latest', 'edge', 'stable']
    - container_manager == 'containerd'

- name: Stop if using deprecated containerd_config variable
  assert:
    that: containerd_config is not defined
    msg: "Variable containerd_config is now deprecated. See https://github.com/kubernetes-sigs/kubespray/blob/master/inventory/sample/group_vars/all/containerd.yml for details."
  when:
    - containerd_config is defined
    - not ignore_assert_errors

- name: Stop if auto_renew_certificates is enabled when certificates are managed externally (kube_external_ca_mode is true)
  assert:
    that: not auto_renew_certificates
    msg: "Variable auto_renew_certificates must be disabled when CA are managed externally: kube_external_ca_mode = true"
  when:
    - kube_external_ca_mode
    - not ignore_assert_errors

- name: Stop if using deprecated comma separated list for admission plugins
  assert:
    that: "',' not in kube_apiserver_enable_admission_plugins[0]"
    msg: "Comma-separated list for kube_apiserver_enable_admission_plugins is now deprecated, use separate list items for each plugin."
  when:
    - kube_apiserver_enable_admission_plugins is defined
    - kube_apiserver_enable_admission_plugins | length > 0
279
kubespray/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
Normal file
@@ -0,0 +1,279 @@
---
- name: Force binaries directory for Flatcar Container Linux by Kinvolk
  set_fact:
    bin_dir: "/opt/bin"
  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
  tags:
    - facts

- name: Set os_family fact for Kylin Linux Advanced Server and openEuler
  set_fact:
    ansible_os_family: "RedHat"
    ansible_distribution_major_version: "8"
  when: ansible_distribution in ["Kylin Linux Advanced Server", "openEuler"]
  tags:
    - facts

- name: check if booted with ostree
  stat:
    path: /run/ostree-booted
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: ostree

- name: set is_fedora_coreos
  lineinfile:
    path: /etc/os-release
    line: "VARIANT_ID=coreos"
    state: present
  check_mode: yes
  register: os_variant_coreos
  changed_when: false

- name: set is_fedora_coreos
  set_fact:
    is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"

- name: check resolvconf
  command: which resolvconf
  register: resolvconf
  failed_when: false
  changed_when: false
  check_mode: no

- name: check existence of /etc/resolvconf/resolv.conf.d
  stat:
    path: /etc/resolvconf/resolv.conf.d
    get_attributes: no
    get_checksum: no
    get_mime: no
  failed_when: false
  register: resolvconfd_path

- name: check status of /etc/resolv.conf
  stat:
    path: /etc/resolv.conf
    follow: no
    get_attributes: no
    get_checksum: no
    get_mime: no
  failed_when: false
  register: resolvconf_stat

- block:

    - name: get content of /etc/resolv.conf
      slurp:
        src: /etc/resolv.conf
      register: resolvconf_slurp

    - name: get currently configured nameservers
      set_fact:
        configured_nameservers: "{{ resolvconf_slurp.content | b64decode | regex_findall('^nameserver\\s*(.*)', multiline=True) | ipaddr }}"
      when: resolvconf_slurp.content is defined

  when: resolvconf_stat.stat.exists is defined and resolvconf_stat.stat.exists

- name: Stop if /etc/resolv.conf has no configured nameservers
  assert:
    that: configured_nameservers|length>0
    fail_msg: "nameservers must not be empty in /etc/resolv.conf"
  when:
    - not ignore_assert_errors
    - configured_nameservers is defined
    - not (upstream_dns_servers is defined and upstream_dns_servers|length > 0)
    - not (disable_host_nameservers | default(false))

- name: NetworkManager | Check if host has NetworkManager
  # noqa 303 Should we use service_facts for this?
  command: systemctl is-active --quiet NetworkManager.service
  register: networkmanager_enabled
  failed_when: false
  changed_when: false
  check_mode: false

- name: check systemd-resolved
  # noqa 303 Should we use service_facts for this?
  command: systemctl is-active systemd-resolved
  register: systemd_resolved_enabled
  failed_when: false
  changed_when: false
  check_mode: no

- name: set default dns if remove_default_searchdomains is false
  set_fact:
    default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"]
  when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0)

- name: set dns facts
  set_fact:
    resolvconf: >-
      {%- if resolvconf.rc == 0 and resolvconfd_path.stat.isdir is defined and resolvconfd_path.stat.isdir -%}true{%- else -%}false{%- endif -%}
    bogus_domains: |-
      {% for d in default_searchdomains|default([]) + searchdomains|default([]) -%}
      {{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./
      {%- endfor %}
    cloud_resolver: "{{ ['169.254.169.254'] if cloud_provider is defined and cloud_provider == 'gce' else
                        ['169.254.169.253'] if cloud_provider is defined and cloud_provider == 'aws' else
                        [] }}"

- name: check if kubelet is configured
  stat:
    path: "{{ kube_config_dir }}/kubelet.env"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubelet_configured
  changed_when: false

- name: check if early DNS configuration stage
  set_fact:
    dns_early: "{{ not kubelet_configured.stat.exists }}"

- name: target resolv.conf files
  set_fact:
    resolvconffile: /etc/resolv.conf
    base: >-
      {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/base{%- endif -%}
    head: >-
      {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%}
  when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos

- name: target temporary resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS)
  set_fact:
    resolvconffile: /tmp/resolveconf_cloud_init_conf
  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] or is_fedora_coreos

- name: check if /etc/dhclient.conf exists
  stat:
    path: /etc/dhclient.conf
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: dhclient_stat

- name: target dhclient conf file for /etc/dhclient.conf
  set_fact:
    dhclientconffile: /etc/dhclient.conf
  when: dhclient_stat.stat.exists

- name: check if /etc/dhcp/dhclient.conf exists
  stat:
    path: /etc/dhcp/dhclient.conf
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: dhcp_dhclient_stat

- name: target dhclient conf file for /etc/dhcp/dhclient.conf
  set_fact:
    dhclientconffile: /etc/dhcp/dhclient.conf
  when: dhcp_dhclient_stat.stat.exists

- name: target dhclient hook file for Red Hat family
  set_fact:
    dhclienthookfile: /etc/dhcp/dhclient.d/zdnsupdate.sh
  when: ansible_os_family == "RedHat"

- name: target dhclient hook file for Debian family
  set_fact:
    dhclienthookfile: /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
  when: ansible_os_family == "Debian"

- name: generate search domains to resolvconf
  set_fact:
    searchentries:
      search {{ (default_searchdomains|default([]) + searchdomains|default([])) | join(' ') }}
    domainentry:
      domain {{ dns_domain }}
    supersede_search:
      supersede domain-search "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join('", "') }}";
    supersede_domain:
      supersede domain-name "{{ dns_domain }}";

- name: pick coredns cluster IP or default resolver
  set_fact:
    coredns_server: |-
      {%- if dns_mode == 'coredns' and not dns_early|bool -%}
      {{ [ skydns_server ] }}
      {%- elif dns_mode == 'coredns_dual' and not dns_early|bool -%}
      {{ [ skydns_server ] + [ skydns_server_secondary ] }}
      {%- elif dns_mode == 'manual' and not dns_early|bool -%}
      {{ ( manual_dns_server.split(',') | list) }}
      {%- elif dns_mode == 'none' and not dns_early|bool -%}
      []
      {%- elif dns_early|bool -%}
      {{ upstream_dns_servers|default([]) }}
      {%- endif -%}

# This task should only run after cluster/nodelocal DNS is up, otherwise all DNS lookups will timeout
- name: generate nameservers for resolvconf, including cluster DNS
  set_fact:
    nameserverentries: |-
      {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([]) + (configured_nameservers|d([]) if not disable_host_nameservers|d()|bool else [])) | unique | join(',') }}
    supersede_nameserver:
      supersede domain-name-servers {{ ( ( [nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }};
  when: not dns_early or dns_late

# This task should run instead of the above task when cluster/nodelocal DNS hasn't
# been deployed yet (like scale.yml/cluster.yml) or when it's down (reset.yml)
- name: generate nameservers for resolvconf, not including cluster DNS
  set_fact:
    nameserverentries: |-
      {{ ( nameservers|d([]) + cloud_resolver|d([]) + configured_nameservers|d([])) | unique | join(',') }}
    supersede_nameserver:
      supersede domain-name-servers {{ ( nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }};
  when: dns_early and not dns_late

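To make the precedence between these two variants concrete, an illustrative rendering with assumed inputs (none of these values come from the commit itself):

# Assumed inputs: enable_nodelocaldns=true, nodelocaldns_ip=169.254.25.10,
#                 nameservers=['1.1.1.1'], cloud_provider=gce (cloud_resolver=['169.254.169.254'])
# First variant (cluster DNS up, no host-configured nameservers kept):
#   nameserverentries:    "169.254.25.10,1.1.1.1,169.254.169.254"
#   supersede_nameserver: supersede domain-name-servers 169.254.25.10, 1.1.1.1, 169.254.169.254;
# Second variant (early stage or reset) drops the cluster DNS entry, e.g.:
#   nameserverentries:    "1.1.1.1,169.254.169.254" plus any configured_nameservers
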
- name: gather os specific variables
  include_vars: "{{ item }}"
  with_first_found:
    - files:
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}.yml"
        - "{{ ansible_os_family|lower }}.yml"
        - defaults.yml
      paths:
        - ../vars
      skip: true

- name: set etcd vars if using kubeadm mode
  set_fact:
    etcd_cert_dir: "{{ kube_cert_dir }}"
    kube_etcd_cacert_file: "etcd/ca.crt"
    kube_etcd_cert_file: "apiserver-etcd-client.crt"
    kube_etcd_key_file: "apiserver-etcd-client.key"
  when:
    - etcd_deployment_type == "kubeadm"

- name: check /usr readonly
  stat:
    path: "/usr"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: usr

- name: set alternate flexvolume path
  set_fact:
    kubelet_flexvolumes_plugins_dir: /var/lib/kubelet/volumeplugins
  when: not usr.stat.writeable

- block:
    - name: Ensure IPv6DualStack featureGate is set when enable_dual_stack_networks is true
      set_fact:
        kube_feature_gates: "{{ kube_feature_gates + [ 'IPv6DualStack=true' ] }}"
      when:
        - not 'IPv6DualStack=true' in kube_feature_gates

    - name: Ensure IPv6DualStack kubeadm featureGate is set when enable_dual_stack_networks is true
      set_fact:
        kubeadm_feature_gates: "{{ kubeadm_feature_gates + [ 'IPv6DualStack=true' ] }}"
      when:
        - not 'IPv6DualStack=true' in kubeadm_feature_gates
  when:
    - enable_dual_stack_networks
    - kube_version is version('v1.24.0', '<')
@@ -0,0 +1,105 @@
---
- name: Create kubernetes directories
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ kube_owner }}"
    mode: 0755
  when: inventory_hostname in groups['k8s_cluster']
  become: true
  tags:
    - kubelet
    - k8s-secrets
    - kube-controller-manager
    - kube-apiserver
    - bootstrap-os
    - apps
    - network
    - master
    - node
  with_items:
    - "{{ kube_config_dir }}"
    - "{{ kube_cert_dir }}"
    - "{{ kube_manifest_dir }}"
    - "{{ kube_script_dir }}"
    - "{{ kubelet_flexvolumes_plugins_dir }}"

- name: Create other directories
  file:
    path: "{{ item }}"
    state: directory
    owner: root
    mode: 0755
  when: inventory_hostname in groups['k8s_cluster']
  become: true
  tags:
    - kubelet
    - k8s-secrets
    - kube-controller-manager
    - kube-apiserver
    - bootstrap-os
    - apps
    - network
    - master
    - node
  with_items:
    - "{{ bin_dir }}"

- name: Check if kubernetes kubeadm compat cert dir exists
  stat:
    path: "{{ kube_cert_compat_dir }}"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kube_cert_compat_dir_check
  when:
    - inventory_hostname in groups['k8s_cluster']
    - kube_cert_dir != kube_cert_compat_dir

- name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498)
  file:
    src: "{{ kube_cert_dir }}"
    dest: "{{ kube_cert_compat_dir }}"
    state: link
    mode: 0755
  when:
    - inventory_hostname in groups['k8s_cluster']
    - kube_cert_dir != kube_cert_compat_dir
    - not kube_cert_compat_dir_check.stat.exists

- name: Create cni directories
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ kube_owner }}"
    mode: 0755
  with_items:
    - "/etc/cni/net.d"
    - "/opt/cni/bin"
    - "/var/lib/calico"
  when:
    - kube_network_plugin in ["calico", "weave", "canal", "flannel", "cilium", "kube-ovn", "kube-router", "macvlan"]
    - inventory_hostname in groups['k8s_cluster']
  tags:
    - network
    - cilium
    - calico
    - weave
    - canal
    - kube-ovn
    - kube-router
    - bootstrap-os

- name: Create local volume provisioner directories
  file:
    path: "{{ local_volume_provisioner_storage_classes[item].host_dir }}"
    state: directory
    owner: root
    group: root
    mode: "{{ local_volume_provisioner_directory_mode }}"
  with_items: "{{ local_volume_provisioner_storage_classes.keys() | list }}"
  when:
    - inventory_hostname in groups['k8s_cluster']
    - local_volume_provisioner_enabled
  tags:
    - persistent_volumes
@@ -0,0 +1,58 @@
---
- name: create temporary resolveconf cloud init file
  command: cp -f /etc/resolv.conf "{{ resolvconffile }}"
  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

- name: Add domain/search/nameservers/options to resolv.conf
  blockinfile:
    path: "{{ resolvconffile }}"
    block: |-
      {% for item in [domainentry] + [searchentries] -%}
      {{ item }}
      {% endfor %}
      {% for item in nameserverentries.split(',') %}
      nameserver {{ item }}
      {% endfor %}
      options ndots:{{ ndots }} timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }}
    state: present
    insertbefore: BOF
    create: yes
    backup: "{{ not resolvconf_stat.stat.islnk }}"
    marker: "# Ansible entries {mark}"
    mode: 0644
  notify: Preinstall | propagate resolvconf to k8s components

- name: Remove search/domain/nameserver options before block
  replace:
    path: "{{ item[0] }}"
    regexp: '^{{ item[1] }}[^#]*(?=# Ansible entries BEGIN)'
    backup: "{{ not resolvconf_stat.stat.islnk }}"
  with_nested:
    - "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}"
    - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ]
  notify: Preinstall | propagate resolvconf to k8s components

- name: Remove search/domain/nameserver options after block
  replace:
    path: "{{ item[0] }}"
    regexp: '(# Ansible entries END\n(?:(?!^{{ item[1] }}).*\n)*)(?:^{{ item[1] }}.*\n?)+'
    replace: '\1'
    backup: "{{ not resolvconf_stat.stat.islnk }}"
  with_nested:
    - "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}"
    - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ]
  notify: Preinstall | propagate resolvconf to k8s components

- name: get temporary resolveconf cloud init file content
  command: cat {{ resolvconffile }}
  register: cloud_config
  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

- name: persist resolvconf cloud init file
  template:
    dest: "{{ resolveconf_cloud_init_conf }}"
    src: resolvconf.j2
    owner: root
    mode: 0644
  notify: Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
@@ -0,0 +1,9 @@
---
- name: Write resolved.conf
  template:
    src: resolved.conf.j2
    dest: /etc/systemd/resolved.conf
    owner: root
    group: root
    mode: 0644
  notify: Preinstall | Restart systemd-resolved
@@ -0,0 +1,28 @@
---
- name: NetworkManager | Ensure NetworkManager conf.d dir
  file:
    path: "/etc/NetworkManager/conf.d"
    state: directory
    recurse: yes

- name: NetworkManager | Prevent NetworkManager from managing Calico interfaces (cali*/tunl*/vxlan.calico)
  copy:
    content: |
      [keyfile]
      unmanaged-devices+=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico
    dest: /etc/NetworkManager/conf.d/calico.conf
    mode: 0644
  when:
    - kube_network_plugin == "calico"
  notify: Preinstall | reload NetworkManager

# TODO: add other network_plugin interfaces

- name: NetworkManager | Prevent NetworkManager from managing K8S interfaces (kube-ipvs0/nodelocaldns)
  copy:
    content: |
      [keyfile]
      unmanaged-devices+=interface-name:kube-ipvs0;interface-name:nodelocaldns
    dest: /etc/NetworkManager/conf.d/k8s.conf
    mode: 0644
  notify: Preinstall | reload NetworkManager
@@ -0,0 +1,35 @@
---
- name: NetworkManager | Add nameservers to NM configuration
  ini_file:
    path: /etc/NetworkManager/conf.d/dns.conf
    section: global-dns-domain-*
    option: servers
    value: "{{ nameserverentries }}"
    mode: '0600'
    backup: yes
  notify: Preinstall | update resolvconf for networkmanager

- name: set default dns if remove_default_searchdomains is false
  set_fact:
    default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"]
  when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0)

- name: NetworkManager | Add DNS search to NM configuration
  ini_file:
    path: /etc/NetworkManager/conf.d/dns.conf
    section: global-dns
    option: searches
    value: "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join(',') }}"
    mode: '0600'
    backup: yes
  notify: Preinstall | update resolvconf for networkmanager

- name: NetworkManager | Add DNS options to NM configuration
  ini_file:
    path: /etc/NetworkManager/conf.d/dns.conf
    section: global-dns
    option: options
    value: "ndots:{{ ndots }};timeout:{{ dns_timeout|default('2') }};attempts:{{ dns_attempts|default('2') }};"
    mode: '0600'
    backup: yes
  notify: Preinstall | update resolvconf for networkmanager
@@ -0,0 +1,98 @@
---
- name: Update package management cache (zypper) - SUSE
  command: zypper -n --gpg-auto-import-keys ref
  register: make_cache_output
  until: make_cache_output is succeeded
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  when:
    - ansible_pkg_mgr == 'zypper'
  tags: bootstrap-os

- block:
    - name: Add Debian Backports apt repo
      apt_repository:
        repo: "deb http://deb.debian.org/debian {{ ansible_distribution_release }}-backports main"
        state: present
        filename: debian-backports

    - name: Set libseccomp2 pin priority to apt_preferences on Debian buster
      copy:
        content: |
          Package: libseccomp2
          Pin: release a={{ ansible_distribution_release }}-backports
          Pin-Priority: 1001
        dest: "/etc/apt/preferences.d/libseccomp2"
        owner: "root"
        mode: 0644
  when:
    - ansible_distribution == "Debian"
    - ansible_distribution_version == "10"
  tags:
    - bootstrap-os

- name: Update package management cache (APT)
  apt:
    update_cache: yes
    cache_valid_time: 3600
  when: ansible_os_family == "Debian"
  tags:
    - bootstrap-os

- name: Remove legacy docker repo file
  file:
    path: "{{ yum_repo_dir }}/docker.repo"
    state: absent
  when:
    - ansible_os_family == "RedHat"
    - not is_fedora_coreos

- name: Install python3-dnf for latest RedHat versions
  command: dnf install -y python3-dnf
  register: dnf_task_result
  until: dnf_task_result is succeeded
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  when:
    - ansible_distribution == "Fedora"
    - ansible_distribution_major_version|int >= 30
    - not is_fedora_coreos
  changed_when: False
  tags:
    - bootstrap-os

- name: Install epel-release on RHEL derivatives
  package:
    name: epel-release
    state: present
  when:
    - ansible_os_family == "RedHat"
    - not is_fedora_coreos
    - epel_enabled|bool
  tags:
    - bootstrap-os

- name: Update common_required_pkgs with ipvsadm when kube_proxy_mode is ipvs
  set_fact:
    common_required_pkgs: "{{ common_required_pkgs|default([]) + ['ipvsadm', 'ipset'] }}"
  when: kube_proxy_mode == 'ipvs'

- name: Install packages requirements
  package:
    name: "{{ required_pkgs | default([]) | union(common_required_pkgs|default([])) }}"
    state: present
  register: pkgs_task_result
  until: pkgs_task_result is succeeded
  retries: "{{ pkg_install_retries }}"
  delay: "{{ retry_stagger | random + 3 }}"
  when: not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos)
  tags:
    - bootstrap-os

- name: Install ipvsadm for ClearLinux
  package:
    name: ipvsadm
    state: present
  when:
    - ansible_os_family in ["ClearLinux"]
    - kube_proxy_mode == 'ipvs'
@@ -0,0 +1,138 @@
---
# Todo : selinux configuration
- name: Confirm selinux deployed
  stat:
    path: /etc/selinux/config
    get_attributes: no
    get_checksum: no
    get_mime: no
  when:
    - ansible_os_family == "RedHat"
    - "'Amazon' not in ansible_distribution"
  register: slc

- name: Set selinux policy
  selinux:
    policy: targeted
    state: "{{ preinstall_selinux_state }}"
  when:
    - ansible_os_family == "RedHat"
    - "'Amazon' not in ansible_distribution"
    - slc.stat.exists
  changed_when: False
  tags:
    - bootstrap-os

- name: Disable IPv6 DNS lookup
  lineinfile:
    dest: /etc/gai.conf
    line: "precedence ::ffff:0:0/96 100"
    state: present
    create: yes
    backup: yes
    mode: 0644
  when:
    - disable_ipv6_dns
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
  tags:
    - bootstrap-os

- name: Clean previously used sysctl file locations
  file:
    path: "/etc/sysctl.d/{{ item }}"
    state: absent
  with_items:
    - ipv4-ip_forward.conf
    - bridge-nf-call.conf

- name: Stat sysctl file configuration
  stat:
    path: "{{ sysctl_file_path }}"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: sysctl_file_stat
  tags:
    - bootstrap-os

- name: Change sysctl file path to link source if linked
  set_fact:
    sysctl_file_path: "{{ sysctl_file_stat.stat.lnk_source }}"
  when:
    - sysctl_file_stat.stat.islnk is defined
    - sysctl_file_stat.stat.islnk
  tags:
    - bootstrap-os

- name: Make sure sysctl file path folder exists
  file:
    name: "{{ sysctl_file_path | dirname }}"
    state: directory
    mode: 0755

- name: Enable ip forwarding
  sysctl:
    sysctl_file: "{{ sysctl_file_path }}"
    name: net.ipv4.ip_forward
    value: "1"
    state: present
    reload: yes

- name: Enable ipv6 forwarding
  sysctl:
    sysctl_file: "{{ sysctl_file_path }}"
    name: net.ipv6.conf.all.forwarding
    value: 1
    state: present
    reload: yes
  when: enable_dual_stack_networks | bool

- name: Check if we need to set fs.may_detach_mounts
  stat:
    path: /proc/sys/fs/may_detach_mounts
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: fs_may_detach_mounts
  ignore_errors: true  # noqa ignore-errors

- name: Set fs.may_detach_mounts if needed
  sysctl:
    sysctl_file: "{{ sysctl_file_path }}"
    name: fs.may_detach_mounts
    value: 1
    state: present
    reload: yes
  when: fs_may_detach_mounts.stat.exists | d(false)

- name: Ensure kube-bench parameters are set
  sysctl:
    sysctl_file: "{{ sysctl_file_path }}"
    name: "{{ item.name }}"
    value: "{{ item.value }}"
    state: present
    reload: yes
  with_items:
    - { name: kernel.keys.root_maxbytes, value: 25000000 }
    - { name: kernel.keys.root_maxkeys, value: 1000000 }
    - { name: kernel.panic, value: 10 }
    - { name: kernel.panic_on_oops, value: 1 }
    - { name: vm.overcommit_memory, value: 1 }
    - { name: vm.panic_on_oom, value: 0 }
  when: kubelet_protect_kernel_defaults|bool

- name: Check dummy module
  modprobe:
    name: dummy
    state: present
    params: 'numdummies=0'
  when: enable_nodelocaldns

- name: Set additional sysctl variables
  sysctl:
    sysctl_file: "{{ sysctl_file_path }}"
    name: "{{ item.name }}"
    value: "{{ item.value }}"
    state: present
    reload: yes
  with_items: "{{ additional_sysctl }}"
@@ -0,0 +1,79 @@
---
- name: Ensure NTP package
  package:
    name:
      - "{{ ntp_package }}"
    state: present

- name: Disable systemd-timesyncd
  service:
    name: systemd-timesyncd.service
    enabled: false
    state: stopped
  failed_when: false

- name: Set fact NTP settings
  set_fact:
    ntp_config_file: >-
      {% if ntp_package == "ntp" -%}
      /etc/ntp.conf
      {%- elif ansible_os_family in ['RedHat', 'Suse'] -%}
      /etc/chrony.conf
      {%- else -%}
      /etc/chrony/chrony.conf
      {%- endif -%}
    ntp_service_name: >-
      {% if ntp_package == "chrony" -%}
      chronyd
      {%- elif ansible_os_family == 'RedHat' -%}
      ntpd
      {%- else -%}
      ntp
      {%- endif %}

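An illustrative resolution of the two facts above, read straight off the Jinja branches (the OS/package combinations are assumed examples):

# ntp_package | ansible_os_family | ntp_config_file         | ntp_service_name
# chrony      | RedHat            | /etc/chrony.conf        | chronyd
# chrony      | Debian            | /etc/chrony/chrony.conf | chronyd
# ntp         | RedHat            | /etc/ntp.conf           | ntpd
# ntp         | Debian            | /etc/ntp.conf           | ntp
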
- name: Generate NTP configuration file.
  template:
    src: "{{ ntp_config_file | basename }}.j2"
    dest: "{{ ntp_config_file }}"
    mode: 0644
  notify: Preinstall | restart ntp
  when:
    - ntp_manage_config

- name: Stop the NTP daemon for immediate sync  # `ntpd -gq` and `chronyd -q` require the NTP daemon to be stopped
  service:
    name: "{{ ntp_service_name }}"
    state: stopped
  when:
    - ntp_force_sync_immediately

- name: Force Sync NTP Immediately
  command: >-
    timeout -k 60s 60s
    {% if ntp_package == "ntp" -%}
    ntpd -gq
    {%- else -%}
    chronyd -q
    {%- endif -%}
  when:
    - ntp_force_sync_immediately

- name: Ensure NTP service is started and enabled
  service:
    name: "{{ ntp_service_name }}"
    state: started
    enabled: true

- name: Ensure tzdata package
  package:
    name:
      - tzdata
    state: present
  when:
    - ntp_timezone

- name: Set timezone
  timezone:
    name: "{{ ntp_timezone }}"
  when:
    - ntp_timezone
@@ -0,0 +1,77 @@
---
- name: Hosts | create list from inventory
  set_fact:
    etc_hosts_inventory_block: |-
      {% for item in (groups['k8s_cluster'] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%}
      {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%}
      {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}
      {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }} {% else %} {{ item }}.{{ dns_domain }} {{ item }} {% endif %}

      {% endif %}
      {% endfor %}
  delegate_to: localhost
  connection: local
  delegate_facts: yes
  run_once: yes

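For reference, an illustrative rendering of the block above for a hypothetical inventory host (name node1, access_ip 10.0.0.11, dns_domain cluster.local; none of these values appear in the commit):

# 10.0.0.11 node1.cluster.local node1
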
- name: Hosts | populate inventory into hosts file
  blockinfile:
    path: /etc/hosts
    block: "{{ hostvars.localhost.etc_hosts_inventory_block }}"
    state: present
    create: yes
    backup: yes
    unsafe_writes: yes
    marker: "# Ansible inventory hosts {mark}"
    mode: 0644
  when: populate_inventory_to_hosts_file

- name: Hosts | populate kubernetes loadbalancer address into hosts file
  lineinfile:
    dest: /etc/hosts
    regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
    line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name }}"
    state: present
    backup: yes
    unsafe_writes: yes
  when:
    - populate_loadbalancer_apiserver_to_hosts_file
    - loadbalancer_apiserver is defined
    - loadbalancer_apiserver.address is defined

- name: Hosts | Retrieve hosts file content
  slurp:
    src: /etc/hosts
  register: etc_hosts_content

- name: Hosts | Extract existing entries for localhost from hosts file
  set_fact:
    etc_hosts_localhosts_dict: >-
      {%- set splitted = (item | regex_replace('[ \t]+', ' ')|regex_replace('#.*$')|trim).split( ' ') -%}
      {{ etc_hosts_localhosts_dict|default({}) | combine({splitted[0]: splitted[1::] }) }}
  with_items: "{{ (etc_hosts_content['content'] | b64decode).splitlines() }}"
  when:
    - etc_hosts_content.content is defined
    - (item is match('^::1 .*') or item is match('^127.0.0.1 .*'))

- name: Hosts | Update target hosts file entries dict with required entries
  set_fact:
    etc_hosts_localhosts_dict_target: >-
      {%- set target_entries = (etc_hosts_localhosts_dict|default({})).get(item.key, []) | difference(item.value.get('unexpected' ,[])) -%}
      {{ etc_hosts_localhosts_dict_target|default({}) | combine({item.key: (target_entries + item.value.expected)|unique}) }}
  loop: "{{ etc_hosts_localhost_entries|dict2items }}"

- name: Hosts | Update (if necessary) hosts file
  lineinfile:
    dest: /etc/hosts
    line: "{{ item.key }} {{ item.value|join(' ') }}"
    regexp: "^{{ item.key }}.*$"
    state: present
    backup: yes
    unsafe_writes: yes
  loop: "{{ etc_hosts_localhosts_dict_target|default({})|dict2items }}"

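A worked example of the reconciliation above, combining the etc_hosts_localhost_entries defaults from this commit with a hypothetical pre-existing line:

# Existing line:                  ::1 localhost localhost.localdomain localhost6
# minus 'unexpected' entries:     ['localhost6']
# plus 'expected', deduplicated:  ['localhost6', 'localhost6.localdomain']
# Line written back:              ::1 localhost6 localhost6.localdomain
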
# gather facts to update ansible_fqdn
- name: Update facts
  setup:
    gather_subset: min
@@ -0,0 +1,33 @@
---
- name: Configure dhclient to supersede search/domain/nameservers
  blockinfile:
    block: |-
      {% for item in [ supersede_domain, supersede_search, supersede_nameserver ] -%}
      {{ item }}
      {% endfor %}
    path: "{{ dhclientconffile }}"
    create: yes
    state: present
    insertbefore: BOF
    backup: yes
    marker: "# Ansible entries {mark}"
    mode: 0644
  notify: Preinstall | propagate resolvconf to k8s components

- name: Configure dhclient hooks for resolv.conf (non-RH)
  template:
    src: dhclient_dnsupdate.sh.j2
    dest: "{{ dhclienthookfile }}"
    owner: root
    mode: 0755
  notify: Preinstall | propagate resolvconf to k8s components
  when: ansible_os_family not in [ "RedHat", "Suse" ]

- name: Configure dhclient hooks for resolv.conf (RH-only)
  template:
    src: dhclient_dnsupdate_rh.sh.j2
    dest: "{{ dhclienthookfile }}"
    owner: root
    mode: 0755
  notify: Preinstall | propagate resolvconf to k8s components
  when: ansible_os_family == "RedHat"
@@ -0,0 +1,18 @@
---

# These tasks will undo changes done by kubespray in the past if needed (e.g. when upgrading from kubespray 2.0.x
# or when changing resolvconf_mode)

- name: Remove kubespray specific config from dhclient config
  blockinfile:
    path: "{{ dhclientconffile }}"
    state: absent
    backup: yes
    marker: "# Ansible entries {mark}"
  notify: Preinstall | propagate resolvconf to k8s components

- name: Remove kubespray specific dhclient hook
  file:
    path: "{{ dhclienthookfile }}"
    state: absent
  notify: Preinstall | propagate resolvconf to k8s components
@@ -0,0 +1,44 @@
|
||||
---
|
||||
|
||||
# Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time
|
||||
|
||||
- name: install growpart
|
||||
package:
|
||||
name: cloud-utils-growpart
|
||||
state: present
|
||||
|
||||
- name: Gather mounts facts
|
||||
setup:
|
||||
gather_subset: 'mounts'
|
||||
|
||||
- name: Search root filesystem device
|
||||
vars:
|
||||
query: "[?mount=='/'].device"
|
||||
_root_device: "{{ ansible_mounts|json_query(query) }}"
|
||||
set_fact:
|
||||
device: "{{ _root_device | first | regex_replace('([^0-9]+)[0-9]+', '\\1') }}"
|
||||
partition: "{{ _root_device | first | regex_replace('[^0-9]+([0-9]+)', '\\1') }}"
|
||||
root_device: "{{ _root_device }}"
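
A worked example (device names hypothetical): a root mount on /dev/sda1 makes
json_query return ['/dev/sda1'], which the filters above take apart as:

# _root_device | first           -> '/dev/sda1'
# strip the trailing digits      -> device: '/dev/sda'
# keep only the trailing digits  -> partition: '1'

Note the patterns assume a plain numeric suffix; NVMe-style names such as
/dev/nvme0n1p1 would not be split correctly by these regexes.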

- name: Check if growpart needs to be run
  command: growpart -N {{ device }} {{ partition }}
  failed_when: false
  changed_when: "'NOCHANGE:' not in growpart_needed.stdout"
  register: growpart_needed
  environment:
    LC_ALL: C
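
The -N flag makes growpart a dry run: with LC_ALL=C it prints a line starting
with "NOCHANGE:" when the partition already fills the disk, or a "CHANGE:"
line when it could grow, which is exactly what changed_when keys on. A sketch
of the two outputs (field values are made up; exact formatting varies by
cloud-utils version):

NOCHANGE: partition 1 is size 62912512. it cannot be grown
CHANGE: partition=1 start=2048 old: size=62912512 end=62914560 new: size=83883999 end=83886047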

- name: Check fs type
  command: file -Ls {{ root_device }}
  changed_when: false
  register: fs_type

- name: Run growpart  # noqa 503
  command: growpart {{ device }} {{ partition }}
  when: growpart_needed.changed
  environment:
    LC_ALL: C

- name: Run xfs_growfs  # noqa 503
  command: xfs_growfs {{ root_device }}
  when: growpart_needed.changed and 'XFS' in fs_type.stdout
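
growpart only enlarges the partition table entry; the filesystem inside it
still has to be grown separately. Because this task file targets CentOS 7 on
Azure, whose default root filesystem is XFS, only xfs_growfs is handled. An
ext4 root would need an extra task along these lines (an assumption, not part
of the original role):

- name: Run resize2fs  # hypothetical addition for ext4 roots
  command: resize2fs {{ root_device }}
  when: growpart_needed.changed and 'ext4' in fs_type.stdout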

134
kubespray/roles/kubernetes/preinstall/tasks/main.yml
Normal file
@@ -0,0 +1,134 @@
---
# Disable swap
- import_tasks: 0010-swapoff.yml
  when:
    - not dns_late
    - disable_swap

- import_tasks: 0020-verify-settings.yml
  when:
    - not dns_late
  tags:
    - asserts

- import_tasks: 0040-set_facts.yml
  tags:
    - resolvconf
    - facts

- import_tasks: 0050-create_directories.yml
  when:
    - not dns_late

- import_tasks: 0060-resolvconf.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - systemd_resolved_enabled.rc != 0
    - networkmanager_enabled.rc != 0
  tags:
    - bootstrap-os
    - resolvconf

- import_tasks: 0061-systemd-resolved.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - systemd_resolved_enabled.rc == 0
  tags:
    - bootstrap-os
    - resolvconf

- import_tasks: 0062-networkmanager-unmanaged-devices.yml
  when:
    - networkmanager_enabled.rc == 0
  tags:
    - bootstrap-os

- import_tasks: 0063-networkmanager-dns.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - networkmanager_enabled.rc == 0
  tags:
    - bootstrap-os
    - resolvconf
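
The rc checks above branch on how the node manages DNS: plain resolvconf,
systemd-resolved, or NetworkManager. A sketch of how such gate variables could
be registered (an assumption; the actual checks live in an earlier task file
such as 0040-set_facts.yml, which is not shown in this diff):

- name: Check if systemd-resolved is enabled  # assumed registration
  command: systemctl is-enabled systemd-resolved
  failed_when: false
  changed_when: false
  register: systemd_resolved_enabled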

- import_tasks: 0070-system-packages.yml
  when:
    - not dns_late
  tags:
    - bootstrap-os

- import_tasks: 0080-system-configurations.yml
  when:
    - not dns_late
  tags:
    - bootstrap-os

- import_tasks: 0081-ntp-configurations.yml
  when:
    - not dns_late
    - ntp_enabled
  tags:
    - bootstrap-os

- import_tasks: 0090-etchosts.yml
  when:
    - not dns_late
  tags:
    - bootstrap-os
    - etchosts

- import_tasks: 0100-dhclient-hooks.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'
    - dhclientconffile is defined
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
  tags:
    - bootstrap-os
    - resolvconf

- import_tasks: 0110-dhclient-hooks-undo.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode != 'host_resolvconf'
    - dhclientconffile is defined
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
  tags:
    - bootstrap-os
    - resolvconf

# We need to make sure the network is restarted early enough so that docker can
# later pick up the correct system nameservers and search domains; flushing
# handlers here runs any handler notified so far (e.g. "Preinstall | propagate
# resolvconf to k8s components") immediately instead of at the end of the play.
- name: Flush handlers
  meta: flush_handlers

- name: Check if we are running inside an Azure VM
  stat:
    path: /var/lib/waagent/
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: azure_check
  when:
    - not dns_late
  tags:
    - bootstrap-os

- import_tasks: 0120-growpart-azure-centos-7.yml
  when:
    - not dns_late
    - azure_check.stat.exists
    - ansible_os_family == "RedHat"
  tags:
    - bootstrap-os

- name: Run calico checks
  include_role:
    name: network_plugin/calico
    tasks_from: check
  when:
    - kube_network_plugin == 'calico'
    - not ignore_assert_errors
@@ -0,0 +1,3 @@
|
||||
; This file contains the information which identifies the deployment state relative to the git repo
|
||||
[default]
|
||||
{{ gitinfo.stdout }}

@@ -0,0 +1,27 @@
# {{ ansible_managed }}

# Specify one or more NTP servers.
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
{% for server in ntp_servers %}
server {{ server }}
{% endfor %}

# Record the rate at which the system clock gains/loses time.
driftfile /var/lib/chrony/drift

{% if ntp_tinker_panic is sameas true %}
# Force a time step whenever the offset exceeds the specified threshold.
# Useful for VMs that can be paused and much later resumed.
makestep 1.0 -1
{% else %}
# Allow the system clock to be stepped in the first three updates
# if its offset is larger than 1 second.
makestep 1.0 3
{% endif %}

# Enable kernel synchronization of the real-time clock (RTC).
rtcsync

# Specify directory for log files.
logdir /var/log/chrony
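
chrony's makestep directive takes "threshold limit": step the clock when the
measured offset exceeds threshold seconds, but only during the first limit
updates; a limit of -1 removes that cap, so a VM resumed after a long pause is
always stepped back instead of slowly slewed. A sketch of enabling that mode
from inventory (the variable name comes from the template above; the file
placement is an assumption):

# group_vars/all.yml (hypothetical location)
ntp_tinker_panic: true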

@@ -0,0 +1,13 @@
#!/bin/sh
#
# Append resolver options to /etc/resolv.conf after dhclient
# regenerates the file. See man (5) resolver for more details.
#
if [ "$reason" = "BOUND" ]; then
  if [ -n "$new_domain_search" ] || [ -n "$new_domain_name_servers" ]; then
    RESOLV_CONF=$(sed -r '/^options (timeout|attempts|ndots).*$/d' /etc/resolv.conf)
    OPTIONS="options timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }} ndots:{{ ndots }}"

    printf "%b\n" "$RESOLV_CONF\n$OPTIONS" > /etc/resolv.conf
  fi
fi
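
Net effect: every time dhclient binds a lease and rewrites resolv.conf, any
stale timeout/attempts/ndots options line is removed and a single
kubespray-controlled one is appended, so the file ends up roughly like this
(nameserver/search values are made up; the option values come from the role):

nameserver 10.0.0.2
search cluster.local
options timeout:2 attempts:2 ndots:5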