dsk-dev kubespray move
@@ -0,0 +1,31 @@
---
# Set etcd user/group
etcd_owner: etcd

# Note: This does not set up DNS entries. It simply adds the following DNS
# entries to the certificate
etcd_cert_alt_names:
  - "etcd.kube-system.svc.{{ dns_domain }}"
  - "etcd.kube-system.svc"
  - "etcd.kube-system"
  - "etcd"
etcd_cert_alt_ips: []

etcd_heartbeat_interval: "250"
etcd_election_timeout: "5000"

# etcd_snapshot_count: "10000"

etcd_metrics: "basic"

## A dictionary of extra environment variables to add to etcd.env, formatted like:
##  etcd_extra_vars:
##    var1: "value1"
##    var2: "value2"
## Note this is different from the etcd role, which uses an ETCD_ prefix, uppercase names, and underscores
etcd_extra_vars: {}

# etcd_quota_backend_bytes: "2147483648"
# etcd_max_request_bytes: "1572864"

etcd_compaction_retention: "8"
@@ -0,0 +1,118 @@
---
# bind address for kube-proxy
kube_proxy_bind_address: '0.0.0.0'

# acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
# default value of 'application/json'. This field will control all connections to the server used by a particular
# client.
kube_proxy_client_accept_content_types: ''

# burst allows extra queries to accumulate when a client is exceeding its rate.
kube_proxy_client_burst: 10

# contentType is the content type used when sending data to the server from this client.
kube_proxy_client_content_type: application/vnd.kubernetes.protobuf

# kubeconfig is the path to a KubeConfig file.
# Leave as empty string to generate from other fields
kube_proxy_client_kubeconfig: ''

# qps controls the number of queries per second allowed for this connection.
kube_proxy_client_qps: 5

# How often configuration from the apiserver is refreshed. Must be greater than 0.
kube_proxy_config_sync_period: 15m0s

### Conntrack
# maxPerCore is the maximum number of NAT connections to track
# per CPU core (0 to leave the limit as-is and ignore min).
kube_proxy_conntrack_max_per_core: 32768

# min is the minimum number of conntrack records to allocate,
# regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is).
kube_proxy_conntrack_min: 131072

# tcpCloseWaitTimeout is how long an idle conntrack entry
# in CLOSE_WAIT state will remain in the conntrack
# table. (e.g. '60s'). Must be greater than 0 to set.
kube_proxy_conntrack_tcp_close_wait_timeout: 1h0m0s

# tcpEstablishedTimeout is how long an idle TCP connection will be kept open
# (e.g. '2s'). Must be greater than 0 to set.
kube_proxy_conntrack_tcp_established_timeout: 24h0m0s

# Enables profiling via web interface on /debug/pprof handler.
# Profiling handlers will be handled by metrics server.
kube_proxy_enable_profiling: false

# bind address for kube-proxy health check
kube_proxy_healthz_bind_address: 0.0.0.0:10256

# If using the pure iptables proxy, SNAT everything. Note that it breaks any
# policy engine.
kube_proxy_masquerade_all: false

# If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with.
# Must be within the range [0, 31].
kube_proxy_masquerade_bit: 14

# The minimum interval of how often the iptables or ipvs rules can be refreshed as
# endpoints and services change (e.g. '5s', '1m', '2h22m').
kube_proxy_min_sync_period: 0s

# The maximum interval of how often iptables or ipvs rules are refreshed (e.g. '5s', '1m', '2h22m').
# Must be greater than 0.
kube_proxy_sync_period: 30s

# A comma-separated list of CIDRs which the ipvs proxier should not touch when cleaning up IPVS rules.
kube_proxy_exclude_cidrs: []

# The ipvs scheduler type when proxy mode is ipvs
# rr: round-robin
# lc: least connection
# dh: destination hashing
# sh: source hashing
# sed: shortest expected delay
# nq: never queue
kube_proxy_scheduler: rr

# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
# must be set to true for MetalLB, kube-vip (ARP enabled) to work
kube_proxy_strict_arp: false
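# For reference (applied by kube-proxy itself, not by this role): enabling
# strictARP corresponds to setting arp_ignore=1 and arp_announce=2 on kube-ipvs0.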

# kube_proxy_tcp_timeout is the timeout value used for idle IPVS TCP sessions.
# The default value is 0, which preserves the current timeout value on the system.
kube_proxy_tcp_timeout: 0s

# kube_proxy_tcp_fin_timeout is the timeout value used for IPVS TCP sessions after receiving a FIN.
# The default value is 0, which preserves the current timeout value on the system.
kube_proxy_tcp_fin_timeout: 0s

# kube_proxy_udp_timeout is the timeout value used for IPVS UDP packets.
# The default value is 0, which preserves the current timeout value on the system.
kube_proxy_udp_timeout: 0s

# The IP address and port for the metrics server to serve on
# (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces)
kube_proxy_metrics_bind_address: 127.0.0.1:10249

# A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
# The default empty string slice ([]) means to use all local addresses.
kube_proxy_nodeport_addresses: >-
  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
  [{{ kube_proxy_nodeport_addresses_cidr }}]
  {%- else -%}
  []
  {%- endif -%}
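# Example (illustrative): to restrict NodePorts to a single subnet, set the
# helper variable referenced above in your inventory, e.g.:
# kube_proxy_nodeport_addresses_cidr: 192.168.1.0/24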

# oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]
kube_proxy_oom_score_adj: -999

# portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
# in order to proxy service traffic. If unspecified, 0, or (0-0) then ports will be randomly chosen.
kube_proxy_port_range: ''

# udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
# Must be greater than 0. Only applicable for proxyMode=userspace.
kube_proxy_udp_idle_timeout: 250ms
@@ -0,0 +1,33 @@
---
# Extra args passed by kubeadm
kube_kubeadm_scheduler_extra_args: {}

# Associated interface must be reachable by the rest of the cluster, and by
# CLI/web clients.
kube_scheduler_bind_address: 0.0.0.0

# ClientConnection options (e.g. Burst, QPS) other than kubeconfig.
kube_scheduler_client_conn_extra_opts: {}

# Additional KubeSchedulerConfiguration settings (e.g. metricsBindAddress).
kube_scheduler_config_extra_opts: {}

# List of scheduler extenders (dicts), each holding the values of how to
# communicate with the extender.
kube_scheduler_extenders: []
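# Example (illustrative; field names follow the upstream KubeSchedulerConfiguration Extender type):
# kube_scheduler_extenders:
#   - urlPrefix: "http://scheduler-extender.example.com/"
#     filterVerb: filter
#     prioritizeVerb: prioritize
#     weight: 1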

# Leader Election options (e.g. ResourceName, RetryPeriod) other than
# LeaseDuration and RenewDeadline, which are defined in the following vars.
kube_scheduler_leader_elect_extra_opts: {}

# Leader election lease duration
kube_scheduler_leader_elect_lease_duration: 15s

# Leader election renew deadline
kube_scheduler_leader_elect_renew_deadline: 10s

# List of scheduling profiles (dicts) supported by kube-scheduler
kube_scheduler_profiles: []
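# Example (illustrative):
# kube_scheduler_profiles:
#   - schedulerName: default-scheduler
#     plugins:
#       score:
#         disabled:
#           - name: PodTopologySpread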

# Extra volume mounts
scheduler_extra_volumes: {}
@@ -0,0 +1,230 @@
---
# disable upgrade cluster
upgrade_cluster_setup: false

# By default the external API listens on all interfaces; this can be changed to
# listen on a specific address/interface.
# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost,
# loadbalancer_apiserver_localhost (nginx/haproxy) will also deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}.
kube_apiserver_bind_address: 0.0.0.0

# A port range to reserve for services with NodePort visibility.
# Inclusive at both ends of the range.
kube_apiserver_node_port_range: "30000-32767"

# ETCD backend for k8s data
kube_apiserver_storage_backend: etcd3

# CIS 1.2.26
# Validate that the service account token
# in the request is actually present in etcd.
kube_apiserver_service_account_lookup: true

kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem

# Associated interfaces must be reachable by the rest of the cluster, and by
# CLI/web clients.
kube_controller_manager_bind_address: 0.0.0.0

# Leader election lease durations and timeouts for controller-manager
kube_controller_manager_leader_elect_lease_duration: 15s
kube_controller_manager_leader_elect_renew_deadline: 10s

# discovery_timeout modifies the discovery timeout
discovery_timeout: 5m0s

# Instruct first master to refresh kubeadm token
kubeadm_refresh_token: true

# Scale down coredns replicas to 0 if not using coredns dns_mode
kubeadm_scale_down_coredns_enabled: true

# audit support
kubernetes_audit: false
# path to audit log file
audit_log_path: /var/log/audit/kube-apiserver-audit.log
# number of days to retain audit logs
audit_log_maxage: 30
# number of audit log files to retain
audit_log_maxbackups: 1
# max size in MB before the audit log is rotated
audit_log_maxsize: 100
# policy file
audit_policy_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"
# custom audit policy rules (to replace the default ones)
# audit_policy_custom_rules: |
#   - level: None
#     users: []
#     verbs: []
#     resources: []

# audit log hostpath
audit_log_name: audit-logs
audit_log_hostpath: /var/log/kubernetes/audit
audit_log_mountpath: "{{ audit_log_path | dirname }}"

# audit policy hostpath
audit_policy_name: audit-policy
audit_policy_hostpath: "{{ audit_policy_file | dirname }}"
audit_policy_mountpath: "{{ audit_policy_hostpath }}"

# audit webhook support
kubernetes_audit_webhook: false

# path to audit webhook config file
audit_webhook_config_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-webhook-config.yaml"
audit_webhook_server_url: "https://audit.app"
audit_webhook_server_extra_args: {}
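# Example (illustrative): extra keys here are rendered into the cluster section
# of the generated webhook kubeconfig, e.g.:
# audit_webhook_server_extra_args:
#   certificate-authority: /etc/kubernetes/pki/webhook-ca.crt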
audit_webhook_mode: batch
audit_webhook_batch_max_size: 100
audit_webhook_batch_max_wait: 1s

kube_controller_node_monitor_grace_period: 40s
kube_controller_node_monitor_period: 5s
kube_controller_terminated_pod_gc_threshold: 12500
kube_apiserver_request_timeout: "1m0s"
kube_apiserver_pod_eviction_not_ready_timeout_seconds: "300"
kube_apiserver_pod_eviction_unreachable_timeout_seconds: "300"

# 1.10+ admission plugins
kube_apiserver_enable_admission_plugins: []

# enable admission plugins configuration
kube_apiserver_admission_control_config_file: false

# data structure to configure EventRateLimit admission plugin
# this should have the following structure:
# kube_apiserver_admission_event_rate_limits:
#   <limit_name>:
#     type: <limit_type>
#     qps: <qps_value>
#     burst: <burst_value>
#     cache_size: <cache_size_value>
kube_apiserver_admission_event_rate_limits: {}
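# Example (illustrative values; valid types are Server, Namespace, User and SourceAndObject):
# kube_apiserver_admission_event_rate_limits:
#   namespace_limit:
#     type: Namespace
#     qps: 50
#     burst: 100
#     cache_size: 2000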

kube_pod_security_use_default: false
kube_pod_security_default_enforce: baseline
kube_pod_security_default_enforce_version: latest
kube_pod_security_default_audit: restricted
kube_pod_security_default_audit_version: latest
kube_pod_security_default_warn: restricted
kube_pod_security_default_warn_version: latest
kube_pod_security_exemptions_usernames: []
kube_pod_security_exemptions_runtime_class_names: []
kube_pod_security_exemptions_namespaces:
  - kube-system
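# For reference, these variables populate a PodSecurityConfiguration for the
# PodSecurity admission plugin, roughly (the config apiVersion depends on the
# Kubernetes version):
# defaults:
#   enforce: baseline
#   audit: restricted
#   warn: restricted
# exemptions:
#   namespaces: [kube-system]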

# 1.10+ list of disabled admission plugins
kube_apiserver_disable_admission_plugins: []

# extra runtime config
kube_api_runtime_config: []
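# Example (illustrative): entries use the apiserver --runtime-config format, e.g.:
# kube_api_runtime_config:
#   - admissionregistration.k8s.io/v1alpha1=true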

## Enable/Disable Kube API Server Authentication Methods
kube_token_auth: false
kube_oidc_auth: false

## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
kube_webhook_token_auth: false
kube_webhook_token_auth_url_skip_tls_verify: false
# kube_webhook_token_auth_url: https://...
## base64-encoded string of the webhook's CA certificate
# kube_webhook_token_auth_ca_data: "LS0t..."

## Variables for webhook token authz https://kubernetes.io/docs/reference/access-authn-authz/webhook/
# kube_webhook_authorization_url: https://...
kube_webhook_authorization: false
kube_webhook_authorization_url_skip_tls_verify: false

## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you additionally have to deploy an OpenID Provider (e.g. Dex, Keycloak, ...)

# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: 'oidc:'
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: 'oidc:'
# Copy oidc CA file to the following path if needed
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# Optionally include a base64-encoded oidc CA cert
# kube_oidc_ca_cert: c3RhY2thYnVzZS5jb20...

# List of the preferred NodeAddressTypes to use for kubelet connections.
kubelet_preferred_address_types: 'InternalDNS,InternalIP,Hostname,ExternalDNS,ExternalIP'

## Extra args for k8s components passed by kubeadm
kube_kubeadm_apiserver_extra_args: {}
kube_kubeadm_controller_extra_args: {}

## Extra control plane host volume mounts
## Example:
# apiserver_extra_volumes:
#   - name: name
#     hostPath: /host/path
#     mountPath: /mount/path
#     readOnly: true
apiserver_extra_volumes: {}
controller_manager_extra_volumes: {}

## Encrypting Secret Data at Rest
kube_encrypt_secret_data: false
kube_encrypt_token: "{{ lookup('password', credentials_dir + '/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}"
# Must be either: aescbc, secretbox or aesgcm
kube_encryption_algorithm: "secretbox"
# Which kubernetes resources to encrypt
kube_encryption_resources: [secrets]

# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
  {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
  {%- else -%}
  {{ inventory_hostname }}
  {%- endif -%}

secrets_encryption_query: "resources[*].providers[0].{{ kube_encryption_algorithm }}.keys[0].secret"
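# For reference, the JMESPath query above extracts the key from a
# secrets_encryption.yaml shaped like (with the default secretbox algorithm):
# resources:
#   - resources: [secrets]
#     providers:
#       - secretbox:
#           keys:
#             - name: key
#               secret: <base64-encoded key>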

## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
# tls_min_version: ""

## Support tls cipher suites.
# tls_cipher_suites:
#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
#   - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
#   - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
#   - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
#   - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
#   - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
#   - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
#   - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
#   - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
#   - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
#   - TLS_ECDHE_RSA_WITH_RC4_128_SHA
#   - TLS_RSA_WITH_3DES_EDE_CBC_SHA
#   - TLS_RSA_WITH_AES_128_CBC_SHA
#   - TLS_RSA_WITH_AES_128_CBC_SHA256
#   - TLS_RSA_WITH_AES_128_GCM_SHA256
#   - TLS_RSA_WITH_AES_256_CBC_SHA
#   - TLS_RSA_WITH_AES_256_GCM_SHA384
#   - TLS_RSA_WITH_RC4_128_SHA

## Amount of time to retain events. (default 1h0m0s)
event_ttl_duration: "1h0m0s"

## Automatically renew K8S control plane certificates on first Monday of each month
auto_renew_certificates: false
# First Monday of each month
auto_renew_certificates_systemd_calendar: "{{ 'Mon *-*-1,2,3,4,5,6,7 03:' ~
  groups['kube_control_plane'].index(inventory_hostname) ~ '0:00' }}"
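# Example (derived from the expression above): on the first control plane node
# (index 0) this renders as 'Mon *-*-1,2,3,4,5,6,7 03:00:00'.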
# kubeadm renews all the certificates during control plane upgrade.
# If you need to upgrade the cluster without renewing certificates,
# you can opt out of the default behavior by setting kubeadm_upgrade_auto_cert_renewal to false
kubeadm_upgrade_auto_cert_renewal: true
@@ -0,0 +1,123 @@
---
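# The /bin/true no-op handlers below exist only to fan out notifications to the
# concrete handlers that follow (a common Ansible handler-chaining pattern).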
- name: Master | restart kubelet
  command: /bin/true
  notify:
    - Master | reload systemd
    - Master | reload kubelet
    - Master | wait for master static pods

- name: Master | wait for master static pods
  command: /bin/true
  notify:
    - Master | wait for the apiserver to be running
    - Master | wait for kube-scheduler
    - Master | wait for kube-controller-manager

- name: Master | Restart apiserver
  command: /bin/true
  notify:
    - Master | Remove apiserver container docker
    - Master | Remove apiserver container containerd/crio
    - Master | wait for the apiserver to be running

- name: Master | Restart kube-scheduler
  command: /bin/true
  notify:
    - Master | Remove scheduler container docker
    - Master | Remove scheduler container containerd/crio
    - Master | wait for kube-scheduler

- name: Master | Restart kube-controller-manager
  command: /bin/true
  notify:
    - Master | Remove controller manager container docker
    - Master | Remove controller manager container containerd/crio
    - Master | wait for kube-controller-manager

- name: Master | reload systemd
  systemd:
    daemon_reload: true

- name: Master | reload kubelet
  service:
    name: kubelet
    state: restarted

- name: Master | Remove apiserver container docker
  shell: docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f
  register: remove_apiserver_container
  retries: 10
  until: remove_apiserver_container.rc == 0
  delay: 1
  when: container_manager == "docker"

- name: Master | Remove apiserver container containerd/crio
  shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
  register: remove_apiserver_container
  retries: 10
  until: remove_apiserver_container.rc == 0
  delay: 1
  when: container_manager in ['containerd', 'crio']

- name: Master | Remove scheduler container docker
  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
  register: remove_scheduler_container
  retries: 10
  until: remove_scheduler_container.rc == 0
  delay: 1
  when: container_manager == "docker"

- name: Master | Remove scheduler container containerd/crio
  shell: "{{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
  register: remove_scheduler_container
  retries: 10
  until: remove_scheduler_container.rc == 0
  delay: 1
  when: container_manager in ['containerd', 'crio']

- name: Master | Remove controller manager container docker
  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
  register: remove_cm_container
  retries: 10
  until: remove_cm_container.rc == 0
  delay: 1
  when: container_manager == "docker"

- name: Master | Remove controller manager container containerd/crio
  shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
  register: remove_cm_container
  retries: 10
  until: remove_cm_container.rc == 0
  delay: 1
  when: container_manager in ['containerd', 'crio']

- name: Master | wait for kube-scheduler
  vars:
    endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
  uri:
    url: https://{{ endpoint }}:10259/healthz
    validate_certs: no
  register: scheduler_result
  until: scheduler_result.status == 200
  retries: 60
  delay: 1

- name: Master | wait for kube-controller-manager
  vars:
    endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
  uri:
    url: https://{{ endpoint }}:10257/healthz
    validate_certs: no
  register: controller_manager_result
  until: controller_manager_result.status == 200
  retries: 60
  delay: 1

- name: Master | wait for the apiserver to be running
  uri:
    url: "{{ kube_apiserver_endpoint }}/healthz"
    validate_certs: no
  register: result
  until: result.status == 200
  retries: 60
  delay: 1
@@ -0,0 +1,11 @@
---
dependencies:
  - role: kubernetes/tokens
    when: kube_token_auth
    tags:
      - k8s-secrets
  - role: adduser
    user: "{{ addusers.etcd }}"
    when:
      - etcd_deployment_type == "kubeadm"
      - not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos)
@@ -0,0 +1,19 @@
---

- name: Check which kube-control nodes are already members of the cluster
  command: "{{ bin_dir }}/kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o json"
  register: kube_control_planes_raw
  ignore_errors: yes
  changed_when: false

- name: Set fact joined_control_planes
  set_fact:
    joined_control_planes: "{{ ((kube_control_planes_raw.stdout | from_json)['items']) | default([]) | map(attribute='metadata') | map(attribute='name') | list }}"
  delegate_to: "{{ item }}"
  loop: "{{ groups['kube_control_plane'] }}"
  when: kube_control_planes_raw is succeeded
  run_once: yes

- name: Set fact first_kube_control_plane
  set_fact:
    first_kube_control_plane: "{{ joined_control_planes | default([]) | first | default(groups['kube_control_plane'] | first) }}"
@@ -0,0 +1,42 @@
---
- name: Check if secret for encrypting data at rest already exists
  stat:
    path: "{{ kube_cert_dir }}/secrets_encryption.yaml"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: secrets_encryption_file

- name: Slurp secrets_encryption file if it exists
  slurp:
    src: "{{ kube_cert_dir }}/secrets_encryption.yaml"
  register: secret_file_encoded
  when: secrets_encryption_file.stat.exists

- name: Base64-decode slurped secrets_encryption.yaml file
  set_fact:
    secret_file_decoded: "{{ secret_file_encoded['content'] | b64decode | from_yaml }}"
  when: secrets_encryption_file.stat.exists

- name: Extract secret value from secrets_encryption.yaml
  set_fact:
    kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}"
  when: secrets_encryption_file.stat.exists

- name: Set kube_encrypt_token across master nodes
  set_fact:
    kube_encrypt_token: "{{ kube_encrypt_token_extracted }}"
  delegate_to: "{{ item }}"
  delegate_facts: true
  with_inventory_hostnames: kube_control_plane
  when: kube_encrypt_token_extracted is defined

- name: Write secrets for encrypting secret data at rest
  template:
    src: secrets_encryption.yaml.j2
    dest: "{{ kube_cert_dir }}/secrets_encryption.yaml"
    owner: root
    group: "{{ kube_cert_group }}"
    mode: 0640
  tags:
    - kube-apiserver
@@ -0,0 +1,28 @@
---
- name: Backup old certs and keys
  copy:
    src: "{{ kube_cert_dir }}/{{ item }}"
    dest: "{{ kube_cert_dir }}/{{ item }}.old"
    mode: preserve
    remote_src: yes
  with_items:
    - apiserver.crt
    - apiserver.key
    - apiserver-kubelet-client.crt
    - apiserver-kubelet-client.key
    - front-proxy-client.crt
    - front-proxy-client.key
  ignore_errors: true  # noqa ignore-errors

- name: Backup old confs
  copy:
    src: "{{ kube_config_dir }}/{{ item }}"
    dest: "{{ kube_config_dir }}/{{ item }}.old"
    mode: preserve
    remote_src: yes
  with_items:
    - admin.conf
    - controller-manager.conf
    - kubelet.conf
    - scheduler.conf
  ignore_errors: true  # noqa ignore-errors
@@ -0,0 +1,26 @@
---
- name: Calculate etcd cert serial
  command: "openssl x509 -in {{ kube_cert_dir }}/apiserver-etcd-client.crt -noout -serial"
  register: "etcd_client_cert_serial_result"
  changed_when: false
  tags:
    - network

- name: Set etcd_client_cert_serial
  set_fact:
    etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}"
  tags:
    - network

- name: Ensure etcdctl script is installed
  import_role:
    name: etcdctl
  when: etcd_deployment_type == "kubeadm"

- name: Set ownership for etcd data directory
  file:
    path: "{{ etcd_data_dir }}"
    owner: "{{ etcd_owner }}"
    group: "{{ etcd_owner }}"
    mode: 0700
  when: etcd_deployment_type == "kubeadm"
@@ -0,0 +1,24 @@
---

- name: Update server field in component kubeconfigs
  lineinfile:
    dest: "{{ kube_config_dir }}/{{ item }}"
    regexp: '^    server: https'
    line: '    server: {{ kube_apiserver_endpoint }}'
    backup: yes
  with_items:
    - admin.conf
    - controller-manager.conf
    - kubelet.conf
    - scheduler.conf
  notify:
    - "Master | Restart kube-controller-manager"
    - "Master | Restart kube-scheduler"
    - "Master | reload kubelet"

- name: Update etcd-servers for apiserver
  lineinfile:
    dest: "{{ kube_config_dir }}/manifests/kube-apiserver.yaml"
    regexp: '^    - --etcd-servers='
    line: '    - --etcd-servers={{ etcd_access_addresses }}'
  when: etcd_deployment_type != "kubeadm"
@@ -0,0 +1,79 @@
---
- name: Set kubeadm_discovery_address
  set_fact:
    kubeadm_discovery_address: >-
      {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
      {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
      {%- else -%}
      {{ kube_apiserver_endpoint | regex_replace('https://', '') }}
      {%- endif %}
  tags:
    - facts

- name: Upload certificates so they are fresh and not expired
  command: >-
    {{ bin_dir }}/kubeadm init phase
    --config {{ kube_config_dir }}/kubeadm-config.yaml
    upload-certs
    --upload-certs
  register: kubeadm_upload_cert
  when:
    - inventory_hostname == first_kube_control_plane
    - not kube_external_ca_mode

- name: Parse certificate key if not set
  set_fact:
    kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
  run_once: yes
  when:
    - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined
    - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped

- name: Create kubeadm ControlPlane config
  template:
    src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml"
    mode: 0640
    backup: yes
  when:
    - inventory_hostname != first_kube_control_plane
    - not kubeadm_already_run.stat.exists

- name: Wait for k8s apiserver
  wait_for:
    host: "{{ kubeadm_discovery_address.split(':')[0] }}"
    port: "{{ kubeadm_discovery_address.split(':')[1] }}"
    timeout: 180

- name: check already run
  debug:
    msg: "{{ kubeadm_already_run.stat.exists }}"

- name: Reset cert directory
  shell: >-
    if [ -f /etc/kubernetes/manifests/kube-apiserver.yaml ]; then
    {{ bin_dir }}/kubeadm reset -f --cert-dir {{ kube_cert_dir }};
    fi
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  when:
    - inventory_hostname != first_kube_control_plane
    - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
    - not kube_external_ca_mode

- name: Joining control plane node to the cluster.
  command: >-
    {{ bin_dir }}/kubeadm join
    --config {{ kube_config_dir }}/kubeadm-controlplane.yaml
    --ignore-preflight-errors=all
    --skip-phases={{ kubeadm_join_phases_skip | join(',') }}
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  register: kubeadm_join_control_plane
  retries: 3
  throttle: 1
  until: kubeadm_join_control_plane is succeeded
  when:
    - inventory_hostname != first_kube_control_plane
    - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
@@ -0,0 +1,248 @@
---
- name: Install OIDC certificate
  copy:
    content: "{{ kube_oidc_ca_cert | b64decode }}"
    dest: "{{ kube_oidc_ca_file }}"
    owner: root
    group: root
    mode: "0644"
  when:
    - kube_oidc_auth
    - kube_oidc_ca_cert is defined

- name: kubeadm | Check if kubeadm has already run
  stat:
    path: "/var/lib/kubelet/config.yaml"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubeadm_already_run

- name: kubeadm | Backup kubeadm certs / kubeconfig
  import_tasks: kubeadm-backup.yml
  when:
    - kubeadm_already_run.stat.exists

- name: kubeadm | aggregate all SANs
  set_fact:
    apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}"
  vars:
    sans_base:
      - "kubernetes"
      - "kubernetes.default"
      - "kubernetes.default.svc"
      - "kubernetes.default.svc.{{ dns_domain }}"
      - "{{ kube_apiserver_ip }}"
      - "localhost"
      - "127.0.0.1"
    sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}"
    sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}"
    sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}"
    sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}"
    sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}"
    sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
    sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}"
    sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
    sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
    sans_kube_vip_address: "{{ [kube_vip_address] if kube_vip_address is defined and kube_vip_address else [] }}"
  tags: facts

- name: Create audit-policy directory
  file:
    path: "{{ audit_policy_file | dirname }}"
    state: directory
    mode: 0640
  when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)

- name: Write api audit policy yaml
  template:
    src: apiserver-audit-policy.yaml.j2
    dest: "{{ audit_policy_file }}"
    mode: 0640
  when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)

- name: Write api audit webhook config yaml
  template:
    src: apiserver-audit-webhook-config.yaml.j2
    dest: "{{ audit_webhook_config_file }}"
    mode: 0640
  when: kubernetes_audit_webhook|default(false)

# Nginx LB (default). If kubeadm_config_api_fqdn is defined, another LB is used
# via kubeadm's controlPlaneEndpoint.
- name: Set kubeadm_config_api_fqdn when a load balancer is defined
  set_fact:
    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}"
  when: loadbalancer_apiserver is defined

- name: Set kubeadm api version to v1beta3
  set_fact:
    kubeadmConfig_api_version: v1beta3

- name: kubeadm | Create kubeadm config
  template:
    src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
    mode: 0640

- name: kubeadm | Create directory to store admission control configurations
  file:
    path: "{{ kube_config_dir }}/admission-controls"
    state: directory
    mode: 0640
  when: kube_apiserver_admission_control_config_file

- name: kubeadm | Push admission control config file
  template:
    src: "admission-controls.yaml.j2"
    dest: "{{ kube_config_dir }}/admission-controls/admission-controls.yaml"
    mode: 0640
  when: kube_apiserver_admission_control_config_file

- name: kubeadm | Push admission control config files
  template:
    src: "{{ item|lower }}.yaml.j2"
    dest: "{{ kube_config_dir }}/admission-controls/{{ item|lower }}.yaml"
    mode: 0640
  when:
    - kube_apiserver_admission_control_config_file
    - item in kube_apiserver_admission_plugins_needs_configuration
  loop: "{{ kube_apiserver_enable_admission_plugins }}"

- name: kubeadm | Check if apiserver.crt contains all needed SANs
  shell: |
    set -o pipefail
    for IP in {{ apiserver_ips | join(' ') }}; do
      openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkip $IP | grep -q 'does match certificate' || echo 'NEED-RENEW'
    done
    for HOST in {{ apiserver_hosts | join(' ') }}; do
      openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkhost $HOST | grep -q 'does match certificate' || echo 'NEED-RENEW'
    done
  vars:
    apiserver_ips: "{{ apiserver_sans|map('ipaddr')|reject('equalto', False)|list }}"
    apiserver_hosts: "{{ apiserver_sans|difference(apiserver_ips) }}"
  args:
    executable: /bin/bash
  register: apiserver_sans_check
  changed_when: "'NEED-RENEW' in apiserver_sans_check.stdout"
  when:
    - kubeadm_already_run.stat.exists
    - not kube_external_ca_mode

- name: kubeadm | regenerate apiserver cert 1/2
  file:
    state: absent
    path: "{{ kube_cert_dir }}/{{ item }}"
  with_items:
    - apiserver.crt
    - apiserver.key
  when:
    - kubeadm_already_run.stat.exists
    - apiserver_sans_check.changed
    - not kube_external_ca_mode

- name: kubeadm | regenerate apiserver cert 2/2
  command: >-
    {{ bin_dir }}/kubeadm
    init phase certs apiserver
    --config={{ kube_config_dir }}/kubeadm-config.yaml
  when:
    - kubeadm_already_run.stat.exists
    - apiserver_sans_check.changed
    - not kube_external_ca_mode

- name: kubeadm | Create directory to store kubeadm patches
  file:
    path: "{{ kubeadm_patches.dest_dir }}"
    state: directory
    mode: 0640
  when: kubeadm_patches is defined and kubeadm_patches.enabled

- name: kubeadm | Copy kubeadm patches from inventory files
  copy:
    src: "{{ kubeadm_patches.source_dir }}/"
    dest: "{{ kubeadm_patches.dest_dir }}"
    owner: "root"
    mode: 0644
  when: kubeadm_patches is defined and kubeadm_patches.enabled

- name: kubeadm | Initialize first master
  command: >-
    timeout -k 300s 300s
    {{ bin_dir }}/kubeadm init
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
    --skip-phases={{ kubeadm_init_phases_skip | join(',') }}
    {{ kube_external_ca_mode | ternary('', '--upload-certs') }}
  register: kubeadm_init
  # Retry is because upload config sometimes fails
  retries: 3
  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
  when: inventory_hostname == first_kube_control_plane and not kubeadm_already_run.stat.exists
  failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Master | restart kubelet

- name: set kubeadm certificate key
  set_fact:
    kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)', '\\1') | first }}"
  with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
  when:
    - kubeadm_certificate_key is not defined
    - (item | trim) is match('.*--certificate-key.*')

- name: Create hardcoded kubeadm token for joining nodes with 24h expiration (if defined)
  shell: >-
    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token delete {{ kubeadm_token }} || :;
    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create {{ kubeadm_token }}
  changed_when: false
  when:
    - inventory_hostname == first_kube_control_plane
    - kubeadm_token is defined
    - kubeadm_refresh_token
  tags:
    - kubeadm_token

- name: Create kubeadm token for joining nodes with 24h expiration (default)
  command: "{{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create"
  changed_when: false
  register: temp_token
  retries: 5
  delay: 5
  until: temp_token is succeeded
  delegate_to: "{{ first_kube_control_plane }}"
  when: kubeadm_token is not defined
  tags:
    - kubeadm_token

- name: Set kubeadm_token
  set_fact:
    kubeadm_token: "{{ temp_token.stdout }}"
  when: temp_token.stdout is defined
  tags:
    - kubeadm_token

- name: PodSecurityPolicy | install PodSecurityPolicy
  include_tasks: psp-install.yml
  when:
    - podsecuritypolicy_enabled
    - inventory_hostname == first_kube_control_plane

- name: kubeadm | Join other masters
  include_tasks: kubeadm-secondary.yml

- name: kubeadm | upgrade kubernetes cluster
  include_tasks: kubeadm-upgrade.yml
  when:
    - upgrade_cluster_setup
    - kubeadm_already_run.stat.exists

# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
- name: kubeadm | Remove taint for master with node role
  command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
  delegate_to: "{{ first_kube_control_plane }}"
  with_items:
    - "node-role.kubernetes.io/master:NoSchedule-"
    - "node-role.kubernetes.io/control-plane:NoSchedule-"
  when: inventory_hostname in groups['kube_node']
  failed_when: false
@@ -0,0 +1,75 @@
---
- name: kubeadm | Check api is up
  uri:
    url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
    validate_certs: false
  when: inventory_hostname in groups['kube_control_plane']
  register: _result
  retries: 60
  delay: 5
  until: _result.status == 200

- name: kubeadm | Upgrade first master
  command: >-
    timeout -k 600s 600s
    {{ bin_dir }}/kubeadm
    upgrade apply -y {{ kube_version }}
    --certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }}
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
    --allow-experimental-upgrades
    --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }}
    --force
  register: kubeadm_upgrade
  # Retry is because upload config sometimes fails
  retries: 3
  until: kubeadm_upgrade.rc == 0
  when: inventory_hostname == first_kube_control_plane
  failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Master | restart kubelet

- name: kubeadm | Upgrade other masters
  command: >-
    timeout -k 600s 600s
    {{ bin_dir }}/kubeadm
    upgrade apply -y {{ kube_version }}
    --certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }}
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
    --allow-experimental-upgrades
    --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }}
    --force
  register: kubeadm_upgrade
  when: inventory_hostname != first_kube_control_plane
  failed_when:
    - kubeadm_upgrade.rc != 0
    - '"field is immutable" not in kubeadm_upgrade.stderr'
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Master | restart kubelet

- name: kubeadm | clean kubectl cache to refresh api types
  file:
    path: "{{ item }}"
    state: absent
  with_items:
    - /root/.kube/cache
    - /root/.kube/http-cache

# FIXME: https://github.com/kubernetes/kubeadm/issues/1318
- name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
  command: >-
    {{ kubectl }}
    -n kube-system
    scale deployment/coredns --replicas 0
  register: scale_down_coredns
  retries: 6
  delay: 5
  until: scale_down_coredns is succeeded
  run_once: yes
  when:
    - kubeadm_scale_down_coredns_enabled
    - dns_mode not in ['coredns', 'coredns_dual']
  changed_when: false
@@ -0,0 +1,18 @@
---
- name: Fixup kubelet client cert rotation 1/2
  lineinfile:
    path: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '^    client-certificate-data: '
    line: '    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
    backup: yes
  notify:
    - "Master | reload kubelet"

- name: Fixup kubelet client cert rotation 2/2
  lineinfile:
    path: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '^    client-key-data: '
    line: '    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
    backup: yes
  notify:
    - "Master | reload kubelet"
ansible/kubespray/roles/kubernetes/control-plane/tasks/main.yml
@@ -0,0 +1,104 @@
---
- import_tasks: pre-upgrade.yml
  tags:
    - k8s-pre-upgrade

- name: Create webhook token auth config
  template:
    src: webhook-token-auth-config.yaml.j2
    dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
    mode: 0640
  when: kube_webhook_token_auth|default(false)

- name: Create webhook authorization config
  template:
    src: webhook-authorization-config.yaml.j2
    dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
    mode: 0640
  when: kube_webhook_authorization|default(false)

- name: Create kube-scheduler config
  template:
    src: kubescheduler-config.yaml.j2
    dest: "{{ kube_config_dir }}/kubescheduler-config.yaml"
    mode: 0644

- import_tasks: encrypt-at-rest.yml
  when:
    - kube_encrypt_secret_data

- name: Install | Copy kubectl binary from download dir
  copy:
    src: "{{ local_release_dir }}/kubectl-{{ kube_version }}-{{ image_arch }}"
    dest: "{{ bin_dir }}/kubectl"
    mode: 0755
    remote_src: true
  tags:
    - kubectl
    - upgrade

- name: Install kubectl bash completion
  shell: "{{ bin_dir }}/kubectl completion bash >/etc/bash_completion.d/kubectl.sh"
  when: ansible_os_family in ["Debian","RedHat"]
  tags:
    - kubectl
  ignore_errors: true  # noqa ignore-errors

- name: Set kubectl bash completion file permissions
  file:
    path: /etc/bash_completion.d/kubectl.sh
    owner: root
    group: root
    mode: 0755
  when: ansible_os_family in ["Debian","RedHat"]
  tags:
    - kubectl
    - upgrade
  ignore_errors: true  # noqa ignore-errors

- name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy
  set_fact:
    kube_apiserver_enable_admission_plugins: "{{ kube_apiserver_enable_admission_plugins | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}"
  when: podsecuritypolicy_enabled

- name: Define nodes already joined to existing cluster and first_kube_control_plane
  import_tasks: define-first-kube-control.yml

- name: Include kubeadm setup
  import_tasks: kubeadm-setup.yml

- name: Include kubeadm etcd extra tasks
  include_tasks: kubeadm-etcd.yml
  when: etcd_deployment_type == "kubeadm"

- name: Include kubeadm secondary server apiserver fixes
  include_tasks: kubeadm-fix-apiserver.yml

- name: Include kubelet client cert rotation fixes
  include_tasks: kubelet-fix-client-cert-rotation.yml
  when: kubelet_rotate_certificates

- name: Install script to renew K8S control plane certificates
  template:
    src: k8s-certs-renew.sh.j2
    dest: "{{ bin_dir }}/k8s-certs-renew.sh"
    mode: 0755

- name: Renew K8S control plane certificates monthly 1/2
  template:
    src: "{{ item }}.j2"
    dest: "/etc/systemd/system/{{ item }}"
    mode: 0644
  with_items:
    - k8s-certs-renew.service
    - k8s-certs-renew.timer
  register: k8s_certs_units
  when: auto_renew_certificates

- name: Renew K8S control plane certificates monthly 2/2
  systemd:
    name: k8s-certs-renew.timer
    enabled: yes
    state: started
    daemon-reload: "{{ k8s_certs_units is changed }}"
  when: auto_renew_certificates
@@ -0,0 +1,21 @@
---
- name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
  file:
    path: "/etc/kubernetes/manifests/{{ item }}.manifest"
    state: absent
  with_items:
    - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
  register: kube_apiserver_manifest_replaced
  when: etcd_secret_changed|default(false)

- name: "Pre-upgrade | Delete master containers forcefully"  # noqa 503
  shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
  args:
    executable: /bin/bash
  with_items:
    - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
  when: kube_apiserver_manifest_replaced.changed
  register: remove_master_container
  retries: 10
  until: remove_master_container.rc == 0
  delay: 1
@@ -0,0 +1,38 @@
---
- name: Check AppArmor status
  command: which apparmor_parser
  register: apparmor_status
  failed_when: false
  changed_when: apparmor_status.rc != 0

- name: Set apparmor_enabled
  set_fact:
    apparmor_enabled: "{{ apparmor_status.rc == 0 }}"

- name: Render templates for PodSecurityPolicy
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: 0640
  register: psp_manifests
  with_items:
    - {file: psp.yml, type: psp, name: psp}
    - {file: psp-cr.yml, type: clusterrole, name: psp-cr}
    - {file: psp-crb.yml, type: rolebinding, name: psp-crb}

- name: Add policies, roles, bindings for PodSecurityPolicy
  kube:
    name: "{{ item.item.name }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  register: result
  until: result is succeeded
  retries: 10
  delay: 6
  with_items: "{{ psp_manifests.results }}"
  environment:
    KUBECONFIG: "{{ kube_config_dir }}/admin.conf"
  loop_control:
    label: "{{ item.item.file }}"
@@ -0,0 +1,9 @@
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
{% for plugin in kube_apiserver_enable_admission_plugins %}
{% if plugin in kube_apiserver_admission_plugins_needs_configuration %}
  - name: {{ plugin }}
    path: {{ kube_config_dir }}/{{ plugin|lower }}.yaml
{% endif %}
{% endfor %}
@@ -0,0 +1,129 @@
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
{% if audit_policy_custom_rules is defined and audit_policy_custom_rules != "" %}
{{ audit_policy_custom_rules | indent(2, true) }}
{% else %}
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: ""  # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads `configmaps/ingress-uid` through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: ""  # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"]  # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: ""  # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: ""  # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: ""  # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: ""  # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics.k8s.io"
  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*
  # Don't log events requests.
  - level: None
    resources:
      - group: ""  # core
        resources: ["events"]
  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: ""  # core
        resources: ["secrets", "configmaps", "serviceaccounts/token"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources:
      - group: ""  # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "settings.k8s.io"
      - group: "storage.k8s.io"
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources:
      - group: ""  # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "settings.k8s.io"
      - group: "storage.k8s.io"
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
{% endif %}
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Config
clusters:
  - cluster:
      server: {{ audit_webhook_server_url }}
{% for key in audit_webhook_server_extra_args %}
      {{ key }}: "{{ audit_webhook_server_extra_args[key] }}"
{% endfor %}
    name: auditsink
contexts:
  - context:
      cluster: auditsink
      user: ""
    name: default-context
current-context: default-context
preferences: {}
users: []
@@ -0,0 +1,11 @@
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
kind: Configuration
limits:
{% for limit in kube_apiserver_admission_event_rate_limits.values() %}
  - type: {{ limit.type }}
    qps: {{ limit.qps }}
    burst: {{ limit.burst }}
{% if limit.cache_size is defined %}
    cacheSize: {{ limit.cache_size }}
{% endif %}
{% endfor %}
@@ -0,0 +1,6 @@
[Unit]
Description=Renew K8S control plane certificates

[Service]
Type=oneshot
ExecStart={{ bin_dir }}/k8s-certs-renew.sh
@@ -0,0 +1,23 @@
#!/bin/bash

echo "## Expiration before renewal ##"
{{ bin_dir }}/kubeadm certs check-expiration

echo "## Renewing certificates managed by kubeadm ##"
{{ bin_dir }}/kubeadm certs renew all

echo "## Restarting control plane pods managed by kubeadm ##"
{% if container_manager == "docker" %}
{{ docker_bin_dir }}/docker ps -af 'name=k8s_POD_(kube-apiserver|kube-controller-manager|kube-scheduler|etcd)-*' -q | /usr/bin/xargs {{ docker_bin_dir }}/docker rm -f
{% else %}
{{ bin_dir }}/crictl pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs {{ bin_dir }}/crictl rmp -f
{% endif %}

echo "## Updating /root/.kube/config ##"
cp {{ kube_config_dir }}/admin.conf /root/.kube/config

echo "## Waiting for apiserver to be up again ##"
until printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done

echo "## Expiration after renewal ##"
{{ bin_dir }}/kubeadm certs check-expiration
@@ -0,0 +1,8 @@
[Unit]
Description=Timer to renew K8S control plane certificates

[Timer]
OnCalendar={{ auto_renew_certificates_systemd_calendar }}

[Install]
WantedBy=multi-user.target
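Assuming the two unit files above are installed as k8s-certs-renew.service and k8s-certs-renew.timer (the unit file names are not shown in this diff), wiring up the schedule is the usual systemd routine:

systemctl daemon-reload
systemctl enable --now k8s-certs-renew.timer   # fires the oneshot service per OnCalendar
systemctl start k8s-certs-renew.service        # optional: renew once immediately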
@@ -0,0 +1,453 @@
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
{% if kubeadm_token is defined %}
bootstrapTokens:
- token: "{{ kubeadm_token }}"
  description: "kubespray kubeadm bootstrap token"
  ttl: "24h"
{% endif %}
localAPIEndpoint:
  advertiseAddress: {{ ip | default(fallback_ips[inventory_hostname]) }}
  bindPort: {{ kube_apiserver_port }}
{% if kubeadm_certificate_key is defined %}
certificateKey: {{ kubeadm_certificate_key }}
{% endif %}
nodeRegistration:
{% if kube_override_hostname|default('') %}
  name: {{ kube_override_hostname }}
{% endif %}
{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
{% else %}
  taints: []
{% endif %}
  criSocket: {{ cri_socket }}
{% if cloud_provider is defined and cloud_provider in ["external"] %}
  kubeletExtraArgs:
    cloud-provider: external
{% endif %}
{% if kubeadm_patches is defined and kubeadm_patches.enabled %}
patches:
  directory: {{ kubeadm_patches.dest_dir }}
{% endif %}
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
clusterName: {{ cluster_name }}
etcd:
{% if etcd_deployment_type != "kubeadm" %}
  external:
    endpoints:
{% for endpoint in etcd_access_addresses.split(',') %}
    - {{ endpoint }}
{% endfor %}
    caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
    certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
    keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
{% elif etcd_deployment_type == "kubeadm" %}
  local:
    imageRepository: "{{ etcd_image_repo | regex_replace("/etcd$","") }}"
    imageTag: "{{ etcd_image_tag }}"
    dataDir: "{{ etcd_data_dir }}"
    extraArgs:
      metrics: {{ etcd_metrics }}
      election-timeout: "{{ etcd_election_timeout }}"
      heartbeat-interval: "{{ etcd_heartbeat_interval }}"
      auto-compaction-retention: "{{ etcd_compaction_retention }}"
{% if etcd_snapshot_count is defined %}
      snapshot-count: "{{ etcd_snapshot_count }}"
{% endif %}
{% if etcd_quota_backend_bytes is defined %}
      quota-backend-bytes: "{{ etcd_quota_backend_bytes }}"
{% endif %}
{% if etcd_max_request_bytes is defined %}
      max-request-bytes: "{{ etcd_max_request_bytes }}"
{% endif %}
{% if etcd_log_level is defined %}
      log-level: "{{ etcd_log_level }}"
{% endif %}
{% for key, value in etcd_extra_vars.items() %}
      {{ key }}: "{{ value }}"
{% endfor %}
    serverCertSANs:
{% for san in etcd_cert_alt_names %}
    - {{ san }}
{% endfor %}
{% for san in etcd_cert_alt_ips %}
    - {{ san }}
{% endfor %}
    peerCertSANs:
{% for san in etcd_cert_alt_names %}
    - {{ san }}
{% endfor %}
{% for san in etcd_cert_alt_ips %}
    - {{ san }}
{% endfor %}
{% endif %}
dns:
  imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }}
  imageTag: {{ coredns_image_tag }}
networking:
  dnsDomain: {{ dns_domain }}
  serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
  podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
{% if kubeadm_feature_gates %}
featureGates:
{% for feature in kubeadm_feature_gates %}
  {{ feature|replace("=", ": ") }}
{% endfor %}
{% endif %}
kubernetesVersion: {{ kube_version }}
{% if kubeadm_config_api_fqdn is defined %}
controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
controlPlaneEndpoint: {{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}
{% endif %}
certificatesDir: {{ kube_cert_dir }}
imageRepository: {{ kube_image_repo }}
apiServer:
  extraArgs:
{% if kube_apiserver_pod_eviction_not_ready_timeout_seconds is defined %}
    default-not-ready-toleration-seconds: "{{ kube_apiserver_pod_eviction_not_ready_timeout_seconds }}"
{% endif %}
{% if kube_apiserver_pod_eviction_unreachable_timeout_seconds is defined %}
    default-unreachable-toleration-seconds: "{{ kube_apiserver_pod_eviction_unreachable_timeout_seconds }}"
{% endif %}
{% if kube_api_anonymous_auth is defined %}
    anonymous-auth: "{{ kube_api_anonymous_auth }}"
{% endif %}
    authorization-mode: {{ authorization_modes | join(',') }}
    bind-address: {{ kube_apiserver_bind_address }}
{% if kube_apiserver_enable_admission_plugins|length > 0 %}
    enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
{% endif %}
{% if kube_apiserver_admission_control_config_file %}
    admission-control-config-file: {{ kube_config_dir }}/admission-controls.yaml
{% endif %}
{% if kube_apiserver_disable_admission_plugins|length > 0 %}
    disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
{% endif %}
    apiserver-count: "{{ kube_apiserver_count }}"
    endpoint-reconciler-type: lease
{% if etcd_events_cluster_enabled %}
    etcd-servers-overrides: "/events#{{ etcd_events_access_addresses_semicolon }}"
{% endif %}
    service-node-port-range: {{ kube_apiserver_node_port_range }}
    service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
    kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
    profiling: "{{ kube_profiling }}"
    request-timeout: "{{ kube_apiserver_request_timeout }}"
    enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
{% if kube_token_auth|default(true) %}
    token-auth-file: {{ kube_token_dir }}/known_tokens.csv
{% endif %}
{% if kube_apiserver_service_account_lookup %}
    service-account-lookup: "{{ kube_apiserver_service_account_lookup }}"
{% endif %}
{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
    oidc-issuer-url: "{{ kube_oidc_url }}"
    oidc-client-id: "{{ kube_oidc_client_id }}"
{% if kube_oidc_ca_file is defined %}
    oidc-ca-file: "{{ kube_oidc_ca_file }}"
{% endif %}
{% if kube_oidc_username_claim is defined %}
    oidc-username-claim: "{{ kube_oidc_username_claim }}"
{% endif %}
{% if kube_oidc_groups_claim is defined %}
    oidc-groups-claim: "{{ kube_oidc_groups_claim }}"
{% endif %}
{% if kube_oidc_username_prefix is defined %}
    oidc-username-prefix: "{{ kube_oidc_username_prefix }}"
{% endif %}
{% if kube_oidc_groups_prefix is defined %}
    oidc-groups-prefix: "{{ kube_oidc_groups_prefix }}"
{% endif %}
{% endif %}
{% if kube_webhook_token_auth|default(false) %}
    authentication-token-webhook-config-file: {{ kube_config_dir }}/webhook-token-auth-config.yaml
{% endif %}
{% if kube_webhook_authorization|default(false) %}
    authorization-webhook-config-file: {{ kube_config_dir }}/webhook-authorization-config.yaml
{% endif %}
{% if kube_encrypt_secret_data %}
    encryption-provider-config: {{ kube_cert_dir }}/secrets_encryption.yaml
{% endif %}
    storage-backend: {{ kube_apiserver_storage_backend }}
{% if kube_api_runtime_config|length > 0 %}
    runtime-config: {{ kube_api_runtime_config | join(',') }}
{% endif %}
    allow-privileged: "true"
{% if kubernetes_audit or kubernetes_audit_webhook %}
    audit-policy-file: {{ audit_policy_file }}
{% endif %}
{% if kubernetes_audit %}
    audit-log-path: "{{ audit_log_path }}"
    audit-log-maxage: "{{ audit_log_maxage }}"
    audit-log-maxbackup: "{{ audit_log_maxbackups }}"
    audit-log-maxsize: "{{ audit_log_maxsize }}"
{% endif %}
{% if kubernetes_audit_webhook %}
    audit-webhook-config-file: {{ audit_webhook_config_file }}
    audit-webhook-mode: {{ audit_webhook_mode }}
{% if audit_webhook_mode == "batch" %}
    audit-webhook-batch-max-size: "{{ audit_webhook_batch_max_size }}"
    audit-webhook-batch-max-wait: "{{ audit_webhook_batch_max_wait }}"
{% endif %}
{% endif %}
{% for key in kube_kubeadm_apiserver_extra_args %}
    {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
{% endfor %}
{% if kube_apiserver_feature_gates or kube_feature_gates %}
    feature-gates: "{{ kube_apiserver_feature_gates | default(kube_feature_gates, true) | join(',') }}"
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
    cloud-provider: {{ cloud_provider }}
    cloud-config: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if tls_min_version is defined %}
    tls-min-version: {{ tls_min_version }}
{% endif %}
{% if tls_cipher_suites is defined %}
    tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %}

{% endif %}
{% if event_ttl_duration is defined %}
    event-ttl: {{ event_ttl_duration }}
{% endif %}
{% if kubelet_rotate_server_certificates %}
    kubelet-certificate-authority: {{ kube_cert_dir }}/ca.crt
{% endif %}
{% if kubernetes_audit or kube_token_auth|default(true) or kube_webhook_token_auth|default(false) or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] ) or apiserver_extra_volumes or ssl_ca_dirs|length %}
  extraVolumes:
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
  - name: cloud-config
    hostPath: {{ kube_config_dir }}/cloud_config
    mountPath: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if kube_token_auth|default(true) %}
  - name: token-auth-config
    hostPath: {{ kube_token_dir }}
    mountPath: {{ kube_token_dir }}
{% endif %}
{% if kube_webhook_token_auth|default(false) %}
  - name: webhook-token-auth-config
    hostPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
    mountPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
{% endif %}
{% if kube_webhook_authorization|default(false) %}
  - name: webhook-authorization-config
    hostPath: {{ kube_config_dir }}/webhook-authorization-config.yaml
    mountPath: {{ kube_config_dir }}/webhook-authorization-config.yaml
{% endif %}
{% if kubernetes_audit or kubernetes_audit_webhook %}
  - name: {{ audit_policy_name }}
    hostPath: {{ audit_policy_hostpath }}
    mountPath: {{ audit_policy_mountpath }}
{% if audit_log_path != "-" %}
  - name: {{ audit_log_name }}
    hostPath: {{ audit_log_hostpath }}
    mountPath: {{ audit_log_mountpath }}
    readOnly: false
{% endif %}
{% endif %}
{% if kube_apiserver_admission_control_config_file %}
  - name: admission-control-configs
    hostPath: {{ kube_config_dir }}/admission-controls
    mountPath: {{ kube_config_dir }}
    readOnly: false
    pathType: DirectoryOrCreate
{% endif %}
{% for volume in apiserver_extra_volumes %}
  - name: {{ volume.name }}
    hostPath: {{ volume.hostPath }}
    mountPath: {{ volume.mountPath }}
    readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }}
{% endfor %}
{% if ssl_ca_dirs|length %}
{% for dir in ssl_ca_dirs %}
  - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
    hostPath: {{ dir }}
    mountPath: {{ dir }}
    readOnly: true
{% endfor %}
{% endif %}
{% endif %}
  certSANs:
{% for san in apiserver_sans %}
  - {{ san }}
{% endfor %}
  timeoutForControlPlane: 5m0s
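The apiserver_extra_volumes loop above expects a list of mappings, with readOnly falling back to the negation of a legacy writable flag when unset. A hedged sketch of one entry (the name and paths are placeholders):

apiserver_extra_volumes:
  - name: example-ca-bundle
    hostPath: /etc/pki/example
    mountPath: /etc/pki/example
    readOnly: true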
controllerManager:
  extraArgs:
    node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
    node-monitor-period: {{ kube_controller_node_monitor_period }}
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
    cluster-cidr: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
    service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
{% if enable_dual_stack_networks %}
    node-cidr-mask-size-ipv4: "{{ kube_network_node_prefix }}"
    node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}"
{% else %}
    node-cidr-mask-size: "{{ kube_network_node_prefix }}"
{% endif %}
    profiling: "{{ kube_profiling }}"
    terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}"
    bind-address: {{ kube_controller_manager_bind_address }}
    leader-elect-lease-duration: {{ kube_controller_manager_leader_elect_lease_duration }}
    leader-elect-renew-deadline: {{ kube_controller_manager_leader_elect_renew_deadline }}
{% if kube_controller_feature_gates or kube_feature_gates %}
    feature-gates: "{{ kube_controller_feature_gates | default(kube_feature_gates, true) | join(',') }}"
{% endif %}
{% for key in kube_kubeadm_controller_extra_args %}
    {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
{% endfor %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
    cloud-provider: {{ cloud_provider }}
    cloud-config: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin not in ["cloud"] %}
    configure-cloud-routes: "false"
{% endif %}
{% if kubelet_flexvolumes_plugins_dir is defined %}
    flex-volume-plugin-dir: {{ kubelet_flexvolumes_plugins_dir }}
{% endif %}
{% if tls_min_version is defined %}
    tls-min-version: {{ tls_min_version }}
{% endif %}
{% if tls_cipher_suites is defined %}
    tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %}

{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] or controller_manager_extra_volumes %}
  extraVolumes:
{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
  - name: openstackcacert
    hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
    mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
  - name: cloud-config
    hostPath: {{ kube_config_dir }}/cloud_config
    mountPath: {{ kube_config_dir }}/cloud_config
{% endif %}
{% for volume in controller_manager_extra_volumes %}
  - name: {{ volume.name }}
    hostPath: {{ volume.hostPath }}
    mountPath: {{ volume.mountPath }}
    readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }}
{% endfor %}
{% endif %}
scheduler:
  extraArgs:
    bind-address: {{ kube_scheduler_bind_address }}
    config: {{ kube_config_dir }}/kubescheduler-config.yaml
{% if kube_scheduler_feature_gates or kube_feature_gates %}
    feature-gates: "{{ kube_scheduler_feature_gates | default(kube_feature_gates, true) | join(',') }}"
{% endif %}
{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
{% for key in kube_kubeadm_scheduler_extra_args %}
    {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
{% endfor %}
{% endif %}
{% if tls_min_version is defined %}
    tls-min-version: {{ tls_min_version }}
{% endif %}
{% if tls_cipher_suites is defined %}
    tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %}

{% endif %}
  extraVolumes:
  - name: kubescheduler-config
    hostPath: {{ kube_config_dir }}/kubescheduler-config.yaml
    mountPath: {{ kube_config_dir }}/kubescheduler-config.yaml
    readOnly: true
{% if scheduler_extra_volumes %}
{% for volume in scheduler_extra_volumes %}
  - name: {{ volume.name }}
    hostPath: {{ volume.hostPath }}
    mountPath: {{ volume.mountPath }}
    readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }}
{% endfor %}
{% endif %}
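The same tls-min-version / tls-cipher-suites pair is rendered into the apiserver, controller-manager and scheduler blocks above. One plausible hardening sketch (the constants are Go TLS identifiers accepted by the Kubernetes components; this particular selection is only an example):

tls_min_version: VersionTLS12
tls_cipher_suites:
  - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384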
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: {{ kube_proxy_bind_address }}
clientConnection:
  acceptContentTypes: {{ kube_proxy_client_accept_content_types }}
  burst: {{ kube_proxy_client_burst }}
  contentType: {{ kube_proxy_client_content_type }}
  kubeconfig: {{ kube_proxy_client_kubeconfig }}
  qps: {{ kube_proxy_client_qps }}
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
configSyncPeriod: {{ kube_proxy_config_sync_period }}
conntrack:
  maxPerCore: {{ kube_proxy_conntrack_max_per_core }}
  min: {{ kube_proxy_conntrack_min }}
  tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
  tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
enableProfiling: {{ kube_proxy_enable_profiling }}
healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
hostnameOverride: {{ kube_override_hostname }}
iptables:
  masqueradeAll: {{ kube_proxy_masquerade_all }}
  masqueradeBit: {{ kube_proxy_masquerade_bit }}
  minSyncPeriod: {{ kube_proxy_min_sync_period }}
  syncPeriod: {{ kube_proxy_sync_period }}
ipvs:
  excludeCIDRs: {{ kube_proxy_exclude_cidrs }}
  minSyncPeriod: {{ kube_proxy_min_sync_period }}
  scheduler: {{ kube_proxy_scheduler }}
  syncPeriod: {{ kube_proxy_sync_period }}
  strictARP: {{ kube_proxy_strict_arp }}
  tcpTimeout: {{ kube_proxy_tcp_timeout }}
  tcpFinTimeout: {{ kube_proxy_tcp_fin_timeout }}
  udpTimeout: {{ kube_proxy_udp_timeout }}
metricsBindAddress: {{ kube_proxy_metrics_bind_address }}
mode: {{ kube_proxy_mode }}
nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
oomScoreAdj: {{ kube_proxy_oom_score_adj }}
portRange: {{ kube_proxy_port_range }}
udpIdleTimeout: {{ kube_proxy_udp_idle_timeout }}
{% if kube_proxy_feature_gates or kube_feature_gates %}
{% set feature_gates = ( kube_proxy_feature_gates | default(kube_feature_gates, true) ) %}
featureGates:
{% for feature in feature_gates %}
  {{ feature|replace("=", ": ") }}
{% endfor %}
{% endif %}
{# DNS settings for kubelet #}
{% if enable_nodelocaldns %}
{% set kubelet_cluster_dns = [nodelocaldns_ip] %}
{% elif dns_mode in ['coredns'] %}
{% set kubelet_cluster_dns = [skydns_server] %}
{% elif dns_mode == 'coredns_dual' %}
{% set kubelet_cluster_dns = [skydns_server,skydns_server_secondary] %}
{% elif dns_mode == 'manual' %}
{% set kubelet_cluster_dns = [manual_dns_server] %}
{% else %}
{% set kubelet_cluster_dns = [] %}
{% endif %}
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
clusterDNS:
{% for dns_address in kubelet_cluster_dns %}
- {{ dns_address }}
{% endfor %}
{% if kubelet_feature_gates or kube_feature_gates %}
{% set feature_gates = ( kubelet_feature_gates | default(kube_feature_gates, true) ) %}
featureGates:
{% for feature in feature_gates %}
  {{ feature|replace("=", ": ") }}
{% endfor %}
{% endif %}
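All the featureGates loops in this template rely on the same trick: replace("=", ": ") turns a KEY=VALUE string into a YAML key/value pair. Assuming, for instance, kube_feature_gates: ["TopologyAwareHints=true"] (the gate name is only an example), the kubelet section renders as:

featureGates:
  TopologyAwareHints: true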
@@ -0,0 +1,34 @@
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
{% if kubeadm_config_api_fqdn is defined %}
    apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
    apiServerEndpoint: {{ kubeadm_discovery_address }}
{% endif %}
    token: {{ kubeadm_token }}
    unsafeSkipCAVerification: true
  timeout: {{ discovery_timeout }}
  tlsBootstrapToken: {{ kubeadm_token }}
controlPlane:
  localAPIEndpoint:
    advertiseAddress: {{ kube_apiserver_address }}
    bindPort: {{ kube_apiserver_port }}
  certificateKey: {{ kubeadm_certificate_key }}
nodeRegistration:
  name: {{ kube_override_hostname|default(inventory_hostname) }}
  criSocket: {{ cri_socket }}
{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
{% else %}
  taints: []
{% endif %}
{% if kubeadm_patches is defined and kubeadm_patches.enabled %}
patches:
  directory: {{ kubeadm_patches.dest_dir }}
{% endif %}
@@ -0,0 +1,25 @@
{% set kubescheduler_config_api_version = "v1beta3" %}
apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version|d('v1') }}
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "{{ kube_config_dir }}/scheduler.conf"
{% for key in kube_scheduler_client_conn_extra_opts %}
  {{ key }}: {{ kube_scheduler_client_conn_extra_opts[key] }}
{% endfor %}
{% if kube_scheduler_extenders %}
extenders:
{{ kube_scheduler_extenders | to_nice_yaml(indent=2, width=256) }}
{% endif %}
leaderElection:
  leaseDuration: {{ kube_scheduler_leader_elect_lease_duration }}
  renewDeadline: {{ kube_scheduler_leader_elect_renew_deadline }}
{% for key in kube_scheduler_leader_elect_extra_opts %}
  {{ key }}: {{ kube_scheduler_leader_elect_extra_opts[key] }}
{% endfor %}
{% if kube_scheduler_profiles %}
profiles:
{{ kube_scheduler_profiles | to_nice_yaml(indent=2, width=256) }}
{% endif %}
{% for key in kube_scheduler_config_extra_opts %}
{{ key }}: {{ kube_scheduler_config_extra_opts[key] }}
{% endfor %}
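kube_scheduler_profiles is dumped verbatim through to_nice_yaml, so it must already be a valid KubeSchedulerConfiguration profiles list. A minimal hedged sketch:

kube_scheduler_profiles:
  - schedulerName: default-scheduler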
@@ -0,0 +1,17 @@
{% if kube_pod_security_use_default %}
apiVersion: pod-security.admission.config.k8s.io/v1beta1
kind: PodSecurityConfiguration
defaults:
  enforce: "{{ kube_pod_security_default_enforce }}"
  enforce-version: "{{ kube_pod_security_default_enforce_version }}"
  audit: "{{ kube_pod_security_default_audit }}"
  audit-version: "{{ kube_pod_security_default_audit_version }}"
  warn: "{{ kube_pod_security_default_warn }}"
  warn-version: "{{ kube_pod_security_default_warn_version }}"
exemptions:
  usernames: {{ kube_pod_security_exemptions_usernames|to_json }}
  runtimeClasses: {{ kube_pod_security_exemptions_runtime_class_names|to_json }}
  namespaces: {{ kube_pod_security_exemptions_namespaces|to_json }}
{% else %}
# This file is intentionally left empty as kube_pod_security_use_default={{ kube_pod_security_use_default }}
{% endif %}
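A hedged inventory sketch for the variables consumed above (the enforce level shown is an example, not necessarily the shipped default):

kube_pod_security_use_default: true
kube_pod_security_default_enforce: baseline
kube_pod_security_exemptions_namespaces:
  - kube-system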
@@ -0,0 +1,32 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - policy
    resourceNames:
      - privileged
    resources:
      - podsecuritypolicies
    verbs:
      - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - policy
    resourceNames:
      - restricted
    resources:
      - podsecuritypolicies
    verbs:
      - use
@@ -0,0 +1,54 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: psp:any:restricted
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:restricted
subjects:
  - kind: Group
    name: system:authenticated
    apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: psp:kube-system:privileged
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:privileged
subjects:
  - kind: Group
    name: system:masters
    apiGroup: rbac.authorization.k8s.io
  - kind: Group
    name: system:serviceaccounts:kube-system
    apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: psp:nodes:privileged
  namespace: kube-system
  annotations:
    kubernetes.io/description: 'Allow nodes to create privileged pods. Should
      be used in combination with the NodeRestriction admission plugin to limit
      nodes to mirror pods bound to themselves.'
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:privileged
subjects:
  - kind: Group
    apiGroup: rbac.authorization.k8s.io
    name: system:nodes
  - kind: User
    apiGroup: rbac.authorization.k8s.io
    # Legacy node ID
    name: kubelet
@@ -0,0 +1,27 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted
  annotations:
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
{% if apparmor_enabled %}
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  {{ podsecuritypolicy_restricted_spec | to_yaml(indent=2, width=1337) | indent(width=2) }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  {{ podsecuritypolicy_privileged_spec | to_yaml(indent=2, width=1337) | indent(width=2) }}
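podsecuritypolicy_restricted_spec and podsecuritypolicy_privileged_spec are serialized straight into spec, so they must hold valid PodSecurityPolicy spec fields. An abbreviated sketch of the restricted variant (the fields were chosen for illustration; a complete spec needs more, such as seLinux, fsGroup and volumes):

podsecuritypolicy_restricted_spec:
  privileged: false
  allowPrivilegeEscalation: false
  requiredDropCapabilities:
    - ALL
  runAsUser:
    rule: MustRunAsNonRoot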
@@ -0,0 +1,11 @@
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
- resources:
{{ kube_encryption_resources|to_nice_yaml|indent(4, True) }}
  providers:
  - {{ kube_encryption_algorithm }}:
      keys:
      - name: key
        secret: {{ kube_encrypt_token | b64encode }}
  - identity: {}
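Encryption at rest is gated by kube_encrypt_secret_data in the apiserver template above. A hedged sketch of the related variables (secretbox is one of the providers EncryptionConfiguration accepts; whether it is the default here is not shown in this diff):

kube_encrypt_secret_data: true
kube_encryption_algorithm: secretbox
kube_encryption_resources:
  - secrets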
@@ -0,0 +1,18 @@
# clusters refers to the remote service.
clusters:
  - name: webhook-token-authz-cluster
    cluster:
      server: {{ kube_webhook_authorization_url }}
      insecure-skip-tls-verify: {{ kube_webhook_authorization_url_skip_tls_verify }}

# users refers to the API server's webhook configuration.
users:
  - name: webhook-token-authz-user

# kubeconfig files require a context. Provide one for the API server.
current-context: webhook-token-authz
contexts:
- context:
    cluster: webhook-token-authz-cluster
    user: webhook-token-authz-user
  name: webhook-token-authz
@@ -0,0 +1,21 @@
# clusters refers to the remote service.
clusters:
  - name: webhook-token-auth-cluster
    cluster:
      server: {{ kube_webhook_token_auth_url }}
      insecure-skip-tls-verify: {{ kube_webhook_token_auth_url_skip_tls_verify }}
{% if kube_webhook_token_auth_ca_data is defined %}
      certificate-authority-data: {{ kube_webhook_token_auth_ca_data }}
{% endif %}

# users refers to the API server's webhook configuration.
users:
  - name: webhook-token-auth-user

# kubeconfig files require a context. Provide one for the API server.
current-context: webhook-token-auth
contexts:
- context:
    cluster: webhook-token-auth-cluster
    user: webhook-token-auth-user
  name: webhook-token-auth
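A hedged sketch of the variables that enable this token-auth webhook (the URL is a placeholder, not a default):

kube_webhook_token_auth: true
kube_webhook_token_auth_url: https://tokenreview.example.com/authenticate
kube_webhook_token_auth_url_skip_tls_verify: false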
@@ -0,0 +1,3 @@
---
# List of admission plugins that need to be configured
kube_apiserver_admission_plugins_needs_configuration: [EventRateLimit, PodSecurity]