dsk-dev: move kubespray
165
ansible/kubespray/roles/network_plugin/calico/defaults/main.yml
Normal file
@@ -0,0 +1,165 @@
---
# the default value of name
calico_cni_name: k8s-pod-network

# Enables Internet connectivity from containers
nat_outgoing: true

# add default ippool name
calico_pool_name: "default-pool"
calico_ipv4pool_ipip: "Off"

# Change the encapsulation mode; by default we enable VXLAN, which is the most mature and well-tested mode
calico_ipip_mode: Never  # valid values are 'Always', 'Never' and 'CrossSubnet'
calico_vxlan_mode: Always  # valid values are 'Always', 'Never' and 'CrossSubnet'

calico_cni_pool: true
calico_cni_pool_ipv6: true

# add default ippool blockSize (defaults kube_network_node_prefix)
calico_pool_blocksize: 26

# Calico doesn't support ipip tunneling for IPv6.
calico_ipip_mode_ipv6: Never
calico_vxlan_mode_ipv6: Never

# add default ipv6 ippool blockSize (defaults kube_network_node_prefix_ipv6)
calico_pool_blocksize_ipv6: 122

# Calico network backend can be 'bird', 'vxlan' and 'none'
calico_network_backend: vxlan

calico_cert_dir: /etc/calico/certs

# Global as_num (/calico/bgp/v1/global/as_num)
global_as_num: "64512"

# You can set MTU value here. If left undefined or empty, it will
# not be specified in calico CNI config, so Calico will use built-in
# defaults. The value should be a number, not a string.
# calico_mtu: 1500

# Advertise Service External IPs
calico_advertise_service_external_ips: []

# Advertise Service LoadBalancer IPs
calico_advertise_service_loadbalancer_ips: []

# Calico eBPF support
calico_bpf_enabled: false
calico_bpf_log_level: ""
# Valid options for service mode: Tunnel (default), DSR (Direct Server Return)
calico_bpf_service_mode: Tunnel

# Limits for apps
calico_node_memory_limit: 500M
calico_node_cpu_limit: 300m
calico_node_memory_requests: 64M
calico_node_cpu_requests: 150m
calico_felix_chaininsertmode: Insert

# Calico daemonset nodeselector
calico_ds_nodeselector: "kubernetes.io/os: linux"

# Virtual network ID to use for VXLAN traffic. A value of 0 means "use the kernel default".
calico_vxlan_vni: 4096

# Port to use for VXLAN traffic. A value of 0 means "use the kernel default".
calico_vxlan_port: 4789

# Enable Prometheus Metrics endpoint for felix
calico_felix_prometheusmetricsenabled: false
calico_felix_prometheusmetricsport: 9091
calico_felix_prometheusgometricsenabled: true
calico_felix_prometheusprocessmetricsenabled: true

# Set the agent log level. Can be debug, warning, info or fatal
calico_loglevel: info
calico_node_startup_loglevel: error

# Set log path for calico CNI plugin. Set to false to disable logging to disk.
calico_cni_log_file_path: /var/log/calico/cni/cni.log

# Enable or disable usage report to 'usage.projectcalico.org'
calico_usage_reporting: false

# Should calico ignore kernel's RPF check setting,
# see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198
calico_node_ignorelooserpf: false

# Define address on which Felix will respond to health requests
calico_healthhost: "localhost"

# Configure time in seconds that calico will wait for the iptables lock
calico_iptables_lock_timeout_secs: 10

# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" (FELIX_IPTABLESBACKEND)
calico_iptables_backend: "Auto"

# Calico Wireguard support
calico_wireguard_enabled: false
calico_wireguard_packages: []
calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-{{ ansible_distribution_major_version }}-$basearch/

# If you want to use a non-default IP_AUTODETECTION_METHOD or IP6_AUTODETECTION_METHOD for calico node, set this option to one of:
# * can-reach=DESTINATION
# * interface=INTERFACE-REGEX
# see https://projectcalico.docs.tigera.io/reference/node/configuration#ip-autodetection-methods
# calico_ip_auto_method: "interface=eth.*"
# calico_ip6_auto_method: "interface=eth.*"

# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host's interface for MTU auto-detection.
# see https://projectcalico.docs.tigera.io/reference/felix/configuration
# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"

calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}"

kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem

# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
# The default value for calico_datastore is set in role kubespray-default

# Use typha (only with kdd)
typha_enabled: false
typha_prometheusmetricsenabled: false
typha_prometheusmetricsport: 9093

# Scaling typha: 1 replica per 100 nodes is adequate
# Number of typha replicas
typha_replicas: 1

# Set max typha connections
typha_max_connections_lower_limit: 300

# Generate certificates for typha<->calico-node communication
typha_secure: false

calico_feature_control: {}

# Calico default BGP port
calico_bgp_listen_port: 179

# Calico FelixConfiguration options
calico_felix_reporting_interval: 0s
calico_felix_log_severity_screen: Info

# Calico container settings
calico_allow_ip_forwarding: false

# Calico IPAM strictAffinity
calico_ipam_strictaffinity: false

# Calico IPAM autoAllocateBlocks
calico_ipam_autoallocateblocks: true

# Calico IPAM maxBlocksPerHost, default 0
calico_ipam_maxblocksperhost: 0

# Calico apiserver (only with kdd)
calico_apiserver_enabled: false

# Calico feature detect override, set "ChecksumOffloadBroken=true" to
# solve the https://github.com/projectcalico/calico/issues/3145
calico_feature_detect_override: ""
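For reference, a minimal sketch of how these defaults are usually overridden from the inventory rather than edited in place. The group_vars path and the concrete values below are assumptions for illustration, not part of this commit:

# group_vars/k8s_cluster/k8s-net-calico.yml (hypothetical override file)
calico_network_backend: bird        # switch from vxlan to BGP routing
calico_vxlan_mode: Never            # VXLAN and IP-in-IP are mutually exclusive
calico_ipip_mode: CrossSubnet       # encapsulate only across L2 boundaries
calico_pool_cidr: 10.233.64.0/18    # must sit inside kube_pods_subnet
calico_mtu: 1450                    # a number, not a string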
@@ -0,0 +1,27 @@
req_extensions = v3_req
distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment

[ ssl_client ]
extendedKeyUsage = clientAuth, serverAuth
basicConstraints = CA:FALSE
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer

[ v3_ca ]
basicConstraints = CA:TRUE
keyUsage = cRLSign, digitalSignature, keyCertSign
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer

[ ssl_client_apiserver ]
extendedKeyUsage = clientAuth, serverAuth
basicConstraints = CA:FALSE
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer
subjectAltName = DNS:calico-api.calico-apiserver.svc
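The [ v3_req ], [ ssl_client ] and [ ssl_client_apiserver ] sections are selected at signing time through openssl's -extensions flag. The real work is done by make-ssl-calico.sh.j2, which is not part of this hunk; as a hedged sketch only (paths, CA filenames and the CN are illustrative assumptions), an apiserver certificate could be issued against this config like so:

- name: Example | Issue an apiserver cert against the ssl_client_apiserver section
  shell: >-
    openssl req -new -newkey rsa:2048 -nodes
    -keyout /etc/calico/certs/apiserver.key -out /tmp/apiserver.csr
    -subj "/CN=calico-api.calico-apiserver.svc" -config /etc/calico/certs/openssl.conf
    && openssl x509 -req -in /tmp/apiserver.csr
    -CA {{ kube_cert_dir }}/ca.crt -CAkey {{ kube_cert_dir }}/ca.key -CAcreateserial
    -out /etc/calico/certs/apiserver.crt -days 365
    -extensions ssl_client_apiserver -extfile /etc/calico/certs/openssl.conf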
@@ -0,0 +1,27 @@
---
- name: reset_calico_cni
  command: /bin/true
  when: calico_cni_config is defined
  notify:
    - delete 10-calico.conflist
    - Calico | delete calico-node docker containers
    - Calico | delete calico-node crio/containerd containers

- name: delete 10-calico.conflist
  file:
    path: /etc/cni/net.d/10-calico.conflist
    state: absent

- name: Calico | delete calico-node docker containers
  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
  register: docker_calico_node_remove
  until: docker_calico_node_remove is succeeded
  retries: 5
  when: container_manager in ["docker"]

- name: Calico | delete calico-node crio/containerd containers
  shell: '{{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
  register: crictl_calico_node_remove
  until: crictl_calico_node_remove is succeeded
  retries: 5
  when: container_manager in ["crio", "containerd"]
@@ -0,0 +1,5 @@
---
# Global as_num (/calico/bgp/v1/global/as_num)
# should be the same as in calico role
global_as_num: "64512"
calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}"
@@ -0,0 +1,16 @@
---
- name: Calico-rr | Pre-upgrade tasks
  include_tasks: pre.yml

- name: Calico-rr | Configuring node tasks
  include_tasks: update-node.yml

- name: Calico-rr | Set label for route reflector  # noqa 301
  command: >-
    {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }}
    'i-am-a-route-reflector=true' --overwrite
  changed_when: false
  register: calico_rr_label
  until: calico_rr_label is succeeded
  delay: "{{ retry_stagger | random + 3 }}"
  retries: 10
@@ -0,0 +1,15 @@
---
- name: Calico-rr | Disable calico-rr service if it exists
  service:
    name: calico-rr
    state: stopped
    enabled: no
  failed_when: false

- name: Calico-rr | Delete obsolete files
  file:
    path: "{{ item }}"
    state: absent
  with_items:
    - /etc/calico/calico-rr.env
    - /etc/systemd/system/calico-rr.service
@@ -0,0 +1,48 @@
---
# Workaround to retry a block of tasks, ansible doesn't have a direct way to do it,
# you can follow the block loop request in: https://github.com/ansible/ansible/issues/46203
- block:
    - name: Set the retry count
      set_fact:
        retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}"

    - name: Calico | Set label for route reflector  # noqa 301 305
      shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite"
      changed_when: false
      register: calico_rr_id_label
      until: calico_rr_id_label is succeeded
      delay: "{{ retry_stagger | random + 3 }}"
      retries: 10
      when: calico_rr_id is defined

    - name: Calico-rr | Fetch current node object
      command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson"
      changed_when: false
      register: calico_rr_node
      until: calico_rr_node is succeeded
      delay: "{{ retry_stagger | random + 3 }}"
      retries: 10

    - name: Calico-rr | Set route reflector cluster ID
      set_fact:
        calico_rr_node_patched: >-
          {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
          { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }}

    - name: Calico-rr | Configure route reflector  # noqa 301 305
      shell: "{{ bin_dir }}/calicoctl.sh replace -f-"
      args:
        stdin: "{{ calico_rr_node_patched | to_json }}"

  rescue:
    - name: Fail if retry limit is reached
      fail:
        msg: Ended after 10 retries
      when: retry_count|int == 10

    - name: Retrying node configuration
      debug:
        msg: "Failed to configure route reflector - Retrying..."

    - name: Retry node configuration
      include_tasks: update-node.yml
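As a minimal, generic sketch of the block-retry workaround used above (task names and the placeholder command are illustrative, not part of the role): the block bumps a counter, the rescue section either fails once the budget is spent or re-includes the same task file to run the whole block again.

- block:
    - name: Bump the retry counter
      set_fact:
        retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}"
    - name: Task that may fail transiently
      command: /bin/false    # placeholder for the real work
  rescue:
    - name: Give up once the retry budget is spent
      fail:
        msg: Ended after 10 retries
      when: retry_count|int == 10
    - name: Re-enter the same task file to retry the whole block
      include_tasks: update-node.yml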
@@ -0,0 +1,60 @@
---
- name: Calico | Check if calico apiserver exists
  command: "{{ kubectl }} -n calico-apiserver get secret calico-apiserver-certs"
  register: calico_apiserver_secret
  changed_when: false
  failed_when: false

- name: Calico | Create ns manifests
  template:
    src: "calico-apiserver-ns.yml.j2"
    dest: "{{ kube_config_dir }}/calico-apiserver-ns.yml"
    mode: 0644

- name: Calico | Apply ns manifests
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/calico-apiserver-ns.yml"
    state: "latest"

- name: Calico | Ensure calico certs dir
  file:
    path: /etc/calico/certs
    state: directory
    mode: 0755
  when: calico_apiserver_secret.rc != 0

- name: Calico | Copy ssl script for apiserver certs
  template:
    src: make-ssl-calico.sh.j2
    dest: "{{ bin_dir }}/make-ssl-apiserver.sh"
    mode: 0755
  when: calico_apiserver_secret.rc != 0

- name: Calico | Copy ssl config for apiserver certs
  copy:
    src: openssl.conf
    dest: /etc/calico/certs/openssl.conf
    mode: 0644
  when: calico_apiserver_secret.rc != 0

- name: Calico | Generate apiserver certs
  command: >-
    {{ bin_dir }}/make-ssl-apiserver.sh
    -f /etc/calico/certs/openssl.conf
    -c {{ kube_cert_dir }}
    -d /etc/calico/certs
    -s apiserver
  when: calico_apiserver_secret.rc != 0

- name: Calico | Create calico apiserver generic secrets
  command: >-
    {{ kubectl }} -n calico-apiserver
    create secret generic {{ item.name }}
    --from-file={{ item.cert }}
    --from-file={{ item.key }}
  with_items:
    - name: calico-apiserver-certs
      cert: /etc/calico/certs/apiserver.crt
      key: /etc/calico/certs/apiserver.key
  when: calico_apiserver_secret.rc != 0
194
ansible/kubespray/roles/network_plugin/calico/tasks/check.yml
Normal file
@@ -0,0 +1,194 @@
---
- name: Stop if legacy encapsulation variables are detected (ipip)
  assert:
    that:
      - ipip is not defined
    msg: "'ipip' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs"
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: Stop if legacy encapsulation variables are detected (ipip_mode)
  assert:
    that:
      - ipip_mode is not defined
    msg: "'ipip_mode' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs"
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: Stop if legacy encapsulation variables are detected (calcio_ipam_autoallocateblocks)
  assert:
    that:
      - calcio_ipam_autoallocateblocks is not defined
    msg: "'calcio_ipam_autoallocateblocks' configuration variable is deprecated, it's a typo, please configure your inventory with 'calico_ipam_autoallocateblocks' set to 'true' or 'false' according to your specific needs"
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: Stop if incompatible network plugin and cloudprovider
  assert:
    that:
      - calico_ipip_mode == 'Never'
      - calico_vxlan_mode in ['Always', 'CrossSubnet']
    msg: "When using cloud_provider azure and network_plugin calico, calico_ipip_mode must be 'Never' and calico_vxlan_mode 'Always' or 'CrossSubnet'"
  when:
    - cloud_provider is defined and cloud_provider == 'azure'
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: Stop if Calico version is not supported
  assert:
    that:
      - "calico_version in calico_crds_archive_checksums.keys()"
    msg: "Calico version {{ calico_version }} is not supported; it is not in {{ calico_crds_archive_checksums.keys() }}"
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: Get current calico cluster version
  shell: "set -o pipefail && {{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'"
  args:
    executable: /bin/bash
  register: calico_version_on_server
  async: 10
  poll: 3
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  changed_when: false
  failed_when: false

- name: Check that the current calico version is recent enough for upgrade
  assert:
    that:
      - calico_version_on_server.stdout is version(calico_min_version_required, '>=')
    msg: >
      Your version of calico is not recent enough for an upgrade.
      The minimum version is {{ calico_min_version_required }}, supported by the previous kubespray release.
  when:
    - 'calico_version_on_server.stdout is defined'
    - calico_version_on_server.stdout
    - inventory_hostname == groups['kube_control_plane'][0]
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check that cluster_id is set if calico_rr enabled"
  assert:
    that:
      - cluster_id is defined
    msg: "A unique cluster_id is required if using calico_rr"
  when:
    - peer_with_calico_rr
    - inventory_hostname == groups['kube_control_plane'][0]
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check that calico_rr nodes are in k8s_cluster group"
  assert:
    that:
      - '"k8s_cluster" in group_names'
    msg: "calico_rr must be a child group of k8s_cluster group"
  when:
    - '"calico_rr" in group_names'
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check vars defined correctly"
  assert:
    that:
      - "calico_pool_name is defined"
      - "calico_pool_name is match('^[a-zA-Z0-9-_\\\\.]{2,63}$')"
    msg: "calico_pool_name contains invalid characters"
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check calico network backend defined correctly"
  assert:
    that:
      - "calico_network_backend in ['bird', 'vxlan', 'none']"
    msg: "calico network backend is not 'bird', 'vxlan' or 'none'"
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check ipip and vxlan mode defined correctly"
  assert:
    that:
      - "calico_ipip_mode in ['Always', 'CrossSubnet', 'Never']"
      - "calico_vxlan_mode in ['Always', 'CrossSubnet', 'Never']"
    msg: "calico inter host encapsulation mode is not 'Always', 'CrossSubnet' or 'Never'"
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check ipip and vxlan mode if simultaneously enabled"
  assert:
    that:
      - "calico_vxlan_mode in ['Never']"
    msg: "IP in IP and VXLAN are mutually exclusive modes"
  when:
    - "calico_ipip_mode in ['Always', 'CrossSubnet']"
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check ipip and vxlan mode if simultaneously enabled"
  assert:
    that:
      - "calico_ipip_mode in ['Never']"
    msg: "IP in IP and VXLAN are mutually exclusive modes"
  when:
    - "calico_vxlan_mode in ['Always', 'CrossSubnet']"
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Get Calico {{ calico_pool_name }} configuration"
  command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }} -o json"
  failed_when: False
  changed_when: False
  check_mode: no
  register: calico
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Set calico_pool_conf"
  set_fact:
    calico_pool_conf: '{{ calico.stdout | from_json }}'
  when: calico.rc == 0 and calico.stdout
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check if inventory matches current cluster configuration"
  assert:
    that:
      - calico_pool_conf.spec.blockSize|int == (calico_pool_blocksize | default(kube_network_node_prefix) | int)
      - calico_pool_conf.spec.cidr == (calico_pool_cidr | default(kube_pods_subnet))
      - not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode
      - not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode
    msg: "Your inventory doesn't match the current cluster configuration"
  when:
    - calico_pool_conf is defined
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check kdd calico_datastore if calico_apiserver_enabled"
  assert:
    that: calico_datastore == "kdd"
    msg: "When using calico apiserver you need to use the kubernetes datastore"
  when:
    - calico_apiserver_enabled
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check kdd calico_datastore if typha_enabled"
  assert:
    that: calico_datastore == "kdd"
    msg: "When using typha you need to use the kubernetes datastore"
  when:
    - typha_enabled
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: "Check ipip mode is Never for calico ipv6"
  assert:
    that:
      - "calico_ipip_mode_ipv6 in ['Never']"
    msg: "Calico doesn't support ipip tunneling for IPv6"
  when:
    - enable_dual_stack_networks
  run_once: True
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
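The encapsulation assertions above accept only one tunnelling mode at a time. As an illustration (values assumed, not part of this commit), either of these inventory snippets would pass the checks, while enabling both modes at once would fail:

# VXLAN only (matches the role defaults)
calico_vxlan_mode: Always
calico_ipip_mode: Never

# or: IP-in-IP across subnets only
# calico_ipip_mode: CrossSubnet
# calico_vxlan_mode: Never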
475
ansible/kubespray/roles/network_plugin/calico/tasks/install.yml
Normal file
@@ -0,0 +1,475 @@
---
- name: Calico | Install Wireguard packages
  package:
    name: "{{ item }}"
    state: present
  with_items: "{{ calico_wireguard_packages }}"
  register: calico_package_install
  until: calico_package_install is succeeded
  retries: 4
  when: calico_wireguard_enabled

- name: Calico | Copy calicoctl binary from download dir
  copy:
    src: "{{ local_release_dir }}/calicoctl"
    dest: "{{ bin_dir }}/calicoctl"
    mode: 0755
    remote_src: yes

- name: Calico | Write Calico cni config
  template:
    src: "cni-calico.conflist.j2"
    dest: "/etc/cni/net.d/calico.conflist.template"
    mode: 0644
    owner: root
  register: calico_conflist
  notify: reset_calico_cni

- name: Calico | Create calico certs directory
  file:
    dest: "{{ calico_cert_dir }}"
    state: directory
    mode: 0750
    owner: root
    group: root
  when: calico_datastore == "etcd"

- name: Calico | Link etcd certificates for calico-node
  file:
    src: "{{ etcd_cert_dir }}/{{ item.s }}"
    dest: "{{ calico_cert_dir }}/{{ item.d }}"
    state: hard
    mode: 0640
    force: yes
  with_items:
    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
  when: calico_datastore == "etcd"

- name: Calico | Generate typha certs
  include_tasks: typha_certs.yml
  when:
    - typha_secure
    - inventory_hostname == groups['kube_control_plane'][0]

- name: Calico | Generate apiserver certs
  include_tasks: calico_apiserver_certs.yml
  when:
    - calico_apiserver_enabled
    - inventory_hostname == groups['kube_control_plane'][0]

- name: Calico | Install calicoctl wrapper script
  template:
    src: "calicoctl.{{ calico_datastore }}.sh.j2"
    dest: "{{ bin_dir }}/calicoctl.sh"
    mode: 0755
    owner: root
    group: root

- name: Calico | wait for etcd
  uri:
    url: "{{ etcd_access_addresses.split(',') | first }}/health"
    validate_certs: no
    client_cert: "{{ calico_cert_dir }}/cert.crt"
    client_key: "{{ calico_cert_dir }}/key.pem"
  register: result
  until: result.status == 200 or result.status == 401
  retries: 10
  delay: 5
  run_once: true
  when: calico_datastore == "etcd"

- name: Calico | Check if calico network pool has already been configured
  # noqa 306 - grep will exit 1 if no match found
  shell: >
    {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
  args:
    executable: /bin/bash
  register: calico_conf
  retries: 4
  until: calico_conf.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  when:
    - inventory_hostname == groups['kube_control_plane'][0]

- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined
  assert:
    that: "[calico_pool_cidr] | ipaddr(kube_pods_subnet) | length == 1"
    msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - 'calico_conf.stdout == "0"'
    - calico_pool_cidr is defined

- name: Calico | Check if calico IPv6 network pool has already been configured
  # noqa 306 - grep will exit 1 if no match found
  shell: >
    {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l
  args:
    executable: /bin/bash
  register: calico_conf_ipv6
  retries: 4
  until: calico_conf_ipv6.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - enable_dual_stack_networks

- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined
  assert:
    that: "[calico_pool_cidr_ipv6] | ipaddr(kube_pods_subnet_ipv6) | length == 1"
    msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
    - calico_pool_cidr_ipv6 is defined
    - enable_dual_stack_networks

- block:
    - name: Calico | Check if extra directory is needed
      stat:
        path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3','<')) else 'crd' }}"
      register: kdd_path
    - name: Calico | Set kdd path when calico < v3.22.3
      set_fact:
        calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/kdd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}"
      when:
        - calico_version is version('v3.22.3', '<')
    - name: Calico | Set kdd path when calico > v3.22.2
      set_fact:
        calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/crd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}"
      when:
        - calico_version is version('v3.22.2', '>')
    - name: Calico | Create calico manifests for kdd
      assemble:
        src: "{{ calico_kdd_path }}"
        dest: "{{ kube_config_dir }}/kdd-crds.yml"
        mode: 0644
        delimiter: "---\n"
        regexp: ".*\\.yaml"
        remote_src: true

    - name: Calico | Create Calico Kubernetes datastore resources
      kube:
        kubectl: "{{ bin_dir }}/kubectl"
        filename: "{{ kube_config_dir }}/kdd-crds.yml"
        state: "latest"
      when:
        - inventory_hostname == groups['kube_control_plane'][0]
  when:
    - inventory_hostname in groups['kube_control_plane']
    - calico_datastore == "kdd"

- block:
    - name: Calico | Get existing FelixConfiguration
      command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json"
      register: _felix_cmd
      ignore_errors: True
      changed_when: False

    - name: Calico | Set kubespray FelixConfiguration
      set_fact:
        _felix_config: >
          {
            "kind": "FelixConfiguration",
            "apiVersion": "projectcalico.org/v3",
            "metadata": {
              "name": "default",
            },
            "spec": {
              "ipipEnabled": {{ calico_ipip_mode != 'Never' }},
              "reportingInterval": "{{ calico_felix_reporting_interval }}",
              "bpfLogLevel": "{{ calico_bpf_log_level }}",
              "bpfEnabled": {{ calico_bpf_enabled | bool }},
              "bpfExternalServiceMode": "{{ calico_bpf_service_mode }}",
              "wireguardEnabled": {{ calico_wireguard_enabled | bool }},
              "logSeverityScreen": "{{ calico_felix_log_severity_screen }}",
              "vxlanEnabled": {{ calico_vxlan_mode != 'Never' }},
              "featureDetectOverride": "{{ calico_feature_detect_override }}"
            }
          }

    - name: Calico | Process FelixConfiguration
      set_fact:
        _felix_config: "{{ _felix_cmd.stdout | from_json | combine(_felix_config, recursive=True) }}"
      when:
        - _felix_cmd is success

    - name: Calico | Configure calico FelixConfiguration
      command:
        cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
        stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config|to_json) }}"
      changed_when: False
  when:
    - inventory_hostname == groups['kube_control_plane'][0]

- block:
    - name: Calico | Get existing calico network pool
      command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
      register: _calico_pool_cmd
      ignore_errors: True
      changed_when: False

    - name: Calico | Set kubespray calico network pool
      set_fact:
        _calico_pool: >
          {
            "kind": "IPPool",
            "apiVersion": "projectcalico.org/v3",
            "metadata": {
              "name": "{{ calico_pool_name }}",
            },
            "spec": {
              "blockSize": {{ calico_pool_blocksize | default(kube_network_node_prefix) }},
              "cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}",
              "ipipMode": "{{ calico_ipip_mode }}",
              "vxlanMode": "{{ calico_vxlan_mode }}",
              "natOutgoing": {{ nat_outgoing|default(false) }}
            }
          }

    - name: Calico | Process calico network pool
      set_fact:
        _calico_pool: "{{ _calico_pool_cmd.stdout | from_json | combine(_calico_pool, recursive=True) }}"
      when:
        - _calico_pool_cmd is success

    - name: Calico | Configure calico network pool
      command:
        cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
        stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool|to_json) }}"
      changed_when: False
  when:
    - inventory_hostname == groups['kube_control_plane'][0]

- block:
    - name: Calico | Get existing calico ipv6 network pool
      command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
      register: _calico_pool_ipv6_cmd
      ignore_errors: True
      changed_when: False

    - name: Calico | Set kubespray calico network pool
      set_fact:
        _calico_pool_ipv6: >
          {
            "kind": "IPPool",
            "apiVersion": "projectcalico.org/v3",
            "metadata": {
              "name": "{{ calico_pool_name }}-ipv6",
            },
            "spec": {
              "blockSize": {{ calico_pool_blocksize_ipv6 | default(kube_network_node_prefix_ipv6) }},
              "cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}",
              "ipipMode": "{{ calico_ipip_mode_ipv6 }}",
              "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}",
              "natOutgoing": {{ nat_outgoing_ipv6|default(false) }}
            }
          }

    - name: Calico | Process calico ipv6 network pool
      set_fact:
        _calico_pool_ipv6: "{{ _calico_pool_ipv6_cmd.stdout | from_json | combine(_calico_pool_ipv6, recursive=True) }}"
      when:
        - _calico_pool_ipv6_cmd is success

    - name: Calico | Configure calico ipv6 network pool
      command:
        cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
        stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6|to_json) }}"
      changed_when: False
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - enable_dual_stack_networks | bool

- name: Populate Service External IPs
  set_fact:
    _service_external_ips: "{{ _service_external_ips|default([]) + [ {'cidr': item} ] }}"
  with_items: "{{ calico_advertise_service_external_ips }}"
  run_once: yes

- name: Populate Service LoadBalancer IPs
  set_fact:
    _service_loadbalancer_ips: "{{ _service_loadbalancer_ips|default([]) + [ {'cidr': item} ] }}"
  with_items: "{{ calico_advertise_service_loadbalancer_ips }}"
  run_once: yes

- name: "Determine nodeToNodeMesh needed state"
  set_fact:
    nodeToNodeMeshEnabled: "false"
  when:
    - peer_with_router|default(false) or peer_with_calico_rr|default(false)
    - inventory_hostname in groups['k8s_cluster']
  run_once: yes

- block:
    - name: Calico | Get existing BGP Configuration
      command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json"
      register: _bgp_config_cmd
      ignore_errors: True
      changed_when: False

    - name: Calico | Set kubespray BGP Configuration
      set_fact:
        _bgp_config: >
          {
            "kind": "BGPConfiguration",
            "apiVersion": "projectcalico.org/v3",
            "metadata": {
              "name": "default",
            },
            "spec": {
              "listenPort": {{ calico_bgp_listen_port }},
              "logSeverityScreen": "Info",
              {% if not calico_no_global_as_num|default(false) %}"asNumber": {{ global_as_num }},{% endif %}
              "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} ,
              {% if calico_advertise_cluster_ips|default(false) %}
              "serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %}
              {% if calico_advertise_service_loadbalancer_ips|length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
              "serviceExternalIPs": {{ _service_external_ips|default([]) }}
            }
          }

    - name: Calico | Process BGP Configuration
      set_fact:
        _bgp_config: "{{ _bgp_config_cmd.stdout | from_json | combine(_bgp_config, recursive=True) }}"
      when:
        - _bgp_config_cmd is success

    - name: Calico | Set up BGP Configuration
      command:
        cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
        stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config|to_json) }}"
      changed_when: False
  when:
    - inventory_hostname == groups['kube_control_plane'][0]

- name: Calico | Create calico manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: 0644
  with_items:
    - {name: calico-config, file: calico-config.yml, type: cm}
    - {name: calico-node, file: calico-node.yml, type: ds}
    - {name: calico, file: calico-node-sa.yml, type: sa}
    - {name: calico, file: calico-cr.yml, type: clusterrole}
    - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
    - {name: kubernetes-services-endpoint, file: kubernetes-services-endpoint.yml, type: cm }
  register: calico_node_manifests
  when:
    - inventory_hostname in groups['kube_control_plane']
    - rbac_enabled or item.type not in rbac_resources

- name: Calico | Create calico manifests for typha
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: 0644
  with_items:
    - {name: calico, file: calico-typha.yml, type: typha}
  register: calico_node_typha_manifest
  when:
    - inventory_hostname in groups['kube_control_plane']
    - typha_enabled

- name: Calico | get calico apiserver caBundle
  command: "{{ bin_dir }}/kubectl get secret -n calico-apiserver calico-apiserver-certs -o jsonpath='{.data.apiserver\\.crt}'"
  changed_when: false
  register: calico_apiserver_cabundle
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - calico_apiserver_enabled

- name: Calico | set calico apiserver caBundle fact
  set_fact:
    calico_apiserver_cabundle: "{{ calico_apiserver_cabundle.stdout }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - calico_apiserver_enabled

- name: Calico | Create calico manifests for apiserver
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: 0644
  with_items:
    - {name: calico, file: calico-apiserver.yml, type: calico-apiserver}
  register: calico_apiserver_manifest
  when:
    - inventory_hostname in groups['kube_control_plane']
    - calico_apiserver_enabled

- name: Start Calico resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items:
    - "{{ calico_node_manifests.results }}"
    - "{{ calico_node_typha_manifest.results }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - not item is skipped
  loop_control:
    label: "{{ item.item.file }}"

- name: Start Calico apiserver resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "calico-apiserver"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items:
    - "{{ calico_apiserver_manifest.results }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - not item is skipped
  loop_control:
    label: "{{ item.item.file }}"

- name: Wait for calico kubeconfig to be created
  wait_for:
    path: /etc/cni/net.d/calico-kubeconfig
  when:
    - inventory_hostname not in groups['kube_control_plane']
    - calico_datastore == "kdd"

- name: Calico | Create Calico ipam manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: 0644
  with_items:
    - {name: calico, file: calico-ipamconfig.yml, type: ipam}
  when:
    - inventory_hostname in groups['kube_control_plane']
    - calico_datastore == "kdd"

- name: Calico | Create ipamconfig resources
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/calico-ipamconfig.yml"
    state: "latest"
  register: resource_result
  until: resource_result is succeeded
  retries: 4
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - calico_datastore == "kdd"

- include_tasks: peer_with_calico_rr.yml
  when:
    - peer_with_calico_rr|default(false)

- include_tasks: peer_with_router.yml
  when:
    - peer_with_router|default(false)
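For orientation, the IPPool built by the set_fact above would render to roughly the following object before being piped into calicoctl.sh apply -f -. The concrete values shown are common kubespray defaults and are illustrative only:

apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: default-pool
spec:
  blockSize: 26
  cidr: 10.233.64.0/18    # calico_pool_cidr | default(kube_pods_subnet)
  ipipMode: Never
  vxlanMode: Always
  natOutgoing: true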
@@ -0,0 +1,6 @@
---
- import_tasks: pre.yml

- import_tasks: repos.yml

- include_tasks: install.yml
@@ -0,0 +1,86 @@
---
- name: Calico | Set label for group nodes  # noqa 301 305
  shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite"
  changed_when: false
  register: calico_group_id_label
  until: calico_group_id_label is succeeded
  delay: "{{ retry_stagger | random + 3 }}"
  retries: 10
  when:
    - calico_group_id is defined

- name: Calico | Configure peering with route reflectors at global scope
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    # pass stdin through unchanged when it is already a string
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
          "name": "{{ calico_rr_id }}-to-node"
       },
       "spec": {
          "peerSelector": "calico-rr-id == '{{ calico_rr_id }}'",
          "nodeSelector": "calico-group-id == '{{ calico_group_id }}'"
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  when:
    - calico_rr_id is defined
    - calico_group_id is defined
    - inventory_hostname in groups['calico_rr']

- name: Calico | Configure peering with route reflectors at global scope
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    # pass stdin through unchanged when it is already a string
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
          "name": "peer-to-rrs"
       },
       "spec": {
          "nodeSelector": "!has(i-am-a-route-reflector)",
          "peerSelector": "has(i-am-a-route-reflector)"
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ groups['calico_rr'] | default([]) }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - calico_rr_id is not defined or calico_group_id is not defined

- name: Calico | Configure route reflectors to peer with each other
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    # pass stdin through unchanged when it is already a string
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
          "name": "rr-mesh"
       },
       "spec": {
          "nodeSelector": "has(i-am-a-route-reflector)",
          "peerSelector": "has(i-am-a-route-reflector)"
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ groups['calico_rr'] | default([]) }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
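These tasks assume a calico_rr inventory group nested under k8s_cluster and a cluster_id, both of which check.yml enforces. A hedged sketch of the expected inventory shape, with hostnames and the cluster ID invented for illustration:

all:
  children:
    calico_rr:
      hosts:
        rr0: {}
        rr1: {}
    k8s_cluster:
      children:
        calico_rr: {}
  vars:
    peer_with_calico_rr: true
    cluster_id: 244.0.0.1    # unique route reflector cluster ID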
@@ -0,0 +1,77 @@
---
- name: Calico | Configure peering with router(s) at global scope
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
          "name": "global-{{ item.name | default(item.router_id|replace(':','-')) }}"
       },
       "spec": {
          "asNumber": "{{ item.as }}",
          "peerIP": "{{ item.router_id }}"
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]

- name: Calico | Configure node asNumber for per node peering
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "Node",
       "metadata": {
          "name": "{{ inventory_hostname }}"
       },
       "spec": {
          "bgp": {
            "asNumber": "{{ local_as }}"
          },
          "orchRefs":[{"nodeName":"{{ inventory_hostname }}","orchestrator":"k8s"}]
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  when:
    - inventory_hostname in groups['k8s_cluster']
    - local_as is defined
    - groups['calico_rr'] | default([]) | length == 0

- name: Calico | Configure peering with router(s) at node scope
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
          "name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id|replace(':','-')) }}"
       },
       "spec": {
          "asNumber": "{{ item.as }}",
          "node": "{{ inventory_hostname }}",
          "peerIP": "{{ item.router_id }}",
          "sourceAddress": "{{ item.sourceaddress|default('UseNodeIP') }}"
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}"
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  when:
    - inventory_hostname in groups['k8s_cluster']
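The peers variable consumed above is expected to be a list of router definitions with at least router_id and as, plus an optional scope and sourceaddress. A sketch of its shape, with addresses and AS numbers invented for illustration:

peers:
  - router_id: 10.99.0.1      # peerIP of the upstream router
    as: "65000"               # its AS number
    scope: global             # peered once, cluster-wide
  - router_id: 10.99.0.2
    as: "65001"
    sourceaddress: None       # node-scoped peer (scope omitted or set to 'node')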
46
ansible/kubespray/roles/network_plugin/calico/tasks/pre.yml
Normal file
@@ -0,0 +1,46 @@
---
- name: Slurp CNI config
  slurp:
    src: /etc/cni/net.d/10-calico.conflist
  register: calico_cni_config_slurp
  failed_when: false

- block:
    - name: Set fact calico_cni_config from slurped CNI config
      set_fact:
        calico_cni_config: "{{ calico_cni_config_slurp['content'] | b64decode | from_json }}"
    - name: Set fact calico_datastore to etcd if needed
      set_fact:
        calico_datastore: etcd
      when:
        - "'plugins' in calico_cni_config"
        - "'etcd_endpoints' in calico_cni_config.plugins.0"
  when: calico_cni_config_slurp.content is defined

- name: Calico | Get kubelet hostname
  shell: >-
    set -o pipefail && {{ kubectl }} get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
    | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
  args:
    executable: /bin/bash
  register: calico_kubelet_name
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  when:
    - "cloud_provider is defined"

- name: Calico | Gather os specific variables
  include_vars: "{{ item }}"
  with_first_found:
    - files:
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}.yml"
        - "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml"
        - "{{ ansible_os_family|lower }}.yml"
        - defaults.yml
      paths:
        - ../vars
      skip: true
  tags:
    - facts
@@ -0,0 +1,21 @@
---
- name: Calico | Add wireguard yum repo
  when:
    - calico_wireguard_enabled
  block:

    - name: Calico | Add wireguard yum repo
      yum_repository:
        name: copr:copr.fedorainfracloud.org:jdoss:wireguard
        file: _copr:copr.fedorainfracloud.org:jdoss:wireguard
        description: Copr repo for wireguard owned by jdoss
        baseurl: "{{ calico_wireguard_repo }}"
        gpgcheck: yes
        gpgkey: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/pubkey.gpg
        skip_if_unavailable: yes
        enabled: yes
        repo_gpgcheck: no
      when:
        - ansible_os_family in ['RedHat']
        - ansible_distribution not in ['Fedora']
        - ansible_facts['distribution_major_version'] | int < 9
@@ -0,0 +1,30 @@
---
- name: reset | check vxlan.calico network device
  stat:
    path: /sys/class/net/vxlan.calico
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: vxlan

- name: reset | remove the network vxlan.calico device created by calico
  command: ip link del vxlan.calico
  when: vxlan.stat.exists

- name: reset | check dummy0 network device
  stat:
    path: /sys/class/net/dummy0
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: dummy0

- name: reset | remove the network device created by calico
  command: ip link del dummy0
  when: dummy0.stat.exists

- name: reset | get and remove remaining routes set by bird
  shell: set -o pipefail && ip route show proto bird | xargs -i bash -c "ip route del {} proto bird "
  args:
    executable: /bin/bash
  changed_when: false
@@ -0,0 +1,51 @@
---
- name: Calico | Check if typha-server exists
  command: "{{ kubectl }} -n kube-system get secret typha-server"
  register: typha_server_secret
  changed_when: false
  failed_when: false

- name: Calico | Ensure calico certs dir
  file:
    path: /etc/calico/certs
    state: directory
    mode: 0755
  when: typha_server_secret.rc != 0

- name: Calico | Copy ssl script for typha certs
  template:
    src: make-ssl-calico.sh.j2
    dest: "{{ bin_dir }}/make-ssl-typha.sh"
    mode: 0755
  when: typha_server_secret.rc != 0

- name: Calico | Copy ssl config for typha certs
  copy:
    src: openssl.conf
    dest: /etc/calico/certs/openssl.conf
    mode: 0644
  when: typha_server_secret.rc != 0

- name: Calico | Generate typha certs
  command: >-
    {{ bin_dir }}/make-ssl-typha.sh
    -f /etc/calico/certs/openssl.conf
    -c {{ kube_cert_dir }}
    -d /etc/calico/certs
    -s typha
  when: typha_server_secret.rc != 0

- name: Calico | Create typha tls secrets
  command: >-
    {{ kubectl }} -n kube-system
    create secret tls {{ item.name }}
    --cert {{ item.cert }}
    --key {{ item.key }}
  with_items:
    - name: typha-server
      cert: /etc/calico/certs/typha-server.crt
      key: /etc/calico/certs/typha-server.key
    - name: typha-client
      cert: /etc/calico/certs/typha-client.crt
      key: /etc/calico/certs/typha-client.key
  when: typha_server_secret.rc != 0
@@ -0,0 +1,10 @@
# This is a tech-preview manifest which installs the Calico API server. Note that this manifest is liable to change
# or be removed in future releases without further warning.
#
# Namespace and namespace-scoped resources.
apiVersion: v1
kind: Namespace
metadata:
  labels:
    name: calico-apiserver
  name: calico-apiserver
@@ -0,0 +1,287 @@
|
||||
# Policy to ensure the API server isn't cut off. Can be modified, but ensure
|
||||
# that the main API server is always able to reach the Calico API server.
|
||||
kind: NetworkPolicy
|
||||
apiVersion: networking.k8s.io/v1
|
||||
metadata:
|
||||
name: allow-apiserver
|
||||
namespace: calico-apiserver
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
apiserver: "true"
|
||||
ingress:
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: 5443
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: calico-api
|
||||
namespace: calico-apiserver
|
||||
spec:
|
||||
ports:
|
||||
- name: apiserver
|
||||
port: 443
|
||||
protocol: TCP
|
||||
targetPort: 5443
|
||||
selector:
|
||||
apiserver: "true"
|
||||
type: ClusterIP
|
||||
|
||||
---
|
||||
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
  labels:
    apiserver: "true"
    k8s-app: calico-apiserver
  name: calico-apiserver
  namespace: calico-apiserver
spec:
  replicas: 1
  selector:
    matchLabels:
      apiserver: "true"
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        apiserver: "true"
        k8s-app: calico-apiserver
      name: calico-apiserver
      namespace: calico-apiserver
    spec:
      containers:
        - args:
            - --secure-port=5443
          env:
            - name: DATASTORE_TYPE
              value: kubernetes
          image: {{ calico_apiserver_image_repo }}:{{ calico_apiserver_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          livenessProbe:
            httpGet:
              path: /version
              port: 5443
              scheme: HTTPS
            initialDelaySeconds: 90
            periodSeconds: 10
          name: calico-apiserver
          readinessProbe:
            exec:
              command:
                - /code/filecheck
            failureThreshold: 5
            initialDelaySeconds: 5
            periodSeconds: 10
          securityContext:
            privileged: false
            runAsUser: 0
          volumeMounts:
            - mountPath: /code/apiserver.local.config/certificates
              name: calico-apiserver-certs
      dnsPolicy: ClusterFirst
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: Always
      serviceAccount: calico-apiserver
      serviceAccountName: calico-apiserver
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
      volumes:
        - name: calico-apiserver-certs
          secret:
            secretName: calico-apiserver-certs

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-apiserver
  namespace: calico-apiserver

---

# Cluster-scoped resources below here.
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v3.projectcalico.org
spec:
  group: projectcalico.org
  groupPriorityMinimum: 1500
  caBundle: {{ calico_apiserver_cabundle }}
  service:
    name: calico-api
    namespace: calico-apiserver
    port: 443
  version: v3
  versionPriority: 200

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: calico-crds
rules:
  - apiGroups:
      - extensions
      - networking.k8s.io
      - ""
    resources:
      - networkpolicies
      - nodes
      - namespaces
      - pods
      - serviceaccounts
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - crd.projectcalico.org
    resources:
      - globalnetworkpolicies
      - networkpolicies
      - clusterinformations
      - hostendpoints
      - globalnetworksets
      - networksets
      - bgpconfigurations
      - bgppeers
      - felixconfigurations
      - kubecontrollersconfigurations
      - ippools
      - ipreservations
      - ipamblocks
      - blockaffinities
      - caliconodestatuses
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
  - apiGroups:
      - policy
    resourceNames:
      - calico-apiserver
    resources:
      - podsecuritypolicies
    verbs:
      - use

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: calico-extension-apiserver-auth-access
rules:
  - apiGroups:
      - ""
    resourceNames:
      - extension-apiserver-authentication
    resources:
      - configmaps
    verbs:
      - list
      - watch
      - get
  - apiGroups:
      - rbac.authorization.k8s.io
    resources:
      - clusterroles
      - clusterrolebindings
      - roles
      - rolebindings
    verbs:
      - get
      - list
      - watch

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: calico-webhook-reader
rules:
  - apiGroups:
      - admissionregistration.k8s.io
    resources:
      - mutatingwebhookconfigurations
      - validatingwebhookconfigurations
    verbs:
      - get
      - list
      - watch

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-apiserver-access-crds
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-crds
subjects:
  - kind: ServiceAccount
    name: calico-apiserver
    namespace: calico-apiserver

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-apiserver-delegate-auth
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: calico-apiserver
    namespace: calico-apiserver

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-apiserver-webhook-reader
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-webhook-reader
subjects:
  - kind: ServiceAccount
    name: calico-apiserver
    namespace: calico-apiserver

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-extension-apiserver-auth-access
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-extension-apiserver-auth-access
subjects:
  - kind: ServiceAccount
    name: calico-apiserver
    namespace: calico-apiserver
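A quick sanity check after the calico-apiserver manifests above are applied -- a minimal sketch using plain kubectl, not part of the templates themselves; resource names come from the manifests above:

# Pod running and aggregated API registered?
kubectl -n calico-apiserver get pods -l k8s-app=calico-apiserver
kubectl get apiservice v3.projectcalico.org
# Once the APIService reports Available, projectcalico.org/v3 resources are served via aggregation
kubectl get ippools.projectcalico.org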
@@ -0,0 +1,27 @@

kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
{% if calico_datastore == "etcd" %}
  etcd_endpoints: "{{ etcd_access_addresses }}"
  etcd_ca: "/calico-secrets/ca_cert.crt"
  etcd_cert: "/calico-secrets/cert.crt"
  etcd_key: "/calico-secrets/key.pem"
{% elif calico_datastore == "kdd" and typha_enabled %}
  # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas
  # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is
  # essential.
  typha_service_name: "calico-typha"
{% endif %}
{% if calico_network_backend == 'bird' %}
  cluster_type: "kubespray,bgp"
  calico_backend: "bird"
{% else %}
  cluster_type: "kubespray"
  calico_backend: "{{ calico_network_backend }}"
{% endif %}
{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router|default(false) %}
  as: "{{ local_as|default(global_as_num) }}"
{% endif -%}
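The rendered ConfigMap (which keys appear depends on calico_datastore, typha_enabled and the network backend) can be inspected on a live cluster with:

# Show the calico-config data as rendered by this template
kubectl -n kube-system get configmap calico-config -o yaml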
@@ -0,0 +1,168 @@

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
      - configmaps
    verbs:
      - get
  # EndpointSlices are used for Service-based network policy rule
  # enforcement.
  - apiGroups: ["discovery.k8s.io"]
    resources:
      - endpointslices
    verbs:
      - watch
      - list
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      - watch
      - list
{% if calico_datastore == "kdd" %}
      # Used to discover Typhas.
      - get
{% endif %}
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
{% if calico_datastore == "etcd" %}
  - apiGroups:
      - policy
    resourceNames:
      - privileged
    resources:
      - podsecuritypolicies
    verbs:
      - use
{% elif calico_datastore == "kdd" %}
      # Calico stores some configuration information in node annotations.
      - update
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  # Used by Calico for policy information.
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - serviceaccounts
    verbs:
      - list
      - watch
  # The CNI plugin patches pods/status.
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  # Calico monitors various CRDs for config.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - ipreservations
      - ipamblocks
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - networksets
      - clusterinformations
      - hostendpoints
      - blockaffinities
      - caliconodestatuses
    verbs:
      - get
      - list
      - watch
  # Calico must create and update some CRDs on startup.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
      - felixconfigurations
      - clusterinformations
    verbs:
      - create
      - update
  # Calico must update some CRDs.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - caliconodestatuses
    verbs:
      - update
  # Calico stores some configuration information on the node.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  # These permissions are only required for upgrade from v2.6, and can
  # be removed after upgrade or on fresh installations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - bgpconfigurations
      - bgppeers
    verbs:
      - create
      - update
  # These permissions are required for Calico CNI to perform IPAM allocations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ipamconfigs
    verbs:
      - get
  # Block affinities must also be watchable by confd for route aggregation.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
    verbs:
      - watch
  # The Calico IPAM migration needs to get daemonsets. These permissions can be
  # removed if not upgrading from an installation using host-local IPAM.
  - apiGroups: ["apps"]
    resources:
      - daemonsets
    verbs:
      - get
{% endif %}
  # Used for creating service account tokens to be used by the CNI plugin
  - apiGroups: [""]
    resources:
      - serviceaccounts/token
    resourceNames:
      - calico-node
    verbs:
      - create
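A couple of the permissions above can be spot-checked once the ClusterRoleBinding below is in place -- a minimal sketch with kubectl's SubjectAccessReview helper, not part of the template:

# Should both return "yes" for the calico-node service account
kubectl auth can-i list endpointslices.discovery.k8s.io --as=system:serviceaccount:kube-system:calico-node
kubectl auth can-i patch nodes/status --as=system:serviceaccount:kube-system:calico-node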
@@ -0,0 +1,13 @@

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
  - kind: ServiceAccount
    name: calico-node
    namespace: kube-system
@@ -0,0 +1,8 @@

apiVersion: crd.projectcalico.org/v1
kind: IPAMConfig
metadata:
  name: default
spec:
  autoAllocateBlocks: {{ calico_ipam_autoallocateblocks }}
  strictAffinity: {{ calico_ipam_strictaffinity }}
  maxBlocksPerHost: {{ calico_ipam_maxblocksperhost }}
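The effective IPAM settings written by this resource can be read back with calicoctl (assuming calicoctl is installed on the node, e.g. via the wrapper scripts further down):

# Shows strictAffinity, autoAllocateBlocks and maxBlocksPerHost as Calico sees them
calicoctl ipam show --show-configuration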
@@ -0,0 +1,6 @@

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system
@@ -0,0 +1,464 @@

---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
{% if calico_datastore == "etcd" %}
        kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
{% endif %}
{% if calico_felix_prometheusmetricsenabled %}
        prometheus.io/scrape: 'true'
        prometheus.io/port: "{{ calico_felix_prometheusmetricsport }}"
{% endif %}
    spec:
      nodeSelector:
        {{ calico_ds_nodeselector }}
      priorityClassName: system-node-critical
      hostNetwork: true
      serviceAccountName: calico-node
      tolerations:
        - operator: Exists
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      initContainers:
{% if calico_datastore == "kdd" %}
        # This container performs upgrade from host-local IPAM to calico-ipam.
        # It can be deleted if this is a fresh installation, or if you have already
        # upgraded to use calico-ipam.
        - name: upgrade-ipam
          image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
          volumeMounts:
            - mountPath: /var/lib/cni/networks
              name: host-local-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
          securityContext:
            privileged: true
{% endif %}
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          command: ["/opt/cni/bin/install"]
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # Install CNI binaries
            - name: UPDATE_CNI_BINARIES
              value: "true"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG_FILE
              value: "/host/etc/cni/net.d/calico.conflist.template"
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
{% if calico_datastore == "kdd" %}
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
{% endif %}
          volumeMounts:
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
          securityContext:
            privileged: true
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: {{ calico_flexvol_image_repo }}:{{ calico_flexvol_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          volumeMounts:
            - name: flexvol-driver-host
              mountPath: /host/driver
          securityContext:
            privileged: true
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            # The location of the Calico etcd cluster.
{% if calico_datastore == "etcd" %}
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
{% elif calico_datastore == "kdd" %}
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
{% if typha_enabled %}
            # Typha support: controlled by the ConfigMap.
            - name: FELIX_TYPHAK8SSERVICENAME
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: typha_service_name
{% if typha_secure %}
            - name: FELIX_TYPHACN
              value: typha-server
            - name: FELIX_TYPHACAFILE
              value: /etc/typha-ca/ca.crt
            - name: FELIX_TYPHACERTFILE
              value: /etc/typha-client/typha-client.crt
            - name: FELIX_TYPHAKEYFILE
              value: /etc/typha-client/typha-client.key
{% endif %}
{% endif %}
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
{% endif %}
{% if calico_network_backend == 'vxlan' %}
            - name: FELIX_VXLANVNI
              value: "{{ calico_vxlan_vni }}"
            - name: FELIX_VXLANPORT
              value: "{{ calico_vxlan_port }}"
{% endif %}
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cluster_type
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "{{ calico_endpoint_to_host_action|default('RETURN') }}"
            - name: FELIX_HEALTHHOST
              value: "{{ calico_healthhost }}"
{% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %}
            - name: FELIX_KUBENODEPORTRANGES
              value: "{{ kube_apiserver_node_port_range.split('-')[0] }}:{{ kube_apiserver_node_port_range.split('-')[1] }}"
{% endif %}
            - name: FELIX_IPTABLESBACKEND
              value: "{{ calico_iptables_backend }}"
            - name: FELIX_IPTABLESLOCKTIMEOUTSECS
              value: "{{ calico_iptables_lock_timeout_secs }}"
            # should be set in etcd before deployment
            # # Configure the IP Pool from which Pod IPs will be chosen.
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "{{ calico_pool_cidr | default(kube_pods_subnet) }}"
            - name: CALICO_IPV4POOL_IPIP
              value: "{{ calico_ipv4pool_ipip }}"
            - name: FELIX_IPV6SUPPORT
              value: "{{ enable_dual_stack_networks | default(false) }}"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "{{ calico_loglevel }}"
            # Set Calico startup logging to "error"
            - name: CALICO_STARTUP_LOGLEVEL
              value: "{{ calico_node_startup_loglevel }}"
            # Enable or disable usage report
            - name: FELIX_USAGEREPORTINGENABLED
              value: "{{ calico_usage_reporting }}"
{% if calico_mtu is defined %}
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              value: "{{ calico_veth_mtu | default(calico_mtu) }}"
            # Set MTU for the VXLAN tunnel device.
            - name: FELIX_VXLANMTU
              value: "{{ calico_veth_mtu | default(calico_mtu) }}"
            # Set MTU for the Wireguard tunnel device.
            - name: FELIX_WIREGUARDMTU
              value: "{{ calico_veth_mtu | default(calico_mtu) }}"
{% endif %}
            - name: FELIX_CHAININSERTMODE
              value: "{{ calico_felix_chaininsertmode }}"
            - name: FELIX_PROMETHEUSMETRICSENABLED
              value: "{{ calico_felix_prometheusmetricsenabled }}"
            - name: FELIX_PROMETHEUSMETRICSPORT
              value: "{{ calico_felix_prometheusmetricsport }}"
            - name: FELIX_PROMETHEUSGOMETRICSENABLED
              value: "{{ calico_felix_prometheusgometricsenabled }}"
            - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
              value: "{{ calico_felix_prometheusprocessmetricsenabled }}"
{% if calico_ip_auto_method is defined %}
            - name: IP_AUTODETECTION_METHOD
              value: "{{ calico_ip_auto_method }}"
{% else %}
            - name: NODEIP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: IP_AUTODETECTION_METHOD
              value: "can-reach=$(NODEIP)"
{% endif %}
            - name: IP
              value: "autodetect"
{% if calico_ip6_auto_method is defined and enable_dual_stack_networks %}
            - name: IP6_AUTODETECTION_METHOD
              value: "{{ calico_ip6_auto_method }}"
{% endif %}
{% if calico_felix_mtu_iface_pattern is defined %}
            - name: FELIX_MTUIFACEPATTERN
              value: "{{ calico_felix_mtu_iface_pattern }}"
{% endif %}
{% if enable_dual_stack_networks %}
            - name: IP6
              value: autodetect
{% endif %}
{% if calico_use_default_route_src_ipaddr|default(false) %}
            - name: FELIX_DEVICEROUTESOURCEADDRESS
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
{% endif %}
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: FELIX_HEALTHENABLED
              value: "true"
            - name: FELIX_IGNORELOOSERPF
              value: "{{ calico_node_ignorelooserpf }}"
            - name: CALICO_MANAGE_CNI
              value: "true"
{% if calico_node_extra_envs is defined %}
{% for key in calico_node_extra_envs %}
            - name: {{ key }}
              value: "{{ calico_node_extra_envs[key] }}"
{% endfor %}
{% endif %}
          securityContext:
            privileged: true
          resources:
            limits:
              cpu: {{ calico_node_cpu_limit }}
              memory: {{ calico_node_memory_limit }}
            requests:
              cpu: {{ calico_node_cpu_requests }}
              memory: {{ calico_node_memory_requests }}
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/calico-node
                  - -shutdown
          livenessProbe:
            exec:
              command:
                - /bin/calico-node
                - -felix-live
{% if calico_network_backend == "bird" %}
                - -bird-live
{% endif %}
            periodSeconds: 10
            initialDelaySeconds: 10
            timeoutSeconds: {{ calico_node_livenessprobe_timeout | default(10) }}
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - /bin/calico-node
{% if calico_network_backend == "bird" %}
                - -bird-ready
{% endif %}
                - -felix-ready
            periodSeconds: 10
            timeoutSeconds: {{ calico_node_readinessprobe_timeout | default(10) }}
            failureThreshold: 6
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
{% if calico_datastore == "etcd" %}
            - mountPath: /calico-secrets
              name: etcd-certs
              readOnly: true
{% endif %}
            - name: xtables-lock
              mountPath: /run/xtables.lock
              readOnly: false
            # For maintaining CNI plugin API credentials.
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
              readOnly: false
{% if typha_secure %}
            - name: typha-client
              mountPath: /etc/typha-client
              readOnly: true
            - name: typha-cacert
              subPath: ca.crt
              mountPath: /etc/typha-ca/ca.crt
              readOnly: true
{% endif %}
            - name: policysync
              mountPath: /var/run/nodeagent
{% if calico_bpf_enabled %}
            # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
            # parent directory.
            - name: sysfs
              mountPath: /sys/fs/
              # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
              # If the host is known to mount that filesystem already then Bidirectional can be omitted.
              mountPropagation: Bidirectional
{% endif %}
            - name: cni-log-dir
              mountPath: /var/log/calico/cni
              readOnly: true
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        # Used to install CNI.
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
{% if calico_datastore == "etcd" %}
        # Mount in the etcd TLS secrets.
        - name: etcd-certs
          hostPath:
            path: "{{ calico_cert_dir }}"
{% endif %}
        # Mount the global iptables lock file, used by calico/node
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
{% if calico_datastore == "kdd" %}
        # Mount in the directory for host-local IPAM allocations. This is
        # used when upgrading from host-local to calico-ipam, and can be removed
        # if not using the upgrade-ipam init container.
        - name: host-local-net-dir
          hostPath:
            path: /var/lib/cni/networks
{% endif %}
{% if typha_enabled and typha_secure %}
        - name: typha-client
          secret:
            secretName: typha-client
            items:
              - key: tls.crt
                path: typha-client.crt
              - key: tls.key
                path: typha-client.key
        - name: typha-cacert
          hostPath:
            path: "/etc/kubernetes/ssl/"
{% endif %}
{% if calico_bpf_enabled %}
        - name: sysfs
          hostPath:
            path: /sys/fs/
            type: DirectoryOrCreate
{% endif %}
        # Used to access CNI logs.
        - name: cni-log-dir
          hostPath:
            path: /var/log/calico/cni
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install Flex Volume Driver
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: "{{ kubelet_flexvolumes_plugins_dir | default('/usr/libexec/kubernetes/kubelet-plugins/volume/exec') }}/nodeagent~uds"
  updateStrategy:
    rollingUpdate:
      maxUnavailable: {{ serial | default('20%') }}
    type: RollingUpdate
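After this DaemonSet is applied, rollout and health can be checked from any control-plane host -- a minimal sketch with standard kubectl commands, not part of the template:

# Wait for calico-node to be rolled out on all nodes, then look at recent logs
kubectl -n kube-system rollout status daemonset/calico-node
kubectl -n kube-system get pods -l k8s-app=calico-node -o wide
kubectl -n kube-system logs daemonset/calico-node -c calico-node --tail=20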
@@ -0,0 +1,190 @@

# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.

apiVersion: v1
kind: Service
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  ports:
    - port: 5473
      protocol: TCP
      targetPort: calico-typha
      name: calico-typha
{% if typha_prometheusmetricsenabled %}
    - port: {{ typha_prometheusmetricsport }}
      protocol: TCP
      targetPort: http-metrics
      name: metrics
{% endif %}
  selector:
    k8s-app: calico-typha

---

# This manifest creates a Deployment of Typha to back the above service.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
  # typha_service_name variable in the calico-config ConfigMap above.
  #
  # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
  # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
  # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
  replicas: {{ typha_replicas }}
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      k8s-app: calico-typha
  template:
    metadata:
      labels:
        k8s-app: calico-typha
      annotations:
        cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
{% if typha_prometheusmetricsenabled %}
        prometheus.io/scrape: 'true'
        prometheus.io/port: "{{ typha_prometheusmetricsport }}"
{% endif %}
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
      # Since Calico can't network a pod until Typha is up, we need to run Typha itself
      # as a host-networked pod.
      serviceAccountName: calico-node
      priorityClassName: system-cluster-critical
      # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
      securityContext:
        fsGroup: 65534
      containers:
        - image: {{ calico_typha_image_repo }}:{{ calico_typha_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          name: calico-typha
          ports:
            - containerPort: 5473
              name: calico-typha
              protocol: TCP
{% if typha_prometheusmetricsenabled %}
            - containerPort: {{ typha_prometheusmetricsport }}
              name: http-metrics
              protocol: TCP
{% endif %}
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            # Enable "info" logging by default. Can be set to "debug" to increase verbosity.
            - name: TYPHA_LOGSEVERITYSCREEN
              value: "info"
            # Disable logging to file and syslog since those don't make sense in Kubernetes.
            - name: TYPHA_LOGFILEPATH
              value: "none"
            - name: TYPHA_LOGSEVERITYSYS
              value: "none"
            # Monitor the Kubernetes API to find the number of running instances and rebalance
            # connections.
            - name: TYPHA_CONNECTIONREBALANCINGMODE
              value: "kubernetes"
            - name: TYPHA_DATASTORETYPE
              value: "kubernetes"
            - name: TYPHA_HEALTHENABLED
              value: "true"
            - name: TYPHA_MAXCONNECTIONSLOWERLIMIT
              value: "{{ typha_max_connections_lower_limit }}"
{% if typha_secure %}
            - name: TYPHA_CAFILE
              value: /etc/ca/ca.crt
            - name: TYPHA_CLIENTCN
              value: typha-client
            - name: TYPHA_SERVERCERTFILE
              value: /etc/typha/server_certificate.pem
            - name: TYPHA_SERVERKEYFILE
              value: /etc/typha/server_key.pem
{% endif %}
{% if typha_prometheusmetricsenabled %}
            # Since Typha is host-networked,
            # this opens a port on the host, which may need to be secured.
            - name: TYPHA_PROMETHEUSMETRICSENABLED
              value: "true"
            - name: TYPHA_PROMETHEUSMETRICSPORT
              value: "{{ typha_prometheusmetricsport }}"
{% endif %}
{% if typha_secure %}
          volumeMounts:
            - mountPath: /etc/typha
              name: typha-server
              readOnly: true
            - mountPath: /etc/ca/ca.crt
              subPath: ca.crt
              name: cacert
              readOnly: true
{% endif %}
          # Needed for version >=3.7 when the 'host-local' ipam is used
          # Should never happen given templates/cni-calico.conflist.j2
          # Configure route aggregation based on pod CIDR.
          # - name: USE_POD_CIDR
          #   value: "true"
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9098
              host: localhost
            periodSeconds: 30
            initialDelaySeconds: 30
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9098
              host: localhost
            periodSeconds: 10
{% if typha_secure %}
      volumes:
        - name: typha-server
          secret:
            secretName: typha-server
            items:
              - key: tls.crt
                path: server_certificate.pem
              - key: tls.key
                path: server_key.pem
        - name: cacert
          hostPath:
            path: "{{ kube_cert_dir }}"
{% endif %}

---

# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict

apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-typha
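With typha_enabled and a non-zero typha_replicas, the Deployment and its Service endpoints on port 5473 can be confirmed like this (plain kubectl, not part of the manifests):

# Typha pods up, and the Service has endpoints that felix can connect to
kubectl -n kube-system get deployment calico-typha
kubectl -n kube-system get endpoints calico-typha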
@@ -0,0 +1,6 @@

#!/bin/bash
ETCD_ENDPOINTS={{ etcd_access_addresses }} \
ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \
ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \
ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \
{{ bin_dir }}/calicoctl --allow-version-mismatch "$@"
@@ -0,0 +1,8 @@

#!/bin/bash
DATASTORE_TYPE=kubernetes \
{% if inventory_hostname in groups['kube_control_plane'] %}
KUBECONFIG=/etc/kubernetes/admin.conf \
{% else %}
KUBECONFIG=/etc/cni/net.d/calico-kubeconfig \
{% endif %}
{{ bin_dir }}/calicoctl --allow-version-mismatch "$@"
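Both wrappers above just export the datastore settings and pass all arguments through to calicoctl, so they can be used like calicoctl itself. The installed name and path depend on the role's tasks (not shown here); the name calicoctl.sh below is an assumption for illustration:

# Assuming the wrapper is installed as /usr/local/bin/calicoctl.sh
calicoctl.sh get ippool -o wide
calicoctl.sh node status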
@@ -0,0 +1,86 @@

{
  "name": "{{ calico_cni_name }}",
  "cniVersion":"0.3.1",
  "plugins":[
    {
{% if calico_datastore == "kdd" %}
      "datastore_type": "kubernetes",
      "nodename": "__KUBERNETES_NODE_NAME__",
{% else %}
{% if cloud_provider is defined %}
      "nodename": "{{ calico_kubelet_name.stdout }}",
{% else %}
      "nodename": "{{ calico_baremetal_nodename }}",
{% endif %}
{% endif %}
      "type": "calico",
      "log_level": "info",
{% if calico_cni_log_file_path %}
      "log_file_path": "{{ calico_cni_log_file_path }}",
{% endif %}
{% if calico_datastore == "etcd" %}
      "etcd_endpoints": "{{ etcd_access_addresses }}",
      "etcd_cert_file": "{{ calico_cert_dir }}/cert.crt",
      "etcd_key_file": "{{ calico_cert_dir }}/key.pem",
      "etcd_ca_cert_file": "{{ calico_cert_dir }}/ca_cert.crt",
{% endif %}
{% if calico_ipam_host_local is defined %}
      "ipam": {
        "type": "host-local",
        "subnet": "usePodCidr"
      },
{% else %}
      "ipam": {
        "type": "calico-ipam",
{% if enable_dual_stack_networks %}
        "assign_ipv6": "true",
{% if calico_cni_pool_ipv6 %}
        "ipv6_pools": ["{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}"],
{% endif %}
{% endif %}
{% if calico_cni_pool %}
        "ipv4_pools": ["{{ calico_pool_cidr | default(kube_pods_subnet) }}"],
{% endif %}
        "assign_ipv4": "true"
      },
{% endif %}
{% if calico_allow_ip_forwarding %}
      "container_settings": {
        "allow_ip_forwarding": true
      },
{% endif %}
{% if (calico_feature_control is defined) and (calico_feature_control|length > 0) %}
      "feature_control": {
{% for fc in calico_feature_control -%}
{% set fcval = calico_feature_control[fc] -%}
        "{{ fc }}": {{ (fcval | string | lower) if (fcval == true or fcval == false) else "\"" + fcval + "\"" }}{{ "," if not loop.last else "" }}
{% endfor -%}
      {{- "" }}
      },
{% endif %}
{% if enable_network_policy %}
      "policy": {
        "type": "k8s"
      },
{% endif %}
{% if calico_mtu is defined and calico_mtu is number %}
      "mtu": {{ calico_mtu }},
{% endif %}
      "kubernetes": {
        "kubeconfig": "__KUBECONFIG_FILEPATH__"
      }
    },
    {
      "type":"portmap",
      "capabilities": {
        "portMappings": true
      }
    },
    {
      "type":"bandwidth",
      "capabilities": {
        "bandwidth": true
      }
    }
  ]
}
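Because so many of the fields above are wrapped in conditionals, the most common rendering failure is a stray trailing comma that makes the result invalid JSON. A quick check on a node (jq assumed available; the file name comes from CNI_CONF_NAME in the install-cni container above):

# Fails loudly if the rendered conflist is not valid JSON
jq . /etc/cni/net.d/10-calico.conflist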
@@ -0,0 +1,19 @@

---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: kube-system
  name: kubernetes-services-endpoint
data:
{% if calico_bpf_enabled %}
{% if loadbalancer_apiserver is defined %}
  KUBERNETES_SERVICE_HOST: "{{ apiserver_loadbalancer_domain_name }}"
  KUBERNETES_SERVICE_PORT: "{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
{%- elif use_localhost_as_kubeapi_loadbalancer|default(False)|bool %}
  KUBERNETES_SERVICE_HOST: "127.0.0.1"
  KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_port }}"
{%- else %}
  KUBERNETES_SERVICE_HOST: "{{ first_kube_control_plane_address }}"
  KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_port }}"
{%- endif %}
{% endif %}
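The data section is only populated when calico_bpf_enabled is set; in eBPF mode it must point the Calico components at the API server without going through kube-proxy. A quick way to see what was rendered:

# Empty data means eBPF mode is off; otherwise check the host/port values make sense
kubectl -n kube-system get configmap kubernetes-services-endpoint -o yaml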
@@ -0,0 +1,102 @@

#!/bin/bash

# Author: Smana smainklh@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o pipefail

usage()
{
    cat << EOF
Create self signed certificates

Usage : $(basename $0) -f <config> [-d <ssldir>]
      -h | --help    : Show this message
      -f | --config  : Openssl configuration file
      -d | --ssldir  : Directory where the certificates will be installed
      -c | --cadir   : Directory where the existing CA is located
      -s | --service : Service for the ca

      ex :
        $(basename $0) -f openssl.conf -d /srv/ssl
EOF
}

# Options parsing
while (($#)); do
    case "$1" in
        -h | --help)    usage; exit 0;;
        -f | --config)  CONFIG=${2}; shift 2;;
        -d | --ssldir)  SSLDIR="${2}"; shift 2;;
        -c | --cadir)   CADIR="${2}"; shift 2;;
        -s | --service) SERVICE="${2}"; shift 2;;
        *)
            usage
            echo "ERROR : Unknown option"
            exit 3
        ;;
    esac
done

if [ -z ${CONFIG} ]; then
    echo "ERROR: the openssl configuration file is missing. option -f"
    exit 1
fi
if [ -z ${SSLDIR} ]; then
    SSLDIR="/etc/calico/certs"
fi

tmpdir=$(mktemp -d /tmp/calico_${SERVICE}_certs.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"

mkdir -p ${SSLDIR} ${CADIR}

# Root CA
if [ -e "$CADIR/ca.key" ]; then
    # Reuse existing CA
    cp $CADIR/{ca.crt,ca.key} .
else
    openssl genrsa -out ca.key {{certificates_key_size}} > /dev/null 2>&1
    openssl req -x509 -new -nodes -key ca.key -days {{certificates_duration}} -out ca.crt -subj "/CN=calico-${SERVICE}-ca" > /dev/null 2>&1
fi

if [ $SERVICE == "typha" ]; then
    # Typha server
    openssl genrsa -out typha-server.key {{certificates_key_size}} > /dev/null 2>&1
    openssl req -new -key typha-server.key -out typha-server.csr -subj "/CN=typha-server" -config ${CONFIG} > /dev/null 2>&1
    openssl x509 -req -in typha-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-server.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1

    # Typha client
    openssl genrsa -out typha-client.key {{certificates_key_size}} > /dev/null 2>&1
    openssl req -new -key typha-client.key -out typha-client.csr -subj "/CN=typha-client" -config ${CONFIG} > /dev/null 2>&1
    openssl x509 -req -in typha-client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-client.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1

elif [ $SERVICE == "apiserver" ]; then
    # calico-apiserver
    openssl genrsa -out apiserver.key {{certificates_key_size}} > /dev/null 2>&1
    openssl req -new -key apiserver.key -out apiserver.csr -subj "/CN=calico-apiserver" -config ${CONFIG} > /dev/null 2>&1
    openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out apiserver.crt -days {{certificates_duration}} -extensions ssl_client_apiserver -extfile ${CONFIG} > /dev/null 2>&1
else
    echo "ERROR: unknown or missing service name. option -s (expected 'typha' or 'apiserver')"
    exit 1
fi

# Install certs
if [ -e "$CADIR/ca.key" ]; then
    # Do not overwrite the existing CA that was copied in above
    rm -f ca.crt ca.key
fi

mv {*.crt,*.key} ${SSLDIR}/
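A hedged example of how this script might be invoked by hand, based purely on the usage text above; the script file name and all paths are illustrative, not taken from the role:

# Generate Typha server/client certs into the default SSL dir, reusing the cluster CA directory
bash make-ssl-calico.sh -f openssl.conf -d /etc/calico/certs -c /etc/kubernetes/ssl -s typha
# Verify one of the issued certificates against the CA it was signed with
openssl verify -CAfile /etc/kubernetes/ssl/ca.crt /etc/calico/certs/typha-server.crt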
@@ -0,0 +1,5 @@

---
calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-7-$basearch/
calico_wireguard_packages:
  - wireguard-dkms
  - wireguard-tools
@@ -0,0 +1,3 @@

---
calico_wireguard_packages:
  - wireguard-tools
@@ -0,0 +1,3 @@

---
calico_wireguard_packages:
  - wireguard
@@ -0,0 +1,3 @@

---
calico_wireguard_packages:
  - wireguard-tools
@@ -0,0 +1,3 @@

---
calico_wireguard_packages:
  - wireguard-tools
@@ -0,0 +1,3 @@

---
calico_wireguard_packages:
  - wireguard-tools
@@ -0,0 +1,4 @@

---
calico_wireguard_packages:
  - wireguard-dkms
  - wireguard-tools
@@ -0,0 +1,3 @@

---
calico_wireguard_packages:
  - wireguard-tools