This commit is contained in:
havelight-ee
2023-05-30 14:44:26 +09:00
parent 9a3174deef
commit 4c32a7239d
2598 changed files with 164595 additions and 487 deletions


@@ -0,0 +1,165 @@
---
# Default name of the CNI network
calico_cni_name: k8s-pod-network
# Enables Internet connectivity from containers
nat_outgoing: true
# Default IP pool name
calico_pool_name: "default-pool"
calico_ipv4pool_ipip: "Off"
# Change the encapsulation mode; by default VXLAN is enabled, as it is the most mature and well-tested mode
calico_ipip_mode: Never # valid values are 'Always', 'Never' and 'CrossSubnet'
calico_vxlan_mode: Always # valid values are 'Always', 'Never' and 'CrossSubnet'
calico_cni_pool: true
calico_cni_pool_ipv6: true
# Default IP pool blockSize (defaults to kube_network_node_prefix)
calico_pool_blocksize: 26
# Calico doesn't support IPIP tunneling for IPv6.
calico_ipip_mode_ipv6: Never
calico_vxlan_mode_ipv6: Never
# Default IPv6 IP pool blockSize (defaults to kube_network_node_prefix_ipv6)
calico_pool_blocksize_ipv6: 122
# Calico network backend can be 'bird', 'vxlan' or 'none'
calico_network_backend: vxlan
calico_cert_dir: /etc/calico/certs
# Global as_num (/calico/bgp/v1/global/as_num)
global_as_num: "64512"
# You can set MTU value here. If left undefined or empty, it will
# not be specified in calico CNI config, so Calico will use built-in
# defaults. The value should be a number, not a string.
# calico_mtu: 1500
# Advertise Service External IPs
calico_advertise_service_external_ips: []
# Advertise Service LoadBalancer IPs
calico_advertise_service_loadbalancer_ips: []
# Calico eBPF support
calico_bpf_enabled: false
calico_bpf_log_level: ""
# Valid options for service mode: Tunnel (default) or DSR (Direct Server Return)
calico_bpf_service_mode: Tunnel
# Resource limits and requests for calico-node
calico_node_memory_limit: 500M
calico_node_cpu_limit: 300m
calico_node_memory_requests: 64M
calico_node_cpu_requests: 150m
calico_felix_chaininsertmode: Insert
# Calico daemonset nodeselector
calico_ds_nodeselector: "kubernetes.io/os: linux"
# Virtual network ID to use for VXLAN traffic. A value of 0 means “use the kernel default”.
calico_vxlan_vni: 4096
# Port to use for VXLAN traffic. A value of 0 means “use the kernel default”.
calico_vxlan_port: 4789
# Enable Prometheus Metrics endpoint for felix
calico_felix_prometheusmetricsenabled: false
calico_felix_prometheusmetricsport: 9091
calico_felix_prometheusgometricsenabled: true
calico_felix_prometheusprocessmetricsenabled: true
# Set the agent log level. Can be debug, warning, info or fatal
calico_loglevel: info
calico_node_startup_loglevel: error
# Set log path for calico CNI plugin. Set to false to disable logging to disk.
calico_cni_log_file_path: /var/log/calico/cni/cni.log
# Enable or disable usage report to 'usage.projectcalico.org'
calico_usage_reporting: false
# Should calico ignore kernel's RPF check setting,
# see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198
calico_node_ignorelooserpf: false
# Define address on which Felix will respond to health requests
calico_healthhost: "localhost"
# Configure time in seconds that calico will wait for the iptables lock
calico_iptables_lock_timeout_secs: 10
# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" (FELIX_IPTABLESBACKEND)
calico_iptables_backend: "Auto"
# Calico Wireguard support
calico_wireguard_enabled: false
calico_wireguard_packages: []
calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-{{ ansible_distribution_major_version }}-$basearch/
# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of:
# * can-reach=DESTINATION
# * interface=INTERFACE-REGEX
# see https://projectcalico.docs.tigera.io/reference/node/configuration#ip-autodetection-methods
# calico_ip_auto_method: "interface=eth.*"
# calico_ip6_auto_method: "interface=eth.*"
# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the hosts interface for MTU auto-detection.
# see https://projectcalico.docs.tigera.io/reference/felix/configuration
# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}"
kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
# The default value for calico_datastore is set in the kubespray-defaults role
# Use typha (only with kdd)
typha_enabled: false
typha_prometheusmetricsenabled: false
typha_prometheusmetricsport: 9093
# Scaling typha: 1 replica per 100 nodes is adequate
# Number of typha replicas
typha_replicas: 1
# Set max typha connections
typha_max_connections_lower_limit: 300
# Generate certificates for typha<->calico-node communication
typha_secure: false
calico_feature_control: {}
# Calico default BGP port
calico_bgp_listen_port: 179
# Calico FelixConfiguration options
calico_felix_reporting_interval: 0s
calico_felix_log_severity_screen: Info
# Calico container settings
calico_allow_ip_forwarding: false
# Calico IPAM strictAffinity
calico_ipam_strictaffinity: false
# Calico IPAM autoAllocateBlocks
calico_ipam_autoallocateblocks: true
# Calico IPAM maxBlocksPerHost, default 0
calico_ipam_maxblocksperhost: 0
# Calico apiserver (only with kdd)
calico_apiserver_enabled: false
# Calico feature detection override; set to "ChecksumOffloadBroken=true" to
# work around https://github.com/projectcalico/calico/issues/3145
calico_feature_detect_override: ""
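
These defaults are normally overridden from the inventory rather than edited in place. A minimal sketch of such an override (the group_vars path and every value below are illustrative assumptions, not part of this commit), chosen so that it still satisfies the encapsulation checks enforced later in check.yml:

# group_vars/k8s_cluster/k8s-net-calico.yml (example only)
calico_network_backend: bird      # switch from the default vxlan backend to BGP routing
calico_ipip_mode: CrossSubnet     # encapsulate only when crossing a subnet boundary
calico_vxlan_mode: Never          # IP-in-IP and VXLAN are mutually exclusive
calico_pool_cidr: 10.233.64.0/18  # must sit within kube_pods_subnet (asserted in install.yml)
calico_wireguard_enabled: true    # encrypt node-to-node pod traffic
calico_mtu: 1440                  # a number, not a string (see the MTU comment above)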


@@ -0,0 +1,27 @@
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
[ ssl_client ]
extendedKeyUsage = clientAuth, serverAuth
basicConstraints = CA:FALSE
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer
[ v3_ca ]
basicConstraints = CA:TRUE
keyUsage = cRLSign, digitalSignature, keyCertSign
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer
[ ssl_client_apiserver ]
extendedKeyUsage = clientAuth, serverAuth
basicConstraints = CA:FALSE
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer
subjectAltName = DNS:calico-api.calico-apiserver.svc
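
The sections above are consumed by the make-ssl-*.sh wrapper scripts rendered from make-ssl-calico.sh.j2 in the tasks below; as a rough, hypothetical illustration of what signing a certificate with the ssl_client_apiserver extensions could look like using plain openssl (the CA file names under kube_cert_dir are assumptions):

- name: Example only | sign an apiserver cert with the ssl_client_apiserver extensions  # hypothetical task, not part of this commit
  shell: |
    openssl req -new -newkey rsa:2048 -nodes \
      -keyout /etc/calico/certs/apiserver.key \
      -out /etc/calico/certs/apiserver.csr \
      -subj "/CN=calico-api.calico-apiserver.svc"
    openssl x509 -req -in /etc/calico/certs/apiserver.csr \
      -CA {{ kube_cert_dir }}/ca.crt -CAkey {{ kube_cert_dir }}/ca.key -CAcreateserial \
      -out /etc/calico/certs/apiserver.crt -days 365 \
      -extfile /etc/calico/certs/openssl.conf -extensions ssl_client_apiserver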


@@ -0,0 +1,27 @@
---
- name: reset_calico_cni
command: /bin/true
when: calico_cni_config is defined
notify:
- delete 10-calico.conflist
- Calico | delete calico-node docker containers
- Calico | delete calico-node crio/containerd containers
- name: delete 10-calico.conflist
file:
path: /etc/cni/net.d/10-calico.conflist
state: absent
- name: Calico | delete calico-node docker containers
shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
register: docker_calico_node_remove
until: docker_calico_node_remove is succeeded
retries: 5
when: container_manager in ["docker"]
- name: Calico | delete calico-node crio/containerd containers
shell: '{{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
register: crictl_calico_node_remove
until: crictl_calico_node_remove is succeeded
retries: 5
when: container_manager in ["crio", "containerd"]


@@ -0,0 +1,5 @@
---
# Global as_num (/calico/bgp/v1/global/as_num)
# should be the same as in calico role
global_as_num: "64512"
calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}"


@@ -0,0 +1,16 @@
---
- name: Calico-rr | Pre-upgrade tasks
include_tasks: pre.yml
- name: Calico-rr | Configuring node tasks
include_tasks: update-node.yml
- name: Calico-rr | Set label for route reflector # noqa 301
command: >-
{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }}
'i-am-a-route-reflector=true' --overwrite
changed_when: false
register: calico_rr_label
until: calico_rr_label is succeeded
delay: "{{ retry_stagger | random + 3 }}"
retries: 10


@@ -0,0 +1,15 @@
---
- name: Calico-rr | Disable calico-rr service if it exists
service:
name: calico-rr
state: stopped
enabled: no
failed_when: false
- name: Calico-rr | Delete obsolete files
file:
path: "{{ item }}"
state: absent
with_items:
- /etc/calico/calico-rr.env
- /etc/systemd/system/calico-rr.service


@@ -0,0 +1,48 @@
---
# Workaround to retry a block of tasks; Ansible doesn't have a direct way to do it.
# You can follow the block loop feature request at: https://github.com/ansible/ansible/issues/46203
- block:
- name: Set the retry count
set_fact:
retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}"
- name: Calico | Set label for route reflector # noqa 301 305
shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite"
changed_when: false
register: calico_rr_id_label
until: calico_rr_id_label is succeeded
delay: "{{ retry_stagger | random + 3 }}"
retries: 10
when: calico_rr_id is defined
- name: Calico-rr | Fetch current node object
command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson"
changed_when: false
register: calico_rr_node
until: calico_rr_node is succeeded
delay: "{{ retry_stagger | random + 3 }}"
retries: 10
- name: Calico-rr | Set route reflector cluster ID
set_fact:
calico_rr_node_patched: >-
{{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
{ 'routeReflectorClusterID': cluster_id }}}, recursive=True) }}
- name: Calico-rr | Configure route reflector # noqa 301 305
shell: "{{ bin_dir }}/calicoctl.sh replace -f-"
args:
stdin: "{{ calico_rr_node_patched | to_json }}"
rescue:
- name: Fail if retry limit is reached
fail:
msg: Ended after 10 retries
when: retry_count|int == 10
- name: Retrying node configuration
debug:
msg: "Failed to configure route reflector - Retrying..."
- name: Retry node configuration
include_tasks: update-node.yml


@@ -0,0 +1,60 @@
---
- name: Calico | Check if calico apiserver exists
command: "{{ kubectl }} -n calico-apiserver get secret calico-apiserver-certs"
register: calico_apiserver_secret
changed_when: false
failed_when: false
- name: Calico | Create ns manifests
template:
src: "calico-apiserver-ns.yml.j2"
dest: "{{ kube_config_dir }}/calico-apiserver-ns.yml"
mode: 0644
- name: Calico | Apply ns manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/calico-apiserver-ns.yml"
state: "latest"
- name: Calico | Ensure calico certs dir
file:
path: /etc/calico/certs
state: directory
mode: 0755
when: calico_apiserver_secret.rc != 0
- name: Calico | Copy ssl script for apiserver certs
template:
src: make-ssl-calico.sh.j2
dest: "{{ bin_dir }}/make-ssl-apiserver.sh"
mode: 0755
when: calico_apiserver_secret.rc != 0
- name: Calico | Copy ssl config for apiserver certs
copy:
src: openssl.conf
dest: /etc/calico/certs/openssl.conf
mode: 0644
when: calico_apiserver_secret.rc != 0
- name: Calico | Generate apiserver certs
command: >-
{{ bin_dir }}/make-ssl-apiserver.sh
-f /etc/calico/certs/openssl.conf
-c {{ kube_cert_dir }}
-d /etc/calico/certs
-s apiserver
when: calico_apiserver_secret.rc != 0
- name: Calico | Create calico apiserver generic secrets
command: >-
{{ kubectl }} -n calico-apiserver
create secret generic {{ item.name }}
--from-file={{ item.cert }}
--from-file={{ item.key }}
with_items:
- name: calico-apiserver-certs
cert: /etc/calico/certs/apiserver.crt
key: /etc/calico/certs/apiserver.key
when: calico_apiserver_secret.rc != 0
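
A small, purely illustrative follow-up (not part of this commit) that one might append to confirm the secret ended up where the apiserver manifest later mounts it from:

- name: Example only | verify the calico-apiserver certs secret exists  # hypothetical verification task
  command: "{{ kubectl }} -n calico-apiserver get secret calico-apiserver-certs -o name"
  register: calico_apiserver_secret_check
  changed_when: false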


@@ -0,0 +1,194 @@
---
- name: Stop if legacy encapsulation variables are detected (ipip)
assert:
that:
- ipip is not defined
msg: "'ipip' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Stop if legacy encapsulation variables are detected (ipip_mode)
assert:
that:
- ipip_mode is not defined
msg: "'ipip_mode' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Stop if legacy encapsulation variables are detected (calcio_ipam_autoallocateblocks)
assert:
that:
- calcio_ipam_autoallocateblocks is not defined
msg: "'calcio_ipam_autoallocateblocks' configuration variable is deprecated (it was a typo); please configure your inventory with 'calico_ipam_autoallocateblocks' set to 'true' or 'false' according to your specific needs"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Stop if incompatible network plugin and cloudprovider
assert:
that:
- calico_ipip_mode == 'Never'
- calico_vxlan_mode in ['Always', 'CrossSubnet']
msg: "When using cloud_provider azure and network_plugin calico, calico_ipip_mode must be 'Never' and calico_vxlan_mode 'Always' or 'CrossSubnet'"
when:
- cloud_provider is defined and cloud_provider == 'azure'
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Stop if Calico version is not supported
assert:
that:
- "calico_version in calico_crds_archive_checksums.keys()"
msg: "Calico version {{ calico_version }} is not supported; it must be one of {{ calico_crds_archive_checksums.keys() }}"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Get current calico cluster version
shell: "set -o pipefail && {{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'"
args:
executable: /bin/bash
register: calico_version_on_server
async: 10
poll: 3
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
changed_when: false
failed_when: false
- name: Check that current calico version is enough for upgrade
assert:
that:
- calico_version_on_server.stdout is version(calico_min_version_required, '>=')
msg: >
Your Calico version is not recent enough to upgrade from.
The minimum version is {{ calico_min_version_required }}, the version supported by the previous Kubespray release.
when:
- 'calico_version_on_server.stdout is defined'
- calico_version_on_server.stdout
- inventory_hostname == groups['kube_control_plane'][0]
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check that cluster_id is set if calico_rr enabled"
assert:
that:
- cluster_id is defined
msg: "A unique cluster_id is required if using calico_rr"
when:
- peer_with_calico_rr
- inventory_hostname == groups['kube_control_plane'][0]
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check that calico_rr nodes are in k8s_cluster group"
assert:
that:
- '"k8s_cluster" in group_names'
msg: "calico_rr must be a child group of k8s_cluster group"
when:
- '"calico_rr" in group_names'
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check vars defined correctly"
assert:
that:
- "calico_pool_name is defined"
- "calico_pool_name is match('^[a-zA-Z0-9-_\\\\.]{2,63}$')"
msg: "calico_pool_name contains invalid characters"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check calico network backend defined correctly"
assert:
that:
- "calico_network_backend in ['bird', 'vxlan', 'none']"
msg: "calico network backend is not 'bird', 'vxlan' or 'none'"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check ipip and vxlan mode defined correctly"
assert:
that:
- "calico_ipip_mode in ['Always', 'CrossSubnet', 'Never']"
- "calico_vxlan_mode in ['Always', 'CrossSubnet', 'Never']"
msg: "calico inter host encapsulation mode is not 'Always', 'CrossSubnet' or 'Never'"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check ipip and vxlan mode if simultaneously enabled"
assert:
that:
- "calico_vxlan_mode in ['Never']"
msg: "IP-in-IP and VXLAN modes are mutually exclusive"
when:
- "calico_ipip_mode in ['Always', 'CrossSubnet']"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check ipip and vxlan mode if simultaneously enabled"
assert:
that:
- "calico_ipip_mode in ['Never']"
msg: "IP-in-IP and VXLAN modes are mutually exclusive"
when:
- "calico_vxlan_mode in ['Always', 'CrossSubnet']"
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Get Calico {{ calico_pool_name }} configuration"
command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }} -o json"
failed_when: False
changed_when: False
check_mode: no
register: calico
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Set calico_pool_conf"
set_fact:
calico_pool_conf: '{{ calico.stdout | from_json }}'
when: calico.rc == 0 and calico.stdout
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check if inventory matches the current cluster configuration"
assert:
that:
- calico_pool_conf.spec.blockSize|int == (calico_pool_blocksize | default(kube_network_node_prefix) | int)
- calico_pool_conf.spec.cidr == (calico_pool_cidr | default(kube_pods_subnet))
- not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode
- not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode
msg: "Your inventory doesn't match the current cluster configuration"
when:
- calico_pool_conf is defined
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check kdd calico_datastore if calico_apiserver_enabled"
assert:
that: calico_datastore == "kdd"
msg: "When using calico apiserver you need to use the kubernetes datastore"
when:
- calico_apiserver_enabled
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check kdd calico_datastore if typha_enabled"
assert:
that: calico_datastore == "kdd"
msg: "When using typha you need to use the kubernetes datastore"
when:
- typha_enabled
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check ipip mode is Never for calico ipv6"
assert:
that:
- "calico_ipip_mode_ipv6 in ['Never']"
msg: "Calico doesn't support IPIP tunneling for IPv6"
when:
- enable_dual_stack_networks
run_once: True
delegate_to: "{{ groups['kube_control_plane'][0] }}"


@@ -0,0 +1,475 @@
---
- name: Calico | Install Wireguard packages
package:
name: "{{ item }}"
state: present
with_items: "{{ calico_wireguard_packages }}"
register: calico_package_install
until: calico_package_install is succeeded
retries: 4
when: calico_wireguard_enabled
- name: Calico | Copy calicoctl binary from download dir
copy:
src: "{{ local_release_dir }}/calicoctl"
dest: "{{ bin_dir }}/calicoctl"
mode: 0755
remote_src: yes
- name: Calico | Write Calico cni config
template:
src: "cni-calico.conflist.j2"
dest: "/etc/cni/net.d/calico.conflist.template"
mode: 0644
owner: root
register: calico_conflist
notify: reset_calico_cni
- name: Calico | Create calico certs directory
file:
dest: "{{ calico_cert_dir }}"
state: directory
mode: 0750
owner: root
group: root
when: calico_datastore == "etcd"
- name: Calico | Link etcd certificates for calico-node
file:
src: "{{ etcd_cert_dir }}/{{ item.s }}"
dest: "{{ calico_cert_dir }}/{{ item.d }}"
state: hard
mode: 0640
force: yes
with_items:
- {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
- {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
- {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
when: calico_datastore == "etcd"
- name: Calico | Generate typha certs
include_tasks: typha_certs.yml
when:
- typha_secure
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Generate apiserver certs
include_tasks: calico_apiserver_certs.yml
when:
- calico_apiserver_enabled
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Install calicoctl wrapper script
template:
src: "calicoctl.{{ calico_datastore }}.sh.j2"
dest: "{{ bin_dir }}/calicoctl.sh"
mode: 0755
owner: root
group: root
- name: Calico | wait for etcd
uri:
url: "{{ etcd_access_addresses.split(',') | first }}/health"
validate_certs: no
client_cert: "{{ calico_cert_dir }}/cert.crt"
client_key: "{{ calico_cert_dir }}/key.pem"
register: result
until: result.status == 200 or result.status == 401
retries: 10
delay: 5
run_once: true
when: calico_datastore == "etcd"
- name: Calico | Check if calico network pool has already been configured
# noqa 306 - grep will exit 1 if no match found
shell: >
{{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
args:
executable: /bin/bash
register: calico_conf
retries: 4
until: calico_conf.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined
assert:
that: "[calico_pool_cidr] | ipaddr(kube_pods_subnet) | length == 1"
msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- 'calico_conf.stdout == "0"'
- calico_pool_cidr is defined
- name: Calico | Check if calico IPv6 network pool has already been configured
# noqa 306 - grep will exit 1 if no match found
shell: >
{{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l
args:
executable: /bin/bash
register: calico_conf_ipv6
retries: 4
until: calico_conf_ipv6.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
when:
- inventory_hostname == groups['kube_control_plane'][0]
- enable_dual_stack_networks
- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined
assert:
that: "[calico_pool_cidr_ipv6] | ipaddr(kube_pods_subnet_ipv6) | length == 1"
msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
- calico_pool_cidr_ipv6 is defined
- enable_dual_stack_networks
- block:
- name: Calico | Check if extra directory is needed
stat:
path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3','<')) else 'crd' }}"
register: kdd_path
- name: Calico | Set kdd path when calico < v3.22.3
set_fact:
calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/kdd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}"
when:
- calico_version is version('v3.22.3', '<')
- name: Calico | Set kdd path when calico > v3.22.2
set_fact:
calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/crd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}"
when:
- calico_version is version('v3.22.2', '>')
- name: Calico | Create calico manifests for kdd
assemble:
src: "{{ calico_kdd_path }}"
dest: "{{ kube_config_dir }}/kdd-crds.yml"
mode: 0644
delimiter: "---\n"
regexp: ".*\\.yaml"
remote_src: true
- name: Calico | Create Calico Kubernetes datastore resources
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/kdd-crds.yml"
state: "latest"
when:
- inventory_hostname == groups['kube_control_plane'][0]
when:
- inventory_hostname in groups['kube_control_plane']
- calico_datastore == "kdd"
- block:
- name: Calico | Get existing FelixConfiguration
command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json"
register: _felix_cmd
ignore_errors: True
changed_when: False
- name: Calico | Set kubespray FelixConfiguration
set_fact:
_felix_config: >
{
"kind": "FelixConfiguration",
"apiVersion": "projectcalico.org/v3",
"metadata": {
"name": "default",
},
"spec": {
"ipipEnabled": {{ calico_ipip_mode != 'Never' }},
"reportingInterval": "{{ calico_felix_reporting_interval }}",
"bpfLogLevel": "{{ calico_bpf_log_level }}",
"bpfEnabled": {{ calico_bpf_enabled | bool }},
"bpfExternalServiceMode": "{{ calico_bpf_service_mode }}",
"wireguardEnabled": {{ calico_wireguard_enabled | bool }},
"logSeverityScreen": "{{ calico_felix_log_severity_screen }}",
"vxlanEnabled": {{ calico_vxlan_mode != 'Never' }},
"featureDetectOverride": "{{ calico_feature_detect_override }}"
}
}
- name: Calico | Process FelixConfiguration
set_fact:
_felix_config: "{{ _felix_cmd.stdout | from_json | combine(_felix_config, recursive=True) }}"
when:
- _felix_cmd is success
- name: Calico | Configure calico FelixConfiguration
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config|to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
- block:
- name: Calico | Get existing calico network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
register: _calico_pool_cmd
ignore_errors: True
changed_when: False
- name: Calico | Set kubespray calico network pool
set_fact:
_calico_pool: >
{
"kind": "IPPool",
"apiVersion": "projectcalico.org/v3",
"metadata": {
"name": "{{ calico_pool_name }}",
},
"spec": {
"blockSize": {{ calico_pool_blocksize | default(kube_network_node_prefix) }},
"cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}",
"ipipMode": "{{ calico_ipip_mode }}",
"vxlanMode": "{{ calico_vxlan_mode }}",
"natOutgoing": {{ nat_outgoing|default(false) }}
}
}
- name: Calico | Process calico network pool
set_fact:
_calico_pool: "{{ _calico_pool_cmd.stdout | from_json | combine(_calico_pool, recursive=True) }}"
when:
- _calico_pool_cmd is success
- name: Calico | Configure calico network pool
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool|to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
- block:
- name: Calico | Get existing calico ipv6 network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
register: _calico_pool_ipv6_cmd
ignore_errors: True
changed_when: False
- name: Calico | Set kubespray calico network pool
set_fact:
_calico_pool_ipv6: >
{
"kind": "IPPool",
"apiVersion": "projectcalico.org/v3",
"metadata": {
"name": "{{ calico_pool_name }}-ipv6",
},
"spec": {
"blockSize": {{ calico_pool_blocksize_ipv6 | default(kube_network_node_prefix_ipv6) }},
"cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}",
"ipipMode": "{{ calico_ipip_mode_ipv6 }}",
"vxlanMode": "{{ calico_vxlan_mode_ipv6 }}",
"natOutgoing": {{ nat_outgoing_ipv6|default(false) }}
}
}
- name: Calico | Process calico ipv6 network pool
set_fact:
_calico_pool_ipv6: "{{ _calico_pool_ipv6_cmd.stdout | from_json | combine(_calico_pool_ipv6, recursive=True) }}"
when:
- _calico_pool_ipv6_cmd is success
- name: Calico | Configure calico ipv6 network pool
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6|to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
- enable_dual_stack_networks | bool
- name: Populate Service External IPs
set_fact:
_service_external_ips: "{{ _service_external_ips|default([]) + [ {'cidr': item} ] }}"
with_items: "{{ calico_advertise_service_external_ips }}"
run_once: yes
- name: Populate Service LoadBalancer IPs
set_fact:
_service_loadbalancer_ips: "{{ _service_loadbalancer_ips|default([]) + [ {'cidr': item} ] }}"
with_items: "{{ calico_advertise_service_loadbalancer_ips }}"
run_once: yes
- name: "Determine nodeToNodeMesh needed state"
set_fact:
nodeToNodeMeshEnabled: "false"
when:
- peer_with_router|default(false) or peer_with_calico_rr|default(false)
- inventory_hostname in groups['k8s_cluster']
run_once: yes
- block:
- name: Calico | Get existing BGP Configuration
command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json"
register: _bgp_config_cmd
ignore_errors: True
changed_when: False
- name: Calico | Set kubespray BGP Configuration
set_fact:
_bgp_config: >
{
"kind": "BGPConfiguration",
"apiVersion": "projectcalico.org/v3",
"metadata": {
"name": "default",
},
"spec": {
"listenPort": {{ calico_bgp_listen_port }},
"logSeverityScreen": "Info",
{% if not calico_no_global_as_num|default(false) %}"asNumber": {{ global_as_num }},{% endif %}
"nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} ,
{% if calico_advertise_cluster_ips|default(false) %}
"serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %}
{% if calico_advertise_service_loadbalancer_ips|length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
"serviceExternalIPs": {{ _service_external_ips|default([]) }}
}
}
- name: Calico | Process BGP Configuration
set_fact:
_bgp_config: "{{ _bgp_config_cmd.stdout | from_json | combine(_bgp_config, recursive=True) }}"
when:
- _bgp_config_cmd is success
- name: Calico | Set up BGP Configuration
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config|to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Create calico manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: calico-config, file: calico-config.yml, type: cm}
- {name: calico-node, file: calico-node.yml, type: ds}
- {name: calico, file: calico-node-sa.yml, type: sa}
- {name: calico, file: calico-cr.yml, type: clusterrole}
- {name: calico, file: calico-crb.yml, type: clusterrolebinding}
- {name: kubernetes-services-endpoint, file: kubernetes-services-endpoint.yml, type: cm }
register: calico_node_manifests
when:
- inventory_hostname in groups['kube_control_plane']
- rbac_enabled or item.type not in rbac_resources
- name: Calico | Create calico manifests for typha
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: calico, file: calico-typha.yml, type: typha}
register: calico_node_typha_manifest
when:
- inventory_hostname in groups['kube_control_plane']
- typha_enabled
- name: Calico | get calico apiserver caBundle
command: "{{ bin_dir }}/kubectl get secret -n calico-apiserver calico-apiserver-certs -o jsonpath='{.data.apiserver\\.crt}'"
changed_when: false
register: calico_apiserver_cabundle
when:
- inventory_hostname == groups['kube_control_plane'][0]
- calico_apiserver_enabled
- name: Calico | set calico apiserver caBundle fact
set_fact:
calico_apiserver_cabundle: "{{ calico_apiserver_cabundle.stdout }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- calico_apiserver_enabled
- name: Calico | Create calico manifests for apiserver
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: calico, file: calico-apiserver.yml, type: calico-apiserver}
register: calico_apiserver_manifest
when:
- inventory_hostname in groups['kube_control_plane']
- calico_apiserver_enabled
- name: Start Calico resources
kube:
name: "{{ item.item.name }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ calico_node_manifests.results }}"
- "{{ calico_node_typha_manifest.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"
- name: Start Calico apiserver resources
kube:
name: "{{ item.item.name }}"
namespace: "calico-apiserver"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ calico_apiserver_manifest.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"
- name: Wait for calico kubeconfig to be created
wait_for:
path: /etc/cni/net.d/calico-kubeconfig
when:
- inventory_hostname not in groups['kube_control_plane']
- calico_datastore == "kdd"
- name: Calico | Create Calico ipam manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: calico, file: calico-ipamconfig.yml, type: ipam}
when:
- inventory_hostname in groups['kube_control_plane']
- calico_datastore == "kdd"
- name: Calico | Create ipamconfig resources
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/calico-ipamconfig.yml"
state: "latest"
register: resource_result
until: resource_result is succeeded
retries: 4
when:
- inventory_hostname == groups['kube_control_plane'][0]
- calico_datastore == "kdd"
- include_tasks: peer_with_calico_rr.yml
when:
- peer_with_calico_rr|default(false)
- include_tasks: peer_with_router.yml
when:
- peer_with_router|default(false)
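
Once these tasks have run, the applied state can be inspected with the same calicoctl wrapper the role installs; a hedged verification sketch (not part of this commit):

- name: Example only | list the configured Calico IP pools  # hypothetical verification task
  command: "{{ bin_dir }}/calicoctl.sh get ippool -o wide"
  register: calico_pools_check
  changed_when: false
  run_once: true
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Example only | show the pool summary
  debug:
    msg: "{{ calico_pools_check.stdout_lines }}"
  run_once: true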


@@ -0,0 +1,6 @@
---
- import_tasks: pre.yml
- import_tasks: repos.yml
- include_tasks: install.yml


@@ -0,0 +1,86 @@
---
- name: Calico | Set label for group nodes # noqa 301 305
shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite"
changed_when: false
register: calico_group_id_label
until: calico_group_id_label is succeeded
delay: "{{ retry_stagger | random + 3 }}"
retries: 10
when:
- calico_group_id is defined
- name: Calico | Configure peering with route reflectors at global scope
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
# keep as-is when it's already a string, otherwise convert to JSON
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
"kind": "BGPPeer",
"metadata": {
"name": "{{ calico_rr_id }}-to-node"
},
"spec": {
"peerSelector": "calico-rr-id == '{{ calico_rr_id }}'",
"nodeSelector": "calico-group-id == '{{ calico_group_id }}'"
}}
register: output
retries: 4
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
when:
- calico_rr_id is defined
- calico_group_id is defined
- inventory_hostname in groups['calico_rr']
- name: Calico | Configure peering with route reflectors at global scope
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
# keep as-is when it's already a string, otherwise convert to JSON
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
"kind": "BGPPeer",
"metadata": {
"name": "peer-to-rrs"
},
"spec": {
"nodeSelector": "!has(i-am-a-route-reflector)",
"peerSelector": "has(i-am-a-route-reflector)"
}}
register: output
retries: 4
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
with_items:
- "{{ groups['calico_rr'] | default([]) }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- calico_rr_id is not defined or calico_group_id is not defined
- name: Calico | Configure route reflectors to peer with each other
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
# keep as-is when it's already a string, otherwise convert to JSON
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
"kind": "BGPPeer",
"metadata": {
"name": "rr-mesh"
},
"spec": {
"nodeSelector": "has(i-am-a-route-reflector)",
"peerSelector": "has(i-am-a-route-reflector)"
}}
register: output
retries: 4
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
with_items:
- "{{ groups['calico_rr'] | default([]) }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
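
The route-reflector peering above is driven entirely by inventory variables; a hedged sketch of how they might be laid out (all names and values are illustrative assumptions, inferred from the fields these tasks and check.yml reference):

# host_vars/rr0.yml -- a node in the [calico_rr] group (example only)
calico_rr_id: rr-group-1       # advertised through the calico-rr-id node label
calico_group_id: rr-group-1    # which group of nodes should peer with this reflector

# host_vars/node1.yml -- a regular k8s_cluster node served by that reflector (example only)
calico_group_id: rr-group-1    # applied as the calico-group-id node label by the first task above

# group_vars/k8s_cluster/k8s-net-calico.yml (example only)
peer_with_calico_rr: true
cluster_id: "1.0.0.1"          # routeReflectorClusterID; check.yml requires it when peer_with_calico_rr is set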


@@ -0,0 +1,77 @@
---
- name: Calico | Configure peering with router(s) at global scope
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
"kind": "BGPPeer",
"metadata": {
"name": "global-{{ item.name | default(item.router_id|replace(':','-')) }}"
},
"spec": {
"asNumber": "{{ item.as }}",
"peerIP": "{{ item.router_id }}"
}}
register: output
retries: 4
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
with_items:
- "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Configure node asNumber for per node peering
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
"kind": "Node",
"metadata": {
"name": "{{ inventory_hostname }}"
},
"spec": {
"bgp": {
"asNumber": "{{ local_as }}"
},
"orchRefs":[{"nodeName":"{{ inventory_hostname }}","orchestrator":"k8s"}]
}}
register: output
retries: 4
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
when:
- inventory_hostname in groups['k8s_cluster']
- local_as is defined
- groups['calico_rr'] | default([]) | length == 0
- name: Calico | Configure peering with router(s) at node scope
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
"kind": "BGPPeer",
"metadata": {
"name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id|replace(':','-')) }}"
},
"spec": {
"asNumber": "{{ item.as }}",
"node": "{{ inventory_hostname }}",
"peerIP": "{{ item.router_id }}",
"sourceAddress": "{{ item.sourceaddress|default('UseNodeIP') }}"
}}
register: output
retries: 4
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
with_items:
- "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- inventory_hostname in groups['k8s_cluster']
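
The peers list consumed by the selectattr() filters above is defined in the inventory; a hedged example of its expected shape, inferred from the fields these tasks reference (router_id, as, scope, name, sourceaddress):

# Example only -- illustrative peers definition, e.g. in group_vars
peers:
  - router_id: "10.99.0.1"     # peerIP of the upstream router
    as: "65000"                # AS number of the peer
    scope: global              # one global BGPPeer shared by all nodes
  - router_id: "10.99.0.34"
    as: "65001"
    name: tor-switch-a         # optional; otherwise router_id is used in the BGPPeer name
    sourceaddress: None        # optional; defaults to UseNodeIP
    # no scope key -> node scope: one BGPPeer per node is created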


@@ -0,0 +1,46 @@
---
- name: Slurp CNI config
slurp:
src: /etc/cni/net.d/10-calico.conflist
register: calico_cni_config_slurp
failed_when: false
- block:
- name: Set fact calico_cni_config from slurped CNI config
set_fact:
calico_cni_config: "{{ calico_cni_config_slurp['content'] | b64decode | from_json }}"
- name: Set fact calico_datastore to etcd if needed
set_fact:
calico_datastore: etcd
when:
- "'plugins' in calico_cni_config"
- "'etcd_endpoints' in calico_cni_config.plugins.0"
when: calico_cni_config_slurp.content is defined
- name: Calico | Get kubelet hostname
shell: >-
set -o pipefail && {{ kubectl }} get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
| egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
args:
executable: /bin/bash
register: calico_kubelet_name
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- "cloud_provider is defined"
- name: Calico | Gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:
- ../vars
skip: true
tags:
- facts


@@ -0,0 +1,21 @@
---
- name: Calico | Add wireguard yum repo
when:
- calico_wireguard_enabled
block:
- name: Calico | Add wireguard yum repo
yum_repository:
name: copr:copr.fedorainfracloud.org:jdoss:wireguard
file: _copr:copr.fedorainfracloud.org:jdoss:wireguard
description: Copr repo for wireguard owned by jdoss
baseurl: "{{ calico_wireguard_repo }}"
gpgcheck: yes
gpgkey: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/pubkey.gpg
skip_if_unavailable: yes
enabled: yes
repo_gpgcheck: no
when:
- ansible_os_family in ['RedHat']
- ansible_distribution not in ['Fedora']
- ansible_facts['distribution_major_version'] | int < 9


@@ -0,0 +1,30 @@
---
- name: reset | check vxlan.calico network device
stat:
path: /sys/class/net/vxlan.calico
get_attributes: no
get_checksum: no
get_mime: no
register: vxlan
- name: reset | remove the network vxlan.calico device created by calico
command: ip link del vxlan.calico
when: vxlan.stat.exists
- name: reset | check dummy0 network device
stat:
path: /sys/class/net/dummy0
get_attributes: no
get_checksum: no
get_mime: no
register: dummy0
- name: reset | remove the network device created by calico
command: ip link del dummy0
when: dummy0.stat.exists
- name: reset | get and remove remaining routes set by bird
shell: set -o pipefail && ip route show proto bird | xargs -i bash -c "ip route del {} proto bird "
args:
executable: /bin/bash
changed_when: false


@@ -0,0 +1,51 @@
---
- name: Calico | Check if typha-server exists
command: "{{ kubectl }} -n kube-system get secret typha-server"
register: typha_server_secret
changed_when: false
failed_when: false
- name: Calico | Ensure calico certs dir
file:
path: /etc/calico/certs
state: directory
mode: 0755
when: typha_server_secret.rc != 0
- name: Calico | Copy ssl script for typha certs
template:
src: make-ssl-calico.sh.j2
dest: "{{ bin_dir }}/make-ssl-typha.sh"
mode: 0755
when: typha_server_secret.rc != 0
- name: Calico | Copy ssl config for typha certs
copy:
src: openssl.conf
dest: /etc/calico/certs/openssl.conf
mode: 0644
when: typha_server_secret.rc != 0
- name: Calico | Generate typha certs
command: >-
{{ bin_dir }}/make-ssl-typha.sh
-f /etc/calico/certs/openssl.conf
-c {{ kube_cert_dir }}
-d /etc/calico/certs
-s typha
when: typha_server_secret.rc != 0
- name: Calico | Create typha tls secrets
command: >-
{{ kubectl }} -n kube-system
create secret tls {{ item.name }}
--cert {{ item.cert }}
--key {{ item.key }}
with_items:
- name: typha-server
cert: /etc/calico/certs/typha-server.crt
key: /etc/calico/certs/typha-server.key
- name: typha-client
cert: /etc/calico/certs/typha-client.crt
key: /etc/calico/certs/typha-client.key
when: typha_server_secret.rc != 0


@@ -0,0 +1,10 @@
# This is a tech-preview manifest which installs the Calico API server. Note that this manifest is liable to change
# or be removed in future releases without further warning.
#
# Namespace and namespace-scoped resources.
apiVersion: v1
kind: Namespace
metadata:
labels:
name: calico-apiserver
name: calico-apiserver


@@ -0,0 +1,287 @@
# Policy to ensure the API server isn't cut off. Can be modified, but ensure
# that the main API server is always able to reach the Calico API server.
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-apiserver
namespace: calico-apiserver
spec:
podSelector:
matchLabels:
apiserver: "true"
ingress:
- ports:
- protocol: TCP
port: 5443
---
apiVersion: v1
kind: Service
metadata:
name: calico-api
namespace: calico-apiserver
spec:
ports:
- name: apiserver
port: 443
protocol: TCP
targetPort: 5443
selector:
apiserver: "true"
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
apiserver: "true"
k8s-app: calico-apiserver
name: calico-apiserver
namespace: calico-apiserver
spec:
replicas: 1
selector:
matchLabels:
apiserver: "true"
strategy:
type: Recreate
template:
metadata:
labels:
apiserver: "true"
k8s-app: calico-apiserver
name: calico-apiserver
namespace: calico-apiserver
spec:
containers:
- args:
- --secure-port=5443
env:
- name: DATASTORE_TYPE
value: kubernetes
image: {{ calico_apiserver_image_repo }}:{{ calico_apiserver_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
livenessProbe:
httpGet:
path: /version
port: 5443
scheme: HTTPS
initialDelaySeconds: 90
periodSeconds: 10
name: calico-apiserver
readinessProbe:
exec:
command:
- /code/filecheck
failureThreshold: 5
initialDelaySeconds: 5
periodSeconds: 10
securityContext:
privileged: false
runAsUser: 0
volumeMounts:
- mountPath: /code/apiserver.local.config/certificates
name: calico-apiserver-certs
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
restartPolicy: Always
serviceAccount: calico-apiserver
serviceAccountName: calico-apiserver
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
volumes:
- name: calico-apiserver-certs
secret:
secretName: calico-apiserver-certs
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-apiserver
namespace: calico-apiserver
---
# Cluster-scoped resources below here.
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: v3.projectcalico.org
spec:
group: projectcalico.org
groupPriorityMinimum: 1500
caBundle: {{ calico_apiserver_cabundle }}
service:
name: calico-api
namespace: calico-apiserver
port: 443
version: v3
versionPriority: 200
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-crds
rules:
- apiGroups:
- extensions
- networking.k8s.io
- ""
resources:
- networkpolicies
- nodes
- namespaces
- pods
- serviceaccounts
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- globalnetworkpolicies
- networkpolicies
- clusterinformations
- hostendpoints
- globalnetworksets
- networksets
- bgpconfigurations
- bgppeers
- felixconfigurations
- kubecontrollersconfigurations
- ippools
- ipreservations
- ipamblocks
- blockaffinities
- caliconodestatuses
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- policy
resourceNames:
- calico-apiserver
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-extension-apiserver-auth-access
rules:
- apiGroups:
- ""
resourceNames:
- extension-apiserver-authentication
resources:
- configmaps
verbs:
- list
- watch
- get
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
- clusterrolebindings
- roles
- rolebindings
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-webhook-reader
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-apiserver-access-crds
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-crds
subjects:
- kind: ServiceAccount
name: calico-apiserver
namespace: calico-apiserver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-apiserver-delegate-auth
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: calico-apiserver
namespace: calico-apiserver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-apiserver-webhook-reader
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-webhook-reader
subjects:
- kind: ServiceAccount
name: calico-apiserver
namespace: calico-apiserver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-extension-apiserver-auth-access
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-extension-apiserver-auth-access
subjects:
- kind: ServiceAccount
name: calico-apiserver
namespace: calico-apiserver
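
With the v3.projectcalico.org APIService registered, Calico resources become reachable through the aggregated Kubernetes API; a hedged usage sketch (hypothetical task, not part of this commit):

- name: Example only | query Calico resources through the aggregation layer
  command: "{{ bin_dir }}/kubectl get ippools.projectcalico.org"
  changed_when: false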


@@ -0,0 +1,27 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
{% if calico_datastore == "etcd" %}
etcd_endpoints: "{{ etcd_access_addresses }}"
etcd_ca: "/calico-secrets/ca_cert.crt"
etcd_cert: "/calico-secrets/cert.crt"
etcd_key: "/calico-secrets/key.pem"
{% elif calico_datastore == "kdd" and typha_enabled %}
# To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas
# below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is
# essential.
typha_service_name: "calico-typha"
{% endif %}
{% if calico_network_backend == 'bird' %}
cluster_type: "kubespray,bgp"
calico_backend: "bird"
{% else %}
cluster_type: "kubespray"
calico_backend: "{{ calico_network_backend }}"
{% endif %}
{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router|default(false) %}
as: "{{ local_as|default(global_as_num) }}"
{% endif -%}


@@ -0,0 +1,168 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
- configmaps
verbs:
- get
# EndpointSlices are used for Service-based network policy rule
# enforcement.
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
- watch
- list
{% if calico_datastore == "kdd" %}
# Used to discover Typhas.
- get
{% endif %}
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
{% if calico_datastore == "etcd" %}
- apiGroups:
- policy
resourceNames:
- privileged
resources:
- podsecuritypolicies
verbs:
- use
{% elif calico_datastore == "kdd" %}
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipreservations
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
- caliconodestatuses
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico must update some CRDs.
- apiGroups: [ "crd.projectcalico.org" ]
resources:
- caliconodestatuses
verbs:
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
{% endif %}
# Used for creating service account tokens to be used by the CNI plugin
- apiGroups: [""]
resources:
- serviceaccounts/token
resourceNames:
- calico-node
verbs:
- create


@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system


@@ -0,0 +1,8 @@
apiVersion: crd.projectcalico.org/v1
kind: IPAMConfig
metadata:
name: default
spec:
autoAllocateBlocks: {{ calico_ipam_autoallocateblocks }}
strictAffinity: {{ calico_ipam_strictaffinity }}
maxBlocksPerHost: {{ calico_ipam_maxblocksperhost }}


@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system


@@ -0,0 +1,464 @@
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
{% if calico_datastore == "etcd" %}
kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
{% endif %}
{% if calico_felix_prometheusmetricsenabled %}
prometheus.io/scrape: 'true'
prometheus.io/port: "{{ calico_felix_prometheusmetricsport }}"
{% endif %}
spec:
nodeSelector:
{{ calico_ds_nodeselector }}
priorityClassName: system-node-critical
hostNetwork: true
serviceAccountName: calico-node
tolerations:
- operator: Exists
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
initContainers:
{% if calico_datastore == "kdd" %}
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
securityContext:
privileged: true
{% endif %}
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# Install CNI binaries
- name: UPDATE_CNI_BINARIES
value: "true"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG_FILE
value: "/host/etc/cni/net.d/calico.conflist.template"
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
{% if calico_datastore == "kdd" %}
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{% endif %}
volumeMounts:
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: {{ calico_flexvol_image_repo }}:{{ calico_flexvol_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# The location of the Calico etcd cluster.
{% if calico_datastore == "etcd" %}
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
{% elif calico_datastore == "kdd" %}
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
{% if typha_enabled %}
# Typha support: controlled by the ConfigMap.
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: calico-config
key: typha_service_name
{% if typha_secure %}
- name: FELIX_TYPHACN
value: typha-server
- name: FELIX_TYPHACAFILE
value: /etc/typha-ca/ca.crt
- name: FELIX_TYPHACERTFILE
value: /etc/typha-client/typha-client.crt
- name: FELIX_TYPHAKEYFILE
value: /etc/typha-client/typha-client.key
{% endif %}
{% endif %}
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
{% endif %}
{% if calico_network_backend == 'vxlan' %}
- name: FELIX_VXLANVNI
value: "{{ calico_vxlan_vni }}"
- name: FELIX_VXLANPORT
value: "{{ calico_vxlan_port }}"
{% endif %}
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
valueFrom:
configMapKeyRef:
name: calico-config
key: cluster_type
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set the Felix endpoint-to-host default action (defaults to RETURN).
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{ calico_endpoint_to_host_action|default('RETURN') }}"
- name: FELIX_HEALTHHOST
value: "{{ calico_healthhost }}"
{% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %}
- name: FELIX_KUBENODEPORTRANGES
value: "{{ kube_apiserver_node_port_range.split('-')[0] }}:{{ kube_apiserver_node_port_range.split('-')[1] }}"
{% endif %}
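# (When rendered, the block above rewrites the '-'-separated kube_apiserver_node_port_range
# into the ':'-separated form Felix expects, e.g. "30000-32767" becomes "30000:32767".)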
- name: FELIX_IPTABLESBACKEND
value: "{{ calico_iptables_backend }}"
- name: FELIX_IPTABLESLOCKTIMEOUTSECS
value: "{{ calico_iptables_lock_timeout_secs }}"
# The IP pool is created in the datastore before deployment, so this stays commented out:
# # Configure the IP Pool from which Pod IPs will be chosen.
# - name: CALICO_IPV4POOL_CIDR
# value: "{{ calico_pool_cidr | default(kube_pods_subnet) }}"
- name: CALICO_IPV4POOL_IPIP
value: "{{ calico_ipv4pool_ipip }}"
- name: FELIX_IPV6SUPPORT
value: "{{ enable_dual_stack_networks | default(false) }}"
# Set the Felix log level (defaults to "info")
- name: FELIX_LOGSEVERITYSCREEN
value: "{{ calico_loglevel }}"
# Set the Calico startup log level (defaults to "error")
- name: CALICO_STARTUP_LOGLEVEL
value: "{{ calico_node_startup_loglevel }}"
# Enable or disable usage report
- name: FELIX_USAGEREPORTINGENABLED
value: "{{ calico_usage_reporting }}"
{% if calico_mtu is defined %}
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
value: "{{ calico_veth_mtu | default(calico_mtu) }}"
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
value: "{{ calico_veth_mtu | default(calico_mtu) }}"
# Set MTU for the Wireguard tunnel device.
- name: FELIX_WIREGUARDMTU
value: "{{ calico_veth_mtu | default(calico_mtu) }}"
{% endif %}
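# Example (a sketch; the right values depend on the underlay): on a 1500-byte network
# with VXLAN encapsulation one might set in group_vars
#   calico_mtu: 1500
#   calico_veth_mtu: 1450
# since VXLAN adds roughly 50 bytes of overhead per packet.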
- name: FELIX_CHAININSERTMODE
value: "{{ calico_felix_chaininsertmode }}"
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{ calico_felix_prometheusmetricsenabled }}"
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{ calico_felix_prometheusmetricsport }}"
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{ calico_felix_prometheusgometricsenabled }}"
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{ calico_felix_prometheusprocessmetricsenabled }}"
{% if calico_ip_auto_method is defined %}
- name: IP_AUTODETECTION_METHOD
value: "{{ calico_ip_auto_method }}"
{% else %}
- name: NODEIP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: IP_AUTODETECTION_METHOD
value: "can-reach=$(NODEIP)"
{% endif %}
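# Example: to pin autodetection to specific interfaces instead of the default
# can-reach probe above, set e.g.
#   calico_ip_auto_method: "interface=eth.*"
# ("interface=<regex>" and "can-reach=<ip>" are standard calico-node autodetection methods).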
- name: IP
value: "autodetect"
{% if calico_ip6_auto_method is defined and enable_dual_stack_networks %}
- name: IP6_AUTODETECTION_METHOD
value: "{{ calico_ip6_auto_method }}"
{% endif %}
{% if calico_felix_mtu_iface_pattern is defined %}
- name: FELIX_MTUIFACEPATTERN
value: "{{ calico_felix_mtu_iface_pattern }}"
{% endif %}
{% if enable_dual_stack_networks %}
- name: IP6
value: autodetect
{% endif %}
{% if calico_use_default_route_src_ipaddr|default(false) %}
- name: FELIX_DEVICEROUTESOURCEADDRESS
valueFrom:
fieldRef:
fieldPath: status.hostIP
{% endif %}
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: FELIX_HEALTHENABLED
value: "true"
- name: FELIX_IGNORELOOSERPF
value: "{{ calico_node_ignorelooserpf }}"
- name: CALICO_MANAGE_CNI
value: "true"
{% if calico_node_extra_envs is defined %}
{% for key in calico_node_extra_envs %}
- name: {{ key }}
value: "{{ calico_node_extra_envs[key] }}"
{% endfor %}
{% endif %}
securityContext:
privileged: true
resources:
limits:
cpu: {{ calico_node_cpu_limit }}
memory: {{ calico_node_memory_limit }}
requests:
cpu: {{ calico_node_cpu_requests }}
memory: {{ calico_node_memory_requests }}
lifecycle:
preStop:
exec:
command:
- /bin/calico-node
- -shutdown
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
{% if calico_network_backend == "bird" %}
- -bird-live
{% endif %}
periodSeconds: 10
initialDelaySeconds: 10
timeoutSeconds: {{ calico_node_livenessprobe_timeout | default(10) }}
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
{% if calico_network_backend == "bird" %}
- -bird-ready
{% endif %}
- -felix-ready
periodSeconds: 10
timeoutSeconds: {{ calico_node_readinessprobe_timeout | default(10) }}
failureThreshold: 6
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
{% if calico_datastore == "etcd" %}
- mountPath: /calico-secrets
name: etcd-certs
readOnly: true
{% endif %}
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
{% if typha_secure %}
- name: typha-client
mountPath: /etc/typha-client
readOnly: true
- name: typha-cacert
subPath: ca.crt
mountPath: /etc/typha-ca/ca.crt
readOnly: true
{% endif %}
- name: policysync
mountPath: /var/run/nodeagent
{% if calico_bpf_enabled %}
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: sysfs
mountPath: /sys/fs/
# Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
# If the host is known to mount that filesystem already then Bidirectional can be omitted.
mountPropagation: Bidirectional
{% endif %}
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
# Used to install CNI.
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
{% if calico_datastore == "etcd" %}
# Mount in the etcd TLS secrets.
- name: etcd-certs
hostPath:
path: "{{ calico_cert_dir }}"
{% endif %}
# Mount the global iptables lock file, used by calico/node
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
{% if calico_datastore == "kdd" %}
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
{% endif %}
{% if typha_enabled and typha_secure %}
- name: typha-client
secret:
secretName: typha-client
items:
- key: tls.crt
path: typha-client.crt
- key: tls.key
path: typha-client.key
- name: typha-cacert
hostPath:
path: "/etc/kubernetes/ssl/"
{% endif %}
{% if calico_bpf_enabled %}
- name: sysfs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
{% endif %}
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: "{{ kubelet_flexvolumes_plugins_dir | default('/usr/libexec/kubernetes/kubelet-plugins/volume/exec') }}/nodeagent~uds"
updateStrategy:
rollingUpdate:
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate

View File

@@ -0,0 +1,190 @@
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
apiVersion: v1
kind: Service
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
spec:
ports:
- port: 5473
protocol: TCP
targetPort: calico-typha
name: calico-typha
{% if typha_prometheusmetricsenabled %}
- port: {{ typha_prometheusmetricsport }}
protocol: TCP
targetPort: http-metrics
name: metrics
{% endif %}
selector:
k8s-app: calico-typha
---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
spec:
# Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
# typha_service_name variable in the calico-config ConfigMap above.
#
# We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
# (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
# production, we recommend running at least 3 replicas to reduce the impact of rolling upgrades.
replicas: {{ typha_replicas }}
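{# Sizing sketch (hypothetical, not used by this template): the replica count could be
   derived from the inventory size in group_vars, e.g. assuming the usual k8s_cluster group:
     typha_replicas: "{{ [3, ((groups['k8s_cluster'] | length) / 150) | round(0, 'ceil') | int] | max }}"
   which keeps at least 3 replicas and adds roughly one per 150 nodes. #}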
revisionHistoryLimit: 2
selector:
matchLabels:
k8s-app: calico-typha
template:
metadata:
labels:
k8s-app: calico-typha
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
{% if typha_prometheusmetricsenabled %}
prometheus.io/scrape: 'true'
prometheus.io/port: "{{ typha_prometheusmetricsport }}"
{% endif %}
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
# Since Calico can't network a pod until Typha is up, we need to run Typha itself
# as a host-networked pod.
serviceAccountName: calico-node
priorityClassName: system-cluster-critical
# fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
securityContext:
fsGroup: 65534
containers:
- image: {{ calico_typha_image_repo }}:{{ calico_typha_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
name: calico-typha
ports:
- containerPort: 5473
name: calico-typha
protocol: TCP
{% if typha_prometheusmetricsenabled %}
- containerPort: {{ typha_prometheusmetricsport }}
name: http-metrics
protocol: TCP
{% endif %}
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Enable "info" logging by default. Can be set to "debug" to increase verbosity.
- name: TYPHA_LOGSEVERITYSCREEN
value: "info"
# Disable logging to file and syslog since those don't make sense in Kubernetes.
- name: TYPHA_LOGFILEPATH
value: "none"
- name: TYPHA_LOGSEVERITYSYS
value: "none"
# Monitor the Kubernetes API to find the number of running instances and rebalance
# connections.
- name: TYPHA_CONNECTIONREBALANCINGMODE
value: "kubernetes"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_HEALTHENABLED
value: "true"
- name: TYPHA_MAXCONNECTIONSLOWERLIMIT
value: "{{ typha_max_connections_lower_limit }}"
{% if typha_secure %}
- name: TYPHA_CAFILE
value: /etc/ca/ca.crt
- name: TYPHA_CLIENTCN
value: typha-client
- name: TYPHA_SERVERCERTFILE
value: /etc/typha/server_certificate.pem
- name: TYPHA_SERVERKEYFILE
value: /etc/typha/server_key.pem
{% endif %}
{% if typha_prometheusmetricsenabled %}
# Since Typha is host-networked,
# this opens a port on the host, which may need to be secured.
- name: TYPHA_PROMETHEUSMETRICSENABLED
value: "true"
- name: TYPHA_PROMETHEUSMETRICSPORT
value: "{{ typha_prometheusmetricsport }}"
{% endif %}
{% if typha_secure %}
volumeMounts:
- mountPath: /etc/typha
name: typha-server
readOnly: true
- mountPath: /etc/ca/ca.crt
subPath: ca.crt
name: cacert
readOnly: true
{% endif %}
# USE_POD_CIDR is only needed for version >=3.7 when the 'host-local' ipam is used,
# which should never happen given templates/cni-calico.conflist.j2, so it stays commented out.
# Configure route aggregation based on pod CIDR.
# - name: USE_POD_CIDR
# value: "true"
livenessProbe:
httpGet:
path: /liveness
port: 9098
host: localhost
periodSeconds: 30
initialDelaySeconds: 30
readinessProbe:
httpGet:
path: /readiness
port: 9098
host: localhost
periodSeconds: 10
{% if typha_secure %}
volumes:
- name: typha-server
secret:
secretName: typha-server
items:
- key: tls.crt
path: server_certificate.pem
- key: tls.key
path: server_key.pem
- name: cacert
hostPath:
path: "{{ kube_cert_dir }}"
{% endif %}
---
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict Typha pods
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-typha

View File

@@ -0,0 +1,6 @@
#!/bin/bash
ETCD_ENDPOINTS={{ etcd_access_addresses }} \
ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \
ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \
ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \
{{ bin_dir }}/calicoctl --allow-version-mismatch "$@"

View File

@@ -0,0 +1,8 @@
#!/bin/bash
DATASTORE_TYPE=kubernetes \
{% if inventory_hostname in groups['kube_control_plane'] %}
KUBECONFIG=/etc/kubernetes/admin.conf \
{% else %}
KUBECONFIG=/etc/cni/net.d/calico-kubeconfig \
{% endif %}
{{ bin_dir }}/calicoctl --allow-version-mismatch "$@"
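# Example usage once installed (standard calicoctl subcommands):
#   calicoctl.sh get nodes
#   calicoctl.sh get ippools -o wide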

View File

@@ -0,0 +1,86 @@
{
"name": "{{ calico_cni_name }}",
"cniVersion":"0.3.1",
"plugins":[
{
{% if calico_datastore == "kdd" %}
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
{% else %}
{% if cloud_provider is defined %}
"nodename": "{{ calico_kubelet_name.stdout }}",
{% else %}
"nodename": "{{ calico_baremetal_nodename }}",
{% endif %}
{% endif %}
"type": "calico",
"log_level": "info",
{% if calico_cni_log_file_path %}
"log_file_path": "{{ calico_cni_log_file_path }}",
{% endif %}
{% if calico_datastore == "etcd" %}
"etcd_endpoints": "{{ etcd_access_addresses }}",
"etcd_cert_file": "{{ calico_cert_dir }}/cert.crt",
"etcd_key_file": "{{ calico_cert_dir }}/key.pem",
"etcd_ca_cert_file": "{{ calico_cert_dir }}/ca_cert.crt",
{% endif %}
{% if calico_ipam_host_local is defined %}
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
{% else %}
"ipam": {
"type": "calico-ipam",
{% if enable_dual_stack_networks %}
"assign_ipv6": "true",
{% if calico_cni_pool_ipv6 %}
"ipv6_pools": ["{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}"],
{% endif %}
{% endif %}
{% if calico_cni_pool %}
"ipv4_pools": ["{{ calico_pool_cidr | default(kube_pods_subnet) }}"],
{% endif %}
"assign_ipv4": "true"
},
{% endif %}
{% if calico_allow_ip_forwarding %}
"container_settings": {
"allow_ip_forwarding": true
},
{% endif %}
{% if (calico_feature_control is defined) and (calico_feature_control|length > 0) %}
"feature_control": {
{% for fc in calico_feature_control -%}
{% set fcval = calico_feature_control[fc] -%}
"{{ fc }}": {{ (fcval | string | lower) if (fcval == true or fcval == false) else "\"" + fcval + "\"" }}{{ "," if not loop.last else "" }}
{% endfor -%}
{{- "" }}
},
{% endif %}
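{# Rendering sketch (hypothetical keys/values): with
     calico_feature_control: { ip_addrs_no_ipam: true, floating_ips: "enabled" }
   the block above emits
     "feature_control": { "ip_addrs_no_ipam": true, "floating_ips": "enabled" },
   i.e. booleans stay unquoted while strings are quoted. #}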
{% if enable_network_policy %}
"policy": {
"type": "k8s"
},
{% endif %}
{% if calico_mtu is defined and calico_mtu is number %}
"mtu": {{ calico_mtu }},
{% endif %}
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type":"portmap",
"capabilities": {
"portMappings": true
}
},
{
"type":"bandwidth",
"capabilities": {
"bandwidth": true
}
}
]
}

View File

@@ -0,0 +1,19 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
namespace: kube-system
name: kubernetes-services-endpoint
data:
{% if calico_bpf_enabled %}
{% if loadbalancer_apiserver is defined %}
KUBERNETES_SERVICE_HOST: "{{ apiserver_loadbalancer_domain_name }}"
KUBERNETES_SERVICE_PORT: "{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
{%- elif use_localhost_as_kubeapi_loadbalancer|default(False)|bool %}
KUBERNETES_SERVICE_HOST: "127.0.0.1"
KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_port }}"
{%- else %}
KUBERNETES_SERVICE_HOST: "{{ first_kube_control_plane_address }}"
KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_port }}"
{%- endif %}
{% endif %}
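# Rendered example (a sketch with placeholder values): with an external API load balancer
# defined on port 8443, the data above becomes
#   KUBERNETES_SERVICE_HOST: "apiserver.example.org"
#   KUBERNETES_SERVICE_PORT: "8443"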

View File

@@ -0,0 +1,102 @@
#!/bin/bash
# Author: Smana smainklh@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o pipefail
usage()
{
cat << EOF
Create self-signed certificates
Usage : $(basename $0) -f <config> [-d <ssldir>] [-c <cadir>] -s <service>
-h | --help : Show this message
-f | --config : Openssl configuration file
-d | --ssldir : Directory where the certificates will be installed
-c | --cadir : Directory where the existing CA is located
-s | --service : Service for the ca
ex :
$(basename $0) -f openssl.conf -d /srv/ssl
EOF
}
# Options parsing
while (($#)); do
case "$1" in
-h | --help) usage; exit 0;;
-f | --config) CONFIG=${2}; shift 2;;
-d | --ssldir) SSLDIR="${2}"; shift 2;;
-c | --cadir) CADIR="${2}"; shift 2;;
-s | --service) SERVICE="${2}"; shift 2;;
*)
usage
echo "ERROR : Unknown option"
exit 3
;;
esac
done
if [ -z ${CONFIG} ]; then
echo "ERROR: the openssl configuration file is missing. option -f"
exit 1
fi
if [ -z ${SSLDIR} ]; then
SSLDIR="/etc/calico/certs"
fi
tmpdir=$(mktemp -d /tmp/calico_${SERVICE}_certs.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"
mkdir -p ${SSLDIR} ${CADIR}
# Root CA
if [ -e "$CADIR/ca.key" ]; then
# Reuse existing CA
cp $CADIR/{ca.crt,ca.key} .
else
openssl genrsa -out ca.key {{certificates_key_size}} > /dev/null 2>&1
openssl req -x509 -new -nodes -key ca.key -days {{certificates_duration}} -out ca.crt -subj "/CN=calico-${SERVICE}-ca" > /dev/null 2>&1
fi
if [ "$SERVICE" == "typha" ]; then
# Typha server
openssl genrsa -out typha-server.key {{certificates_key_size}} > /dev/null 2>&1
openssl req -new -key typha-server.key -out typha-server.csr -subj "/CN=typha-server" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in typha-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-server.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
# Typha client
openssl genrsa -out typha-client.key {{certificates_key_size}} > /dev/null 2>&1
openssl req -new -key typha-client.key -out typha-client.csr -subj "/CN=typha-client" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in typha-client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-client.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
elif [ "$SERVICE" == "apiserver" ]; then
# calico-apiserver
openssl genrsa -out apiserver.key {{certificates_key_size}} > /dev/null 2>&1
openssl req -new -key apiserver.key -out apiserver.csr -subj "/CN=calico-apiserver" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out apiserver.crt -days {{certificates_duration}} -extensions ssl_client_apiserver -extfile ${CONFIG} > /dev/null 2>&1
else
echo "ERROR: the openssl configuration file is missing. option -s"
exit 1
fi
# Install certs
if [ -e "$CADIR/ca.key" ]; then
# Don't re-install the existing CA
rm -f ca.crt ca.key
fi
mv {*.crt,*.key} ${SSLDIR}/
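# Example invocation (a sketch; the rendered script name and CA directory depend on the role):
#   bash <this-script> -f openssl.conf -d /etc/calico/certs -c /etc/kubernetes/ssl -s typha
# The generated certificate can then be inspected with:
#   openssl x509 -in /etc/calico/certs/typha-server.crt -noout -subject -dates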

View File

@@ -0,0 +1,5 @@
---
calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-7-$basearch/
calico_wireguard_packages:
- wireguard-dkms
- wireguard-tools

View File

@@ -0,0 +1,3 @@
---
calico_wireguard_packages:
- wireguard-tools

View File

@@ -0,0 +1,3 @@
---
calico_wireguard_packages:
- wireguard

View File

@@ -0,0 +1,3 @@
---
calico_wireguard_packages:
- wireguard-tools

View File

@@ -0,0 +1,3 @@
---
calico_wireguard_packages:
- wireguard-tools

View File

@@ -0,0 +1,3 @@
---
calico_wireguard_packages:
- wireguard-tools

View File

@@ -0,0 +1,4 @@
---
calico_wireguard_packages:
- wireguard-dkms
- wireguard-tools

View File

@@ -0,0 +1,3 @@
---
calico_wireguard_packages:
- wireguard-tools

View File

@@ -0,0 +1,33 @@
---
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
canal_masquerade: "true"
# Etcd SSL dirs
canal_cert_dir: /etc/canal/certs
# Canal Network Policy directory
canal_policy_dir: /etc/kubernetes/policy
# Limits for apps
calico_node_memory_limit: 500M
calico_node_cpu_limit: 200m
calico_node_memory_requests: 64M
calico_node_cpu_requests: 50m
flannel_memory_limit: 500M
flannel_cpu_limit: 200m
flannel_memory_requests: 64M
flannel_cpu_requests: 50m
# etcd cert filenames
kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
# Set log path for calico CNI plugin. Set to false to disable logging to disk.
calico_cni_log_file_path: /var/log/calico/cni/cni.log

View File

@@ -0,0 +1,14 @@
---
- name: reset_canal_cni
command: /bin/true
notify:
- delete 10-canal.conflist
- delete canal-node containers
- name: delete 10-canal.conflist
file:
path: /etc/canal/10-canal.conflist
state: absent
- name: delete canal-node containers
shell: "docker ps -af name=k8s_POD_canal-node* -q | xargs --no-run-if-empty docker rm -f"

View File

@@ -0,0 +1,103 @@
---
- name: Canal | Write Canal cni config
template:
src: "cni-canal.conflist.j2"
dest: "/etc/cni/net.d/canal.conflist.template"
mode: 0644
owner: "{{ kube_owner }}"
register: canal_conflist
notify: reset_canal_cni
- name: Canal | Create canal certs directory
file:
dest: "{{ canal_cert_dir }}"
state: directory
mode: 0750
owner: root
group: root
- name: Canal | Link etcd certificates for canal-node
file:
src: "{{ etcd_cert_dir }}/{{ item.s }}"
dest: "{{ canal_cert_dir }}/{{ item.d }}"
state: hard
mode: 0640
force: yes
with_items:
- {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
- {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
- {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
- name: Slurp etcd cacert file
slurp:
src: "{{ canal_cert_dir }}/ca_cert.crt"
register: etcd_ca_cert_file
failed_when: false
- name: Slurp etcd cert file
slurp:
src: "{{ canal_cert_dir }}/cert.crt"
register: etcd_cert_file
failed_when: false
- name: Slurp etcd key file
slurp:
src: "{{ canal_cert_dir }}/key.pem"
register: etcd_key_file
failed_when: false
# Flannel needs the etcd v2 API
- name: Canal | Set Flannel etcd configuration
command: |-
{{ bin_dir }}/etcdctl set /coreos.com/network/config \
'{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }'
register: output
retries: 4
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
delegate_to: "{{ groups['etcd'][0] }}"
changed_when: false
run_once: true
environment:
ETCDCTL_API: 2
ETCDCTL_CA_FILE: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"
ETCDCTL_CERT_FILE: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'][0] + '.pem' }}"
ETCDCTL_KEY_FILE: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'][0] + '-key.pem' }}"
ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
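# The key written above can be checked manually with the same etcd v2 environment, e.g.:
#   ETCDCTL_API=2 etcdctl --endpoints=<etcd_access_addresses> get /coreos.com/network/config
# (plus the ETCDCTL_CA_FILE/CERT_FILE/KEY_FILE variables shown above when etcd uses TLS).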
- name: Canal | Create canal node manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: canal-calico-etcd-secret, file: canal-secret-calico-etcd.yml, type: secret}
- {name: canal-config, file: canal-config.yaml, type: cm}
- {name: canal-node, file: canal-node.yaml, type: ds}
- {name: canal-kube-controllers, file: canal-calico-kube-controllers.yml, type: deployment}
- {name: canal-cr, file: canal-cr.yml, type: clusterrole}
- {name: canal, file: canal-node-sa.yml, type: sa}
- {name: calico-cr, file: canal-cr-calico-node.yml, type: clusterrole}
- {name: calico-kube-cr, file: canal-cr-calico-kube-controllers.yml, type: clusterrole}
- {name: calico-crd, file: canal-crd-calico.yml, type: crd}
- {name: flannel, file: canal-cr-flannel.yml, type: clusterrole}
- {name: canal, file: canal-crb-canal.yml, type: clusterrolebinding}
- {name: canal-calico, file: canal-crb-calico.yml, type: clusterrolebinding}
- {name: canal-flannel, file: canal-crb-flannel.yml, type: clusterrolebinding}
register: canal_manifests
when:
- inventory_hostname in groups['kube_control_plane']
- name: Canal | Install calicoctl wrapper script
template:
src: calicoctl.sh.j2
dest: "{{ bin_dir }}/calicoctl.sh"
mode: 0755
owner: root
group: root
- name: Canal | Create network policy directory
file:
path: "{{ canal_policy_dir }}"
state: directory
mode: 0755

View File

@@ -0,0 +1,6 @@
#!/bin/bash
ETCD_ENDPOINTS={{ etcd_access_addresses }} \
ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \
ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \
ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \
{{ bin_dir }}/calicoctl "$@"

View File

@@ -0,0 +1,96 @@
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
# The controllers must run in the host network namespace so that
# they aren't governed by policy that would prevent them from working.
hostNetwork: true
containers:
- name: calico-kube-controllers
image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_cert
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: policy,namespace,serviceaccount,workloadendpoint,node
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /calico-secrets
name: etcd-certs
livenessProbe:
exec:
command:
- /usr/bin/check-status
- -l
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
periodSeconds: 10
volumes:
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0440

View File

@@ -0,0 +1,80 @@
# This ConfigMap can be used to configure a self-hosted Canal installation.
# See `canal.yaml` for an example of a Canal deployment which uses
# the config in this ConfigMap.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "{{ etcd_access_addresses }}"
# When using a TLS-enabled etcd, the following paths are used; the
# calico-etcd-secrets Secret must be populated with the matching files.
etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"
# Typha is disabled.
typha_service_name: "none"
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: "{{ canal_iface }}"
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "{{ canal_masquerade }}"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
veth_mtu: "0"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "canal",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"type": "calico",
"include_default_routes": true,
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"log_level": "info",
{% if calico_cni_log_file_path %}
"log_file_path": "{{ calico_cni_log_file_path }}",
{% endif %}
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
},
{
"type": "portmap",
"capabilities": {"portMappings": true},
"snat": true
}
]
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ kube_pods_subnet }}",
"Backend": {
"Type": "vxlan"
}
}

View File

@@ -0,0 +1,83 @@
# Source: calico/templates/calico-kube-controllers-rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are watched to check for existence as part of IPAM controller.
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
# IPAM resources are manipulated in response to node and block updates, as well as periodic triggers.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipreservations
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- watch
# Pools are watched to maintain a mapping of blocks to IP pools.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- watch
# kube-controllers manages hostendpoints.
- apiGroups: ["crd.projectcalico.org"]
resources:
- hostendpoints
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- list
- create
- update
- watch
# KubeControllersConfiguration is where it gets its config
- apiGroups: ["crd.projectcalico.org"]
resources:
- kubecontrollersconfigurations
verbs:
# read its own config
- get
# create a default if none exists
- create
# update status
- update
# watch for changes
- watch

View File

@@ -0,0 +1,133 @@
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
# Used for creating service account tokens to be used by the CNI plugin
- apiGroups: [""]
resources:
- serviceaccounts/token
resourceNames:
- canal
verbs:
- create
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
# EndpointSlices are used for Service-based network policy rule
# enforcement.
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
# Pod CIDR auto-detection on kubeadm needs access to config maps.
- apiGroups: [""]
resources:
- configmaps
verbs:
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipreservations
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
- caliconodestatuses
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico must update some CRDs.
- apiGroups: [ "crd.projectcalico.org" ]
resources:
- caliconodestatuses
verbs:
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update

View File

@@ -0,0 +1,23 @@
# Flannel ClusterRole
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- apiGroups: [""]
resources:
- nodes
verbs:
- list
- watch
- apiGroups: [""]
resources:
- nodes/status
verbs:
- patch

View File

@@ -0,0 +1,30 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: canal
rules:
# Used for creating service account tokens to be used by the CNI plugin
- apiGroups: [""]
resources:
- serviceaccounts/token
verbs:
- create
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
# Pod CIDR auto-detection on kubeadm needs access to config maps.
- apiGroups: [""]
resources:
- configmaps
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list

View File

@@ -0,0 +1,27 @@
---
# Bind the calico ClusterRole to the canal ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: canal
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system

View File

@@ -0,0 +1,14 @@
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system

View File

@@ -0,0 +1,418 @@
# Source: calico/templates/calico-node.yaml
# This manifest installs the canal container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: {{ serial | default('20%') }}
template:
metadata:
labels:
k8s-app: canal
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure canal gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: canal
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Set the serviceaccount name to use for the Calico CNI plugin.
# We use canal-node instead of calico-node when using flannel networking.
- name: CALICO_CNI_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-canal.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_cert
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /calico-secrets
name: etcd-certs
securityContext:
privileged: true
# This init container mounts the filesystems needed by the BPF data plane,
# i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed
# in a best-effort fashion (errors are not fatal) so that pod creation in iptables mode is not disrupted.
- name: "mount-bpffs"
image: "{{ calico_node_image_repo }}:{{ calico_node_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command: ["calico-node", "-init", "-best-effort"]
volumeMounts:
- mountPath: /sys/fs
name: sys-fs
# Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host
# so that it outlives the init container.
mountPropagation: Bidirectional
- mountPath: /var/run/calico
name: var-run-calico
# Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host
# so that it outlives the init container.
mountPropagation: Bidirectional
# Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary,
# executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly.
- mountPath: /nodeproc
name: nodeproc
readOnly: true
securityContext:
privileged: true
containers:
# Runs canal container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: "{{ calico_node_image_repo }}:{{ calico_node_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_cert
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set the serviceaccount name to use for the Calico CNI plugin.
# We use canal-node instead of calico-node when using flannel networking.
- name: CALICO_CNI_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,canal"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# No IP address needed.
- name: IP
value: ""
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
# - name: CALICO_IPV4POOL_CIDR
# value: "192.168.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
limits:
cpu: {{ calico_node_cpu_limit }}
memory: {{ calico_node_memory_limit }}
requests:
cpu: {{ calico_node_cpu_requests }}
memory: {{ calico_node_memory_requests }}
lifecycle:
preStop:
exec:
command:
- /bin/calico-node
- -shutdown
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /readiness
port: 9099
host: localhost
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- mountPath: /calico-secrets
name: etcd-certs
- name: policysync
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: bpffs
mountPath: /sys/fs/bpf
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
# Runs the flannel daemon to enable vxlan networking between
# container hosts.
- name: flannel
image: "{{ flannel_image_repo }}:{{ flannel_image_tag }}"
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"]
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
# The location of the etcd cluster.
- name: FLANNELD_ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_cert
# Location of the CA certificate for etcd.
- name: FLANNELD_ETCD_CAFILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_ca
# Location of the client key for etcd.
- name: FLANNELD_ETCD_KEYFILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_key
# Location of the client certificate for etcd.
- name: FLANNELD_ETCD_CERTFILE
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_cert
# The interface flannel should run on.
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
# Perform masquerade on traffic leaving the pod cidr.
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
# Write the subnet.env file to the mounted directory.
- name: FLANNELD_SUBNET_FILE
value: "/run/flannel/subnet.env"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/resolv.conf
name: resolv
- mountPath: /run/flannel
name: run-flannel
- mountPath: /calico-secrets
name: etcd-certs
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: flannel-cfg
configMap:
name: canal-config
# Used by canal-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: sys-fs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
- name: bpffs
hostPath:
path: /sys/fs/bpf
type: Directory
# mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs.
- name: nodeproc
hostPath:
path: /proc
# Used by flannel.
- name: run-flannel
hostPath:
path: /run/flannel
- name: resolv
hostPath:
path: /etc/resolv.conf
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0400
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent

View File

@@ -0,0 +1,18 @@
# Source: calico/templates/calico-etcd-secrets.yaml
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: calico-etcd-secrets
namespace: kube-system
data:
# Populate the following with etcd TLS configuration if desired, but leave blank if
# not using TLS for etcd.
# The keys below should be uncommented and the values populated with the base64
# encoded contents of each file that would be associated with the TLS data.
# Example command for encoding a file contents: cat <file> | base64 -w 0
etcd-key: {{ etcd_key_file.content }}
etcd-cert: {{ etcd_cert_file.content }}
etcd-ca: {{ etcd_ca_cert_file.content }}

View File

@@ -0,0 +1,34 @@
{
"name": "canal",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"type": "calico",
"include_default_routes": true,
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"log_level": "info",
{% if calico_cni_log_file_path %}
"log_file_path": "{{ calico_cni_log_file_path }}",
{% endif %}
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
},
{
"type": "portmap",
"capabilities": {"portMappings": true},
"snat": true
}
]
}

View File

@@ -0,0 +1,256 @@
---
cilium_min_version_required: "1.10"
# Log-level
cilium_debug: false
cilium_mtu: ""
cilium_enable_ipv4: true
cilium_enable_ipv6: false
# Cilium agent health port
cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879 {%- else -%} 9876 {%- endif -%}"
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
# These can be queried with:
# `kubectl get ciliumid`
# - "kvstore" stores identities in an etcd kvstore.
# - In order to support External Workloads, "crd" is required
# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
# - KVStore operations are only required when cilium-operator is running with any of the below options:
# - --synchronize-k8s-services
# - --synchronize-k8s-nodes
# - --identity-allocation-mode=kvstore
# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
cilium_identity_allocation_mode: kvstore
# Etcd SSL dirs
cilium_cert_dir: /etc/cilium/certs
kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
# Limits for apps
cilium_memory_limit: 500M
cilium_cpu_limit: 500m
cilium_memory_requests: 64M
cilium_cpu_requests: 100m
# Overlay Network Mode
cilium_tunnel_mode: vxlan
# Optional features
cilium_enable_prometheus: false
# Enable if you want to make use of hostPort mappings
cilium_enable_portmap: false
# Monitor aggregation level (none/low/medium/maximum)
cilium_monitor_aggregation: medium
# Kube Proxy Replacement mode (strict/probe/partial)
cilium_kube_proxy_replacement: probe
# If upgrading from Cilium < 1.5, you may want to override some of these options
# to prevent service disruptions. See also:
# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
cilium_preallocate_bpf_maps: false
# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
cilium_tofqdns_enable_poller: false
# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
cilium_enable_legacy_services: false
# Deploy cilium even if kube_network_plugin is not cilium.
# This makes it possible to deploy Cilium alongside another CNI to replace kube-proxy.
cilium_deploy_additionally: false
# Auto direct nodes routes can be used to advertise pods routes in your cluster
# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`).
# This works only if you have a L2 connectivity between all your nodes.
# You will also have to specify the variable `cilium_native_routing_cidr` to
# make this work. Please refer to the Cilium documentation for more
# information about this kind of setup.
cilium_auto_direct_node_routes: false
# Allows to explicitly specify the IPv4 CIDR for native routing.
# When specified, Cilium assumes networking for this CIDR is preconfigured and
# hands traffic destined for that range to the Linux network stack without
# applying any SNAT.
# Generally speaking, specifying a native routing CIDR implies that Cilium can
# depend on the underlying networking stack to route packets to their
# destination. To offer a concrete example, if Cilium is configured to use
# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
# the user must configure the routes to reach pods, either manually or by
# setting the auto-direct-node-routes flag.
cilium_native_routing_cidr: ""
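# Example (a sketch): for direct routing over a flat 10.0.0.0/8 underlay one might set
#   cilium_tunnel_mode: disabled
#   cilium_auto_direct_node_routes: true
#   cilium_native_routing_cidr: "10.0.0.0/8"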
# Allows to explicitly specify the IPv6 CIDR for native routing.
cilium_native_routing_cidr_ipv6: ""
# Enable transparent network encryption.
cilium_encryption_enabled: false
# Encryption method. Can be either ipsec or wireguard.
# Only effective when `cilium_encryption_enabled` is set to true.
cilium_encryption_type: "ipsec"
# Enable encryption for pure node to node traffic.
# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
cilium_ipsec_node_encryption: false
# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation.
# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard,
# it will fall back on the wireguard-go user-space implementation of WireGuard.
# This option is only effective when `cilium_encryption_type` is set to `wireguard`.
cilium_wireguard_userspace_fallback: false
# Enable Bandwidth Manager
# Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
# Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies.
# In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods.
# Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
cilium_enable_bandwidth_manager: false
# IP Masquerade Agent
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
cilium_ip_masq_agent_enable: false
### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
cilium_non_masquerade_cidrs:
- 10.0.0.0/8
- 172.16.0.0/12
- 192.168.0.0/16
- 100.64.0.0/10
- 192.0.0.0/24
- 192.0.2.0/24
- 192.88.99.0/24
- 198.18.0.0/15
- 198.51.100.0/24
- 203.0.113.0/24
- 240.0.0.0/4
### Indicates whether to masquerade traffic to the link local prefix.
### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
cilium_masq_link_local: false
### A time interval at which the agent attempts to reload config from disk
cilium_ip_masq_resync_interval: 60s
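### For reference, these settings map onto an ip-masq-agent config of roughly this form
### (a sketch of the rendered content, not part of these defaults):
###   nonMasqueradeCIDRs:
###     - 10.0.0.0/8
###     - 172.16.0.0/12
###   masqLinkLocal: false
###   resyncInterval: 60s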
# Hubble
### Enable Hubble without install
cilium_enable_hubble: false
### Enable Hubble Metrics
cilium_enable_hubble_metrics: false
### if cilium_enable_hubble_metrics: true
cilium_hubble_metrics: {}
# - dns
# - drop
# - tcp
# - flow
# - icmp
# - http
### Enable Hubble install
cilium_hubble_install: false
### Enable auto generate certs if cilium_hubble_install: true
cilium_hubble_tls_generate: false
# IP address management mode for v1.9+.
# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
cilium_ipam_mode: kubernetes
# Extra arguments for the Cilium agent
cilium_agent_custom_args: []
# For adding and mounting extra volumes to the cilium agent
cilium_agent_extra_volumes: []
cilium_agent_extra_volume_mounts: []
cilium_agent_extra_env_vars: []
cilium_operator_replicas: 2
# The address on which the Cilium operator binds its health check API
cilium_operator_api_serve_addr: "127.0.0.1:9234"
## A dictionary of extra config variables to add to cilium-config, formatted like:
## cilium_config_extra_vars:
## var1: "value1"
## var2: "value2"
cilium_config_extra_vars: {}
# For adding and mounting extra volumes to the cilium operator
cilium_operator_extra_volumes: []
cilium_operator_extra_volume_mounts: []
# Extra arguments for the Cilium Operator
cilium_operator_custom_args: []
# Name of the cluster. Only relevant when building a mesh of clusters.
cilium_cluster_name: default
# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime.
# Available for Cilium v1.10 and up.
cilium_cni_exclusive: true
# Configure the log file for CNI logging with retention policy of 7 days.
# Disable CNI file logging by setting this field to empty explicitly.
# Available for Cilium v1.12 and up.
cilium_cni_log_file: "/var/run/cilium/cilium-cni.log"
# -- Configure cgroup related configuration
# -- Enable auto mount of cgroup2 filesystem.
# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at
# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod.
# If `cilium_cgroup_auto_mount` is disabled, it is expected that the cgroup2
# filesystem is already mounted at the `cilium_cgroup_host_root` path on the host,
# and that path is then mounted inside the cilium agent pod at the same location.
# Available for Cilium v1.11 and up
cilium_cgroup_auto_mount: true
# -- Configure cgroup root where cgroup2 filesystem is mounted on the host
cilium_cgroup_host_root: "/run/cilium/cgroupv2"
# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
cilium_bpf_map_dynamic_size_ratio: "0.0025"
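# For example, on a node with 16 GiB of RAM the default ratio of 0.0025 budgets
# roughly 16384 MiB * 0.0025 = ~41 MiB for these BPF maps (illustrative arithmetic).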
# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
cilium_enable_ipv4_masquerade: true
# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
cilium_enable_ipv6_masquerade: true
# -- Enable native IP masquerade support in eBPF
cilium_enable_bpf_masquerade: false
# -- Configure whether direct routing mode should route traffic via
# host stack (true) or directly and more efficiently out of BPF (false) if
# the kernel supports it. The latter has the implication that it will also
# bypass netfilter in the host namespace.
cilium_enable_host_legacy_routing: true
# -- Enable use of the remote node identity.
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
cilium_enable_remote_node_identity: true
# -- Enable the use of well-known identities.
cilium_enable_well_known_identities: false
# The monitor aggregation flags determine which TCP flags, upon their
# first observation, cause monitor notifications to be generated.
#
# Only effective when monitor aggregation is set to "medium" or higher.
cilium_monitor_aggregation_flags: "all"
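# Example of a narrower selection (illustrative; a comma-separated list of TCP flag names):
# cilium_monitor_aggregation_flags: "syn,fin,rst"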
cilium_enable_bpf_clock_probe: true
# -- Whether to disable CiliumNetworkPolicy (CNP) status updates.
cilium_disable_cnp_status_updates: true
# Configure how long to wait for the Cilium DaemonSet to be ready again
cilium_rolling_restart_wait_retries_count: 30
cilium_rolling_restart_wait_retries_delay_seconds: 10
# Cilium changed the default metrics exporter ports in 1.12
cilium_agent_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9962', '9090') }}"
cilium_operator_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9963', '6942') }}"
cilium_hubble_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9965', '9091') }}"
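# With cilium_version "v1.13.0", for example, these resolve to 9962 / 9963 / 9965;
# with "v1.11.7" they fall back to the pre-1.12 ports 9090 / 6942 / 9091.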


@@ -0,0 +1,3 @@
---
dependencies:
- role: network_plugin/cni


@@ -0,0 +1,33 @@
---
- name: Cilium | Start Resources
kube:
name: "{{ item.item.name }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.name }}-{{ item.item.file }}"
state: "latest"
loop: "{{ cilium_node_manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
- name: Cilium | Wait for pods to run
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
register: pods_not_ready
until: pods_not_ready.stdout.find("cilium")==-1
retries: "{{ cilium_rolling_restart_wait_retries_count | int }}"
delay: "{{ cilium_rolling_restart_wait_retries_delay_seconds | int }}"
failed_when: false
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Cilium | Hubble install
kube:
name: "{{ item.item.name }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/hubble/{{ item.item.name }}-{{ item.item.file }}"
state: "latest"
loop: "{{ cilium_hubble_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
- cilium_enable_hubble and cilium_hubble_install


@@ -0,0 +1,63 @@
---
- name: Cilium | Check Cilium encryption `cilium_ipsec_key` for ipsec
assert:
that:
- "cilium_ipsec_key is defined"
msg: "cilium_ipsec_key should be defined to enable encryption using ipsec"
when:
- cilium_encryption_enabled
- cilium_encryption_type == "ipsec"
- cilium_tunnel_mode in ['vxlan']
# TODO: Clean this task up when we drop backward compatibility support for `cilium_ipsec_enabled`
- name: Stop if `cilium_ipsec_enabled` is defined and `cilium_encryption_type` is not `ipsec`
assert:
that: cilium_encryption_type == 'ipsec'
msg: >
It is not possible to use `cilium_ipsec_enabled` when `cilium_encryption_type` is set to {{ cilium_encryption_type }}.
when:
- cilium_ipsec_enabled is defined
- cilium_ipsec_enabled
- kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool
- name: Stop if kernel version is too low for Cilium Wireguard encryption
assert:
that: ansible_kernel.split('-')[0] is version('5.6.0', '>=')
when:
- kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool
- cilium_encryption_enabled
- cilium_encryption_type == "wireguard"
- not ignore_assert_errors
- name: Stop if bad Cilium identity allocation mode
assert:
that: cilium_identity_allocation_mode in ['crd', 'kvstore']
msg: "cilium_identity_allocation_mode must be either 'crd' or 'kvstore'"
- name: Stop if bad Cilium Cluster ID
assert:
that:
- cilium_cluster_id <= 255
- cilium_cluster_id >= 0
    msg: "'cilium_cluster_id' must be between 0 and 255"
when: cilium_cluster_id is defined
- name: Stop if bad encryption type
assert:
that: cilium_encryption_type in ['ipsec', 'wireguard']
msg: "cilium_encryption_type must be either 'ipsec' or 'wireguard'"
when: cilium_encryption_enabled
- name: Stop if cilium_version is < v1.10.0
assert:
that: cilium_version | regex_replace('v') is version(cilium_min_version_required, '>=')
msg: "cilium_version is too low. Minimum version {{ cilium_min_version_required }}"
# TODO: Clean this task up when we drop backward compatibility support for `cilium_ipsec_enabled`
- name: Set `cilium_encryption_type` to "ipsec" and enable encryption if `cilium_ipsec_enabled` is true
set_fact:
cilium_encryption_type: ipsec
cilium_encryption_enabled: true
when:
- cilium_ipsec_enabled is defined
- cilium_ipsec_enabled


@@ -0,0 +1,97 @@
---
- name: Cilium | Ensure BPFFS mounted
mount:
fstype: bpf
path: /sys/fs/bpf
src: bpffs
state: mounted
- name: Cilium | Create Cilium certs directory
file:
dest: "{{ cilium_cert_dir }}"
state: directory
mode: 0750
owner: root
group: root
when:
- cilium_identity_allocation_mode == "kvstore"
- name: Cilium | Link etcd certificates for cilium
file:
src: "{{ etcd_cert_dir }}/{{ item.s }}"
dest: "{{ cilium_cert_dir }}/{{ item.d }}"
mode: 0644
state: hard
force: yes
loop:
- {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
- {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
- {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
when:
- cilium_identity_allocation_mode == "kvstore"
- name: Cilium | Create hubble dir
file:
path: "{{ kube_config_dir }}/addons/hubble"
state: directory
owner: root
group: root
mode: 0755
when:
- inventory_hostname == groups['kube_control_plane'][0]
- cilium_hubble_install
- name: Cilium | Create Cilium node manifests
template:
src: "{{ item.name }}/{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}"
mode: 0644
loop:
- {name: cilium, file: config.yml, type: cm}
- {name: cilium-operator, file: crb.yml, type: clusterrolebinding}
- {name: cilium-operator, file: cr.yml, type: clusterrole}
- {name: cilium, file: crb.yml, type: clusterrolebinding}
- {name: cilium, file: cr.yml, type: clusterrole}
- {name: cilium, file: secret.yml, type: secret, when: "{{ cilium_encryption_enabled and cilium_encryption_type == 'ipsec' }}"}
- {name: cilium, file: ds.yml, type: ds}
- {name: cilium-operator, file: deploy.yml, type: deploy}
- {name: cilium-operator, file: sa.yml, type: sa}
- {name: cilium, file: sa.yml, type: sa}
register: cilium_node_manifests
when:
- inventory_hostname in groups['kube_control_plane']
- item.when | default(True) | bool
- name: Cilium | Create Cilium Hubble manifests
template:
src: "{{ item.name }}/{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/hubble/{{ item.name }}-{{ item.file }}"
mode: 0644
loop:
- {name: hubble, file: config.yml, type: cm}
- {name: hubble, file: crb.yml, type: clusterrolebinding}
- {name: hubble, file: cr.yml, type: clusterrole}
- {name: hubble, file: cronjob.yml, type: cronjob, when: "{{ cilium_hubble_tls_generate }}"}
- {name: hubble, file: deploy.yml, type: deploy}
- {name: hubble, file: job.yml, type: job, when: "{{ cilium_hubble_tls_generate }}"}
- {name: hubble, file: sa.yml, type: sa}
- {name: hubble, file: service.yml, type: service}
register: cilium_hubble_manifests
when:
- inventory_hostname == groups['kube_control_plane'][0]
- cilium_enable_hubble and cilium_hubble_install
- item.when | default(True) | bool
- name: Cilium | Enable portmap addon
template:
src: 000-cilium-portmap.conflist.j2
dest: /etc/cni/net.d/000-cilium-portmap.conflist
mode: 0644
when: cilium_enable_portmap
- name: Cilium | Copy Cilium CLI binary from download dir
copy:
src: "{{ local_release_dir }}/cilium"
dest: "{{ bin_dir }}/cilium"
mode: 0755
remote_src: yes


@@ -0,0 +1,6 @@
---
- import_tasks: check.yml
- include_tasks: install.yml
- include_tasks: apply.yml


@@ -0,0 +1,9 @@
---
- name: reset | check and remove devices if still present
include_tasks: reset_iface.yml
vars:
iface: "{{ item }}"
loop:
- cilium_host
- cilium_net
- cilium_vxlan


@@ -0,0 +1,12 @@
---
- name: "reset | check if network device {{ iface }} is present"
stat:
path: "/sys/class/net/{{ iface }}"
get_attributes: no
get_checksum: no
get_mime: no
register: device_remains
- name: "reset | remove network device {{ iface }}"
command: "ip link del {{ iface }}"
when: device_remains.stat.exists


@@ -0,0 +1,13 @@
{
"cniVersion": "0.3.1",
"name": "cilium-portmap",
"plugins": [
{
"type": "cilium-cni"
},
{
"type": "portmap",
"capabilities": { "portMappings": true }
}
]
}


@@ -0,0 +1,146 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
rules:
- apiGroups:
- ""
resources:
      # to automatically delete [core|kube]dns pods so that they start being
      # managed by Cilium
- pods
verbs:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
# To remove node taints
- nodes
# To set NetworkUnavailable false on startup
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform LB IP allocation for BGP
- services/status
verbs:
- update
- apiGroups:
- ""
resources:
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
# to check apiserver connectivity
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/status
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
- ciliumendpointslices
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- ciliumbgploadbalancerippools
- ciliumbgppeeringpolicies
- ciliumenvoyconfigs
{% endif %}
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- update
- watch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- ciliumbgploadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- ciliumexternalworkloads.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
{% endif %}


@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: cilium-operator
namespace: kube-system


@@ -0,0 +1,166 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cilium-operator
namespace: kube-system
labels:
io.cilium/app: operator
name: cilium-operator
spec:
replicas: {{ cilium_operator_replicas }}
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
{% if cilium_enable_prometheus %}
annotations:
prometheus.io/port: "{{ cilium_operator_scrape_port }}"
prometheus.io/scrape: "true"
{% endif %}
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- name: cilium-operator
image: "{{ cilium_operator_image_repo }}:{{ cilium_operator_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- cilium-operator
args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
{% if cilium_operator_custom_args is string %}
- {{ cilium_operator_custom_args }}
{% else %}
{% for flag in cilium_operator_custom_args %}
- {{ flag }}
{% endfor %}
{% endif %}
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: cilium-aws
key: AWS_ACCESS_KEY_ID
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: cilium-aws
key: AWS_SECRET_ACCESS_KEY
optional: true
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
name: cilium-aws
key: AWS_DEFAULT_REGION
optional: true
{% if cilium_kube_proxy_replacement == 'strict' %}
- name: KUBERNETES_SERVICE_HOST
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
{% if cilium_enable_prometheus %}
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
ports:
- name: prometheus
containerPort: {{ cilium_operator_scrape_port }}
hostPort: {{ cilium_operator_scrape_port }}
protocol: TCP
{% endif %}
livenessProbe:
httpGet:
{% if cilium_enable_ipv4 %}
host: 127.0.0.1
{% else %}
host: '::1'
{% endif %}
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
{% if cilium_identity_allocation_mode == "kvstore" %}
- name: etcd-config-path
mountPath: /var/lib/etcd-config
readOnly: true
- name: etcd-secrets
mountPath: "{{cilium_cert_dir}}"
readOnly: true
{% endif %}
{% for volume_mount in cilium_operator_extra_volume_mounts %}
- {{ volume_mount | to_nice_yaml(indent=2) | indent(14) }}
{% endfor %}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
io.cilium/app: operator
tolerations:
- operator: Exists
volumes:
- name: cilium-config-path
configMap:
name: cilium-config
{% if cilium_identity_allocation_mode == "kvstore" %}
# To read the etcd config stored in config maps
- name: etcd-config-path
configMap:
name: cilium-config
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: "{{cilium_cert_dir}}"
{% endif %}
{% for volume in cilium_operator_extra_volumes %}
- {{ volume | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}


@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-operator
namespace: kube-system


@@ -0,0 +1,248 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
data:
identity-allocation-mode: {{ cilium_identity_allocation_mode }}
{% if cilium_identity_allocation_mode == "kvstore" %}
# This etcd-config contains the etcd endpoints of your cluster. If you use
# TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
etcd-config: |-
---
endpoints:
{% for ip_addr in etcd_access_addresses.split(',') %}
- {{ ip_addr }}
{% endfor %}
# In case you want to use TLS in etcd, uncomment the 'ca-file' line
# and create a kubernetes secret by following the tutorial in
# https://cilium.link/etcd-config
ca-file: "{{ cilium_cert_dir }}/ca_cert.crt"
# In case you want client to server authentication, uncomment the following
# lines and create a kubernetes secret by following the tutorial in
# https://cilium.link/etcd-config
key-file: "{{ cilium_cert_dir }}/key.pem"
cert-file: "{{ cilium_cert_dir }}/cert.crt"
# kvstore
# https://docs.cilium.io/en/latest/cmdref/kvstore/
kvstore: etcd
kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
{% endif %}
# If you want metrics enabled in all of your Cilium agents, set the port for
# which the Cilium agents will have their metrics exposed.
# This option deprecates the "prometheus-serve-addr" in the
# "cilium-metrics-config" ConfigMap
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled.
{% if cilium_enable_prometheus %}
prometheus-serve-addr: ":{{ cilium_agent_scrape_port }}"
operator-prometheus-serve-addr: ":{{ cilium_operator_scrape_port }}"
enable-metrics: "true"
{% endif %}
# If you want to run cilium in debug mode change this value to true
debug: "{{ cilium_debug }}"
enable-ipv4: "{{ cilium_enable_ipv4 }}"
enable-ipv6: "{{ cilium_enable_ipv6 }}"
# If a serious issue occurs during Cilium startup, this
# invasive option may be set to true to remove all persistent
# state. Endpoints will not be restored using knowledge from a
# prior Cilium run, so they may receive new IP addresses upon
# restart. This also triggers clean-cilium-bpf-state.
clean-cilium-state: "false"
# If you want to clean cilium BPF state, set this to true;
# Removes all BPF maps from the filesystem. Upon restart,
# endpoints are restored with the same IP addresses, however
# any ongoing connections may be disrupted briefly.
# Loadbalancing decisions will be reset, so any ongoing
# connections via a service may be loadbalanced to a different
# backend after restart.
clean-cilium-bpf-state: "false"
# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"
# If you want cilium monitor to aggregate tracing for packets, set this level
  # to "low", "medium", or "maximum". The higher the level, the fewer packets
  # are shown in monitor output.
monitor-aggregation: "{{ cilium_monitor_aggregation }}"
# ct-global-max-entries-* specifies the maximum number of connections
# supported across all endpoints, split by protocol: tcp or other. One pair
# of maps uses these values for IPv4 connections, and another pair of maps
  # uses these values for IPv6 connections.
#
# If these values are modified, then during the next Cilium startup the
# tracking of ongoing connections may be disrupted. This may lead to brief
# policy drops or a change in loadbalancing decisions for a connection.
#
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption
# during the upgrade process, comment out these options.
bpf-ct-global-tcp-max: "524288"
bpf-ct-global-any-max: "262144"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# This may lead to policy drops or a change in loadbalancing decisions for a
# connection for some time. Endpoints may need to be recreated to restore
# connectivity.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
  preallocate-bpf-maps: "{{ cilium_preallocate_bpf_maps }}"
# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "cilium/istio_proxy"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
tunnel: "{{ cilium_tunnel_mode }}"
# Enable Bandwidth Manager
  # Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
  # Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies:
  # if an L7 policy selects a Pod at egress, bandwidth enforcement is disabled for that Pod.
  # The bandwidth manager requires Linux kernel v5.1 or newer.
{% if cilium_enable_bandwidth_manager %}
enable-bandwidth-manager: "true"
{% endif %}
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: "{{ cilium_cluster_name }}"
  # Unique ID of the cluster. Must be unique across all connected clusters and
  # in the range of 1 to 255. Only relevant when building a mesh of clusters.
#cluster-id: 1
{% if cilium_cluster_id is defined %}
cluster-id: "{{ cilium_cluster_id }}"
{% endif %}
# `wait-bpf-mount` is removed after v1.10.4
# https://github.com/cilium/cilium/commit/d2217045cb3726a7f823174e086913b69b8090da
{% if cilium_version | regex_replace('v') is version('1.10.4', '<') %}
# wait-bpf-mount makes init container wait until bpf filesystem is mounted
wait-bpf-mount: "false"
{% endif %}
kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}"
# `native-routing-cidr` is deprecated in 1.10, removed in 1.12.
# Replaced by `ipv4-native-routing-cidr`
# https://github.com/cilium/cilium/pull/16695
{% if cilium_version | regex_replace('v') is version('1.12', '<') %}
native-routing-cidr: "{{ cilium_native_routing_cidr }}"
{% else %}
{% if cilium_native_routing_cidr | length %}
ipv4-native-routing-cidr: "{{ cilium_native_routing_cidr }}"
{% endif %}
{% if cilium_native_routing_cidr_ipv6 | length %}
ipv6-native-routing-cidr: "{{ cilium_native_routing_cidr_ipv6 }}"
{% endif %}
{% endif %}
auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}"
operator-api-serve-addr: "{{ cilium_operator_api_serve_addr }}"
# Hubble settings
{% if cilium_enable_hubble %}
enable-hubble: "true"
{% if cilium_enable_hubble_metrics %}
hubble-metrics-server: ":{{ cilium_hubble_scrape_port }}"
hubble-metrics:
{% for hubble_metrics_cycle in cilium_hubble_metrics %}
{{ hubble_metrics_cycle }}
{% endfor %}
{% endif %}
hubble-listen-address: ":4244"
{% if cilium_enable_hubble and cilium_hubble_install %}
hubble-disable-tls: "{% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}"
hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
{% endif %}
{% endif %}
# IP Masquerade Agent
enable-ip-masq-agent: "{{ cilium_ip_masq_agent_enable }}"
{% for key, value in cilium_config_extra_vars.items() %}
{{ key }}: "{{ value }}"
{% endfor %}
# Enable transparent network encryption
{% if cilium_encryption_enabled %}
{% if cilium_encryption_type == "ipsec" %}
enable-ipsec: "true"
ipsec-key-file: /etc/ipsec/keys
encrypt-node: "{{ cilium_ipsec_node_encryption }}"
{% endif %}
{% if cilium_encryption_type == "wireguard" %}
enable-wireguard: "true"
enable-wireguard-userspace-fallback: "{{ cilium_wireguard_userspace_fallback }}"
{% endif %}
{% endif %}
# IPAM settings
ipam: "{{ cilium_ipam_mode }}"
agent-health-port: "{{ cilium_agent_health_port }}"
{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_host_root != '' %}
cgroup-root: "{{ cilium_cgroup_host_root }}"
{% endif %}
bpf-map-dynamic-size-ratio: "{{ cilium_bpf_map_dynamic_size_ratio }}"
enable-ipv4-masquerade: "{{ cilium_enable_ipv4_masquerade }}"
enable-ipv6-masquerade: "{{ cilium_enable_ipv6_masquerade }}"
enable-bpf-masquerade: "{{ cilium_enable_bpf_masquerade }}"
enable-host-legacy-routing: "{{ cilium_enable_host_legacy_routing }}"
enable-remote-node-identity: "{{ cilium_enable_remote_node_identity }}"
enable-well-known-identities: "{{ cilium_enable_well_known_identities }}"
monitor-aggregation-flags: "{{ cilium_monitor_aggregation_flags }}"
enable-bpf-clock-probe: "{{ cilium_enable_bpf_clock_probe }}"
disable-cnp-status-updates: "{{ cilium_disable_cnp_status_updates }}"
{% if cilium_ip_masq_agent_enable %}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ip-masq-agent
namespace: kube-system
data:
config: |
nonMasqueradeCIDRs:
{% for cidr in cilium_non_masquerade_cidrs %}
- {{ cidr }}
{% endfor %}
masqLinkLocal: {{ cilium_masq_link_local|bool }}
resyncInterval: "{{ cilium_ip_masq_resync_interval }}"
{% endif %}


@@ -0,0 +1,122 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- pods
- endpoints
- nodes
verbs:
- get
- list
- watch
{% if cilium_version | regex_replace('v') is version('1.12', '<') %}
- apiGroups:
- ""
resources:
- pods
- pods/finalizers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
- update
{% endif %}
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
# Deprecated for removal in v1.10
- create
- list
- watch
- update
# This is used when validating policies in preflight. This will need to stay
# until we figure out how to avoid "get" inside the preflight, and then
# should be removed ideally.
- get
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
- ciliumnodes
- ciliumnodes/status
- ciliumidentities
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumegressnatpolicies
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
- ciliumendpointslices
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- ciliumbgploadbalancerippools
- ciliumbgppeeringpolicies
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.11.5', '<') %}
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints/finalizers
- ciliumnodes/finalizers
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies/finalizers
{% endif %}
verbs:
- '*'
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- apiGroups:
- cilium.io
resources:
- ciliumclusterwideenvoyconfigs
- ciliumenvoyconfigs
- ciliumegressgatewaypolicies
verbs:
- list
- watch
{% endif %}


@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system


@@ -0,0 +1,424 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium
namespace: kube-system
labels:
k8s-app: cilium
spec:
selector:
matchLabels:
k8s-app: cilium
updateStrategy:
rollingUpdate:
# Specifies the maximum number of Pods that can be unavailable during the update process.
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
{% if cilium_enable_prometheus %}
prometheus.io/port: "{{ cilium_agent_scrape_port }}"
prometheus.io/scrape: "true"
{% endif %}
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
labels:
k8s-app: cilium
spec:
containers:
- name: cilium-agent
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- cilium-agent
args:
- --config-dir=/tmp/cilium/config-map
{% if cilium_mtu != "" %}
- --mtu={{ cilium_mtu }}
{% endif %}
{% if cilium_agent_custom_args is string %}
- {{ cilium_agent_custom_args }}
{% else %}
{% for flag in cilium_agent_custom_args %}
- {{ flag }}
{% endfor %}
{% endif %}
startupProbe:
httpGet:
host: '127.0.0.1'
path: /healthz
port: {{ cilium_agent_health_port }}
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
livenessProbe:
httpGet:
host: '127.0.0.1'
path: /healthz
port: {{ cilium_agent_health_port }}
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: {{ cilium_agent_health_port }}
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
{% if cilium_kube_proxy_replacement == 'strict' %}
- name: KUBERNETES_SERVICE_HOST
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
{% for env_var in cilium_agent_extra_env_vars %}
- {{ env_var | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}
lifecycle:
postStart:
exec:
command:
- "/cni-install.sh"
- "--cni-exclusive={{ cilium_cni_exclusive | string | lower }}"
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- "--enable-debug={{ cilium_debug | string | lower }}"
- "--log-file={{ cilium_cni_log_file }}"
{% endif %}
preStop:
exec:
command:
- /cni-uninstall.sh
resources:
limits:
cpu: {{ cilium_cpu_limit }}
memory: {{ cilium_memory_limit }}
requests:
cpu: {{ cilium_cpu_requests }}
memory: {{ cilium_memory_requests }}
{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %}
ports:
{% endif %}
{% if cilium_enable_prometheus %}
- name: prometheus
containerPort: {{ cilium_agent_scrape_port }}
hostPort: {{ cilium_agent_scrape_port }}
protocol: TCP
{% endif %}
{% if cilium_enable_hubble_metrics %}
- name: hubble-metrics
containerPort: {{ cilium_hubble_scrape_port }}
hostPort: {{ cilium_hubble_scrape_port }}
protocol: TCP
{% endif %}
securityContext:
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
- name: cilium-run
mountPath: /var/run/cilium
- name: cni-path
mountPath: /host/opt/cni/bin
- name: etc-cni-netd
mountPath: /host/etc/cni/net.d
{% if cilium_identity_allocation_mode == "kvstore" %}
- name: etcd-config-path
mountPath: /var/lib/etcd-config
readOnly: true
- name: etcd-secrets
mountPath: "{{cilium_cert_dir}}"
readOnly: true
{% endif %}
- name: clustermesh-secrets
mountPath: /var/lib/cilium/clustermesh
readOnly: true
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
{% if cilium_ip_masq_agent_enable %}
- name: ip-masq-agent
mountPath: /etc/config
readOnly: true
{% endif %}
# Needed to be able to load kernel modules
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %}
- name: cilium-ipsec-secrets
mountPath: /etc/ipsec
readOnly: true
{% endif %}
{% if cilium_hubble_install %}
- name: hubble-tls
mountPath: /var/lib/cilium/tls/hubble
readOnly: true
{% endif %}
{% for volume_mount in cilium_agent_extra_volume_mounts %}
- {{ volume_mount | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}
# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service
{% if cilium_identity_allocation_mode == "kvstore" %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}
hostNetwork: true
initContainers:
{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_auto_mount %}
- name: mount-cgroup
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: CGROUP_ROOT
value: {{ cilium_cgroup_host_root }}
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -ec
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh and mount that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install cilium cni plugin so that exec permissions
# are available.
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
securityContext:
privileged: true
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.11.7', '>=') %}
- name: apply-sysctl-overwrites
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -ec
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install cilium cni plugin so that exec permissions
# are available.
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
securityContext:
privileged: true
{% endif %}
- name: clean-cilium-state
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-state
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-bpf-state
optional: true
# Removed in 1.11 and up.
# https://github.com/cilium/cilium/commit/f7a3f59fd74983c600bfce9cac364b76d20849d9
{% if cilium_version | regex_replace('v') is version('1.11', '<') %}
- name: CILIUM_WAIT_BPF_MOUNT
valueFrom:
configMapKeyRef:
key: wait-bpf-mount
name: cilium-config
optional: true
{% endif %}
{% if cilium_kube_proxy_replacement == 'strict' %}
- name: KUBERNETES_SERVICE_HOST
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
securityContext:
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
# Required to mount cgroup filesystem from the host to cilium agent pod
- name: cilium-cgroup
mountPath: {{ cilium_cgroup_host_root }}
mountPropagation: HostToContainer
{% endif %}
- name: cilium-run
mountPath: /var/run/cilium
resources:
requests:
cpu: 100m
memory: 100Mi
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
hostNetwork: true
# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service
{% if cilium_identity_allocation_mode == "kvstore" %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
k8s-app: cilium
tolerations:
- operator: Exists
volumes:
# To keep state between restarts / upgrades
- name: cilium-run
hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
# To mount cgroup2 filesystem on the host
- name: hostproc
hostPath:
path: /proc
type: Directory
# To keep state between restarts / upgrades for cgroup2 filesystem
- name: cilium-cgroup
hostPath:
path: {{ cilium_cgroup_host_root }}
type: DirectoryOrCreate
{% endif %}
# To install cilium cni plugin in the host
- name: cni-path
hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
# To install cilium cni configuration in the host
- name: etc-cni-netd
hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
# To be able to load kernel modules
- name: lib-modules
hostPath:
path: /lib/modules
# To access iptables concurrently with other processes (e.g. kube-proxy)
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
{% if cilium_identity_allocation_mode == "kvstore" %}
# To read the etcd config stored in config maps
- name: etcd-config-path
configMap:
name: cilium-config
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
items:
- key: etcd-config
path: etcd.config
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: "{{cilium_cert_dir}}"
{% endif %}
# To read the clustermesh configuration
- name: clustermesh-secrets
secret:
secretName: cilium-clustermesh
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
optional: true
# To read the configuration from the config map
- name: cilium-config-path
configMap:
name: cilium-config
{% if cilium_ip_masq_agent_enable %}
- name: ip-masq-agent
configMap:
name: ip-masq-agent
optional: true
items:
- key: config
path: ip-masq-agent
{% endif %}
{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %}
- name: cilium-ipsec-secrets
secret:
secretName: cilium-ipsec-keys
{% endif %}
{% if cilium_hubble_install %}
- name: hubble-tls
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: hubble-server-certs
optional: true
items:
- key: ca.crt
path: client-ca.crt
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
{% endif %}


@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium
namespace: kube-system


@@ -0,0 +1,9 @@
---
apiVersion: v1
data:
keys: {{ cilium_ipsec_key }}
kind: Secret
metadata:
name: cilium-ipsec-keys
namespace: kube-system
type: Opaque


@@ -0,0 +1,87 @@
---
# Source: cilium/templates/hubble-relay-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: hubble-relay-config
namespace: kube-system
data:
config.yaml: |
peer-service: unix:///var/run/cilium/hubble.sock
listen-address: :4245
dial-timeout:
retry-timeout:
sort-buffer-len-max:
sort-buffer-drain-timeout:
tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt
tls-client-key-file: /var/lib/hubble-relay/tls/client.key
tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt
disable-server-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}
disable-client-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}
---
# Source: cilium/templates/hubble-ui-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: hubble-ui-envoy
namespace: kube-system
data:
envoy.yaml: |
static_resources:
listeners:
- name: listener_hubble_ui
address:
socket_address:
address: 0.0.0.0
port_value: 8081
filter_chains:
- filters:
- name: envoy.filters.network.http_connection_manager
config:
codec_type: auto
stat_prefix: ingress_http
route_config:
name: local_route
virtual_hosts:
- name: local_service
domains: ['*']
routes:
- match:
prefix: '/api/'
route:
cluster: backend
max_grpc_timeout: 0s
prefix_rewrite: '/'
- match:
prefix: '/'
route:
cluster: frontend
cors:
allow_origin_string_match:
- prefix: '*'
allow_methods: GET, PUT, DELETE, POST, OPTIONS
allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout
max_age: '1728000'
expose_headers: grpc-status,grpc-message
http_filters:
- name: envoy.filters.http.grpc_web
- name: envoy.filters.http.cors
- name: envoy.filters.http.router
clusters:
- name: frontend
connect_timeout: 0.25s
type: strict_dns
lb_policy: round_robin
hosts:
- socket_address:
address: 127.0.0.1
port_value: 8080
- name: backend
connect_timeout: 0.25s
type: logical_dns
lb_policy: round_robin
http2_protocol_options: {}
hosts:
- socket_address:
address: 127.0.0.1
port_value: 8090


@@ -0,0 +1,106 @@
{% if cilium_hubble_tls_generate %}
---
# Source: cilium/templates/hubble-generate-certs-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: hubble-generate-certs
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- secrets
resourceNames:
- hubble-server-certs
- hubble-relay-client-certs
- hubble-relay-server-certs
verbs:
- update
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- hubble-ca-cert
verbs:
- update
- apiGroups:
- ""
resources:
- secrets
resourceNames:
- hubble-ca-secret
verbs:
- get
{% endif %}
---
# Source: cilium/templates/hubble-relay-clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-relay
rules:
- apiGroups:
- ""
resources:
- componentstatuses
- endpoints
- namespaces
- nodes
- pods
- services
verbs:
- get
- list
- watch
---
# Source: cilium/templates/hubble-ui-clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-ui
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- componentstatuses
- endpoints
- namespaces
- nodes
- pods
- services
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- "*"
verbs:
- get
- list
- watch


@@ -0,0 +1,44 @@
{% if cilium_hubble_tls_generate %}
---
# Source: cilium/templates/hubble-generate-certs-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: hubble-generate-certs
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-generate-certs
subjects:
- kind: ServiceAccount
name: hubble-generate-certs
namespace: kube-system
{% endif %}
---
# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-relay
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-relay
subjects:
- kind: ServiceAccount
namespace: kube-system
name: hubble-relay
---
# Source: cilium/templates/hubble-ui-clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-ui
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-ui
subjects:
- kind: ServiceAccount
namespace: kube-system
name: hubble-ui


@@ -0,0 +1,49 @@
---
# Source: cilium/templates/hubble-generate-certs-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: hubble-generate-certs
namespace: kube-system
labels:
k8s-app: hubble-generate-certs
spec:
schedule: "0 0 1 */4 *"
concurrencyPolicy: Forbid
jobTemplate:
spec:
template:
metadata:
labels:
k8s-app: hubble-generate-certs
spec:
serviceAccount: hubble-generate-certs
serviceAccountName: hubble-generate-certs
containers:
- name: certgen
image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- "/usr/bin/cilium-certgen"
# Because this is executed as a job, we pass the values as command
# line args instead of via config map. This allows users to inspect
# the values used in past runs by inspecting the completed pod.
args:
- "--cilium-namespace=kube-system"
- "--hubble-ca-reuse-secret=true"
- "--hubble-ca-secret-name=hubble-ca-secret"
- "--hubble-ca-generate=true"
- "--hubble-ca-validity-duration=94608000s"
- "--hubble-ca-config-map-create=true"
- "--hubble-ca-config-map-name=hubble-ca-cert"
- "--hubble-server-cert-generate=true"
- "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io"
- "--hubble-server-cert-validity-duration=94608000s"
- "--hubble-server-cert-secret-name=hubble-server-certs"
- "--hubble-relay-client-cert-generate=true"
- "--hubble-relay-client-cert-validity-duration=94608000s"
- "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs"
- "--hubble-relay-server-cert-generate=false"
hostNetwork: true
restartPolicy: OnFailure
ttlSecondsAfterFinished: 1800


@@ -0,0 +1,161 @@
---
# Source: cilium/templates/hubble-relay-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: hubble-relay
labels:
k8s-app: hubble-relay
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-relay
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: hubble-relay
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "k8s-app"
operator: In
values:
- cilium
topologyKey: "kubernetes.io/hostname"
containers:
- name: hubble-relay
image: "{{ cilium_hubble_relay_image_repo }}:{{ cilium_hubble_relay_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- hubble-relay
args:
- serve
ports:
- name: grpc
containerPort: 4245
readinessProbe:
tcpSocket:
port: grpc
livenessProbe:
tcpSocket:
port: grpc
volumeMounts:
- mountPath: /var/run/cilium
name: hubble-sock-dir
readOnly: true
- mountPath: /etc/hubble-relay
name: config
readOnly: true
- mountPath: /var/lib/hubble-relay/tls
name: tls
readOnly: true
restartPolicy: Always
serviceAccount: hubble-relay
serviceAccountName: hubble-relay
terminationGracePeriodSeconds: 0
volumes:
- configMap:
name: hubble-relay-config
items:
- key: config.yaml
path: config.yaml
name: config
- hostPath:
path: /var/run/cilium
type: Directory
name: hubble-sock-dir
- projected:
sources:
- secret:
name: hubble-relay-client-certs
items:
- key: tls.crt
path: client.crt
- key: tls.key
path: client.key
- configMap:
name: hubble-ca-cert
items:
- key: ca.crt
path: hubble-server-ca.crt
name: tls
---
# Source: cilium/templates/hubble-ui-deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: kube-system
labels:
k8s-app: hubble-ui
name: hubble-ui
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-ui
template:
metadata:
annotations:
labels:
k8s-app: hubble-ui
spec:
securityContext:
runAsUser: 1001
serviceAccount: hubble-ui
serviceAccountName: hubble-ui
containers:
- name: frontend
image: "{{ cilium_hubble_ui_image_repo }}:{{ cilium_hubble_ui_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
ports:
- containerPort: 8080
name: http
resources:
{}
- name: backend
image: "{{ cilium_hubble_ui_backend_image_repo }}:{{ cilium_hubble_ui_backend_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: EVENTS_SERVER_PORT
value: "8090"
- name: FLOWS_API_ADDR
value: "hubble-relay:80"
ports:
- containerPort: 8090
name: grpc
resources:
{}
- name: proxy
image: "{{ cilium_hubble_envoy_image_repo }}:{{ cilium_hubble_envoy_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
ports:
- containerPort: 8081
name: http
resources:
{}
command: ["envoy"]
args:
[
"-c",
"/etc/envoy.yaml",
"-l",
"info"
]
volumeMounts:
- name: hubble-ui-envoy-yaml
mountPath: /etc/envoy.yaml
subPath: envoy.yaml
volumes:
- name: hubble-ui-envoy-yaml
configMap:
name: hubble-ui-envoy


@@ -0,0 +1,45 @@
---
# Source: cilium/templates/hubble-generate-certs-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: hubble-generate-certs
namespace: kube-system
labels:
k8s-app: hubble-generate-certs
spec:
template:
metadata:
labels:
k8s-app: hubble-generate-certs
spec:
serviceAccount: hubble-generate-certs
serviceAccountName: hubble-generate-certs
containers:
- name: certgen
image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- "/usr/bin/cilium-certgen"
# Because this is executed as a job, we pass the values as command
# line args instead of via config map. This allows users to inspect
# the values used in past runs by inspecting the completed pod.
args:
- "--cilium-namespace=kube-system"
- "--hubble-ca-reuse-secret=true"
- "--hubble-ca-secret-name=hubble-ca-secret"
- "--hubble-ca-generate=true"
- "--hubble-ca-validity-duration=94608000s"
- "--hubble-ca-config-map-create=true"
- "--hubble-ca-config-map-name=hubble-ca-cert"
- "--hubble-server-cert-generate=true"
- "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io"
- "--hubble-server-cert-validity-duration=94608000s"
- "--hubble-server-cert-secret-name=hubble-server-certs"
- "--hubble-relay-client-cert-generate=true"
- "--hubble-relay-client-cert-validity-duration=94608000s"
- "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs"
- "--hubble-relay-server-cert-generate=false"
hostNetwork: true
restartPolicy: OnFailure
ttlSecondsAfterFinished: 1800


@@ -0,0 +1,23 @@
{% if cilium_hubble_tls_generate %}
---
# Source: cilium/templates/hubble-generate-certs-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hubble-generate-certs
namespace: kube-system
{% endif %}
---
# Source: cilium/templates/hubble-relay-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hubble-relay
namespace: kube-system
---
# Source: cilium/templates/hubble-ui-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hubble-ui
namespace: kube-system


@@ -0,0 +1,58 @@
{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %}
---
# Source: cilium/templates/cilium-agent-service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-metrics
namespace: kube-system
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: "9091"
labels:
k8s-app: hubble
spec:
clusterIP: None
type: ClusterIP
ports:
- name: hubble-metrics
port: 9091
protocol: TCP
targetPort: hubble-metrics
selector:
k8s-app: cilium
{% endif %}
---
# Source: cilium/templates/hubble-relay-service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-relay
namespace: kube-system
labels:
k8s-app: hubble-relay
spec:
type: ClusterIP
selector:
k8s-app: hubble-relay
ports:
- protocol: TCP
port: 80
targetPort: 4245
---
# Source: cilium/templates/hubble-ui-service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-ui
labels:
k8s-app: hubble-ui
namespace: kube-system
spec:
selector:
k8s-app: hubble-ui
ports:
- name: http
port: 80
targetPort: 8081
type: ClusterIP


@@ -0,0 +1,15 @@
---
- name: CNI | make sure /opt/cni/bin exists
file:
path: /opt/cni/bin
state: directory
mode: 0755
owner: "{{ kube_owner }}"
recurse: true
- name: CNI | Copy cni plugins
unarchive:
src: "{{ local_release_dir }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
dest: "/opt/cni/bin"
mode: 0755
remote_src: yes


@@ -0,0 +1,28 @@
---
# Flannel public IP
# The address that flannel should advertise for reaching this system
# Disabled until https://github.com/coreos/flannel/issues/712 is fixed
# flannel_public_ip: "{{ access_ip|default(ip|default(fallback_ips[inventory_hostname])) }}"
## interface that should be used for flannel operations
## This is actually an inventory cluster-level item
# flannel_interface:
## Select interface that should be used for flannel operations by regexp on Name or IP
## This is actually an inventory cluster-level item
## example: select interface with ip from net 10.0.0.0/23
## single quote and escape backslashes
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
# You can choose which type of flannel backend to use;
# please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md
flannel_backend_type: "vxlan"
flannel_vxlan_vni: 1
flannel_vxlan_port: 8472
flannel_vxlan_direct_routing: false
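# Example of selecting a different backend (illustrative; the "wireguard" backend
# requires Linux kernel >= 5.6, which is checked in this role's tasks):
# flannel_backend_type: "wireguard"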
# Limits for apps
flannel_memory_limit: 500M
flannel_cpu_limit: 300m
flannel_memory_requests: 64M
flannel_cpu_requests: 150m


@@ -0,0 +1,3 @@
---
dependencies:
- role: network_plugin/cni


@@ -0,0 +1,21 @@
---
- name: Flannel | Stop if kernel version is too low for Flannel Wireguard encryption
assert:
that: ansible_kernel.split('-')[0] is version('5.6.0', '>=')
when:
- kube_network_plugin == 'flannel'
- flannel_backend_type == 'wireguard'
- not ignore_assert_errors
- name: Flannel | Create Flannel manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: flannel, file: cni-flannel-rbac.yml, type: sa}
- {name: kube-flannel, file: cni-flannel.yml, type: ds}
register: flannel_node_manifests
when:
- inventory_hostname == groups['kube_control_plane'][0]


@@ -0,0 +1,24 @@
---
- name: reset | check cni network device
stat:
path: /sys/class/net/cni0
get_attributes: no
get_checksum: no
get_mime: no
register: cni
- name: reset | remove the cni0 network device created by flannel
command: ip link del cni0
when: cni.stat.exists
- name: reset | check flannel network device
stat:
path: /sys/class/net/flannel.1
get_attributes: no
get_checksum: no
get_mime: no
register: flannel
- name: reset | remove the flannel.1 network device created by flannel
command: ip link del flannel.1
when: flannel.stat.exists


@@ -0,0 +1,44 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system


@@ -0,0 +1,170 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "{{ kube_pods_subnet }}",
"EnableIPv4": true,
{% if enable_dual_stack_networks %}
"EnableIPv6": true,
"IPv6Network": "{{ kube_pods_subnet_ipv6 }}",
{% endif %}
"Backend": {
"Type": "{{ flannel_backend_type }}"{% if flannel_backend_type == "vxlan" %},
"VNI": {{ flannel_vxlan_vni }},
"Port": {{ flannel_vxlan_port }},
"DirectRouting": {{ flannel_vxlan_direct_routing | to_json }}
{% endif %}
}
}
{% for arch in ['amd64', 'arm64', 'arm', 'ppc64le', 's390x'] %}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
{% if arch == 'amd64' %}
name: kube-flannel
{% else %}
name: kube-flannel-ds-{{ arch }}
{% endif %}
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
priorityClassName: system-node-critical
serviceAccountName: flannel
containers:
- name: kube-flannel
image: {{ flannel_image_repo }}:{{ flannel_image_tag | regex_replace(image_arch,'') }}{{ arch }}
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ flannel_cpu_limit }}
memory: {{ flannel_memory_limit }}
requests:
cpu: {{ flannel_cpu_requests }}
memory: {{ flannel_memory_requests }}
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ]
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- {{ arch }}
initContainers:
- name: install-cni-plugin
image: {{ flannel_init_image_repo }}:{{ flannel_init_image_tag | regex_replace(image_arch,'') }}{{ arch }}
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
image: {{ flannel_image_repo }}:{{ flannel_image_tag | regex_replace(image_arch,'') }}{{ arch }}
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
tolerations:
- operator: Exists
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: cni-plugin
hostPath:
path: /opt/cni/bin
updateStrategy:
rollingUpdate:
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate
{% endfor %}

View File

@@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
emeritus_approvers:
- oilbeater

View File

@@ -0,0 +1,98 @@
---
kube_ovn_db_cpu_request: 500m
kube_ovn_db_memory_request: 200Mi
kube_ovn_db_cpu_limit: 3000m
kube_ovn_db_memory_limit: 3000Mi
kube_ovn_node_cpu_request: 200m
kube_ovn_node_memory_request: 200Mi
kube_ovn_node_cpu_limit: 1000m
kube_ovn_node_memory_limit: 800Mi
kube_ovn_cni_server_cpu_request: 200m
kube_ovn_cni_server_memory_request: 200Mi
kube_ovn_cni_server_cpu_limit: 1000m
kube_ovn_cni_server_memory_limit: 1Gi
kube_ovn_controller_cpu_request: 200m
kube_ovn_controller_memory_request: 200Mi
kube_ovn_controller_cpu_limit: 1000m
kube_ovn_controller_memory_limit: 1Gi
kube_ovn_pinger_cpu_request: 100m
kube_ovn_pinger_memory_request: 200Mi
kube_ovn_pinger_cpu_limit: 200m
kube_ovn_pinger_memory_limit: 400Mi
kube_ovn_monitor_memory_request: 200Mi
kube_ovn_monitor_cpu_request: 200m
kube_ovn_monitor_memory_limit: 200Mi
kube_ovn_monitor_cpu_limit: 200m
kube_ovn_dpdk_node_cpu_request: 1000m
kube_ovn_dpdk_node_memory_request: 2Gi
kube_ovn_dpdk_node_cpu_limit: 1000m
kube_ovn_dpdk_node_memory_limit: 2Gi
kube_ovn_central_replics: 1
kube_ovn_controller_replics: 1
# geneve or vlan
kube_ovn_network_type: geneve
# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt requires a custom-compiled ovs kernel module
kube_ovn_tunnel_type: geneve
## The NIC used for the container network; can be a NIC name or a comma-separated group of regexes, e.g. 'enp6s0f0,eth.*'. If empty, the NIC used by the default route is selected.
# kube_ovn_iface: eth1
## The MTU used by pod iface in overlay networks (default iface MTU - 100)
# kube_ovn_mtu: 1333
## Enable hw-offload, disable traffic mirroring and set the iface to the physical port. Make sure there is an IP address bound to the physical port.
kube_ovn_hw_offload: false
# traffic mirror
kube_ovn_traffic_mirror: false
# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# kube_ovn_default_interface_name: eth0
kube_ovn_external_address: 8.8.8.8
kube_ovn_external_address_ipv6: 2400:3200::1
kube_ovn_external_dns: alauda.cn
# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0
kube_ovn_default_gateway_check: true
kube_ovn_default_logical_gateway: false
# kube_ovn_default_exclude_ips: 10.16.0.1
kube_ovn_node_switch_cidr: 100.64.0.0/16
kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64
## vlan config, set default interface name and vlan id
# kube_ovn_default_interface_name: eth0
kube_ovn_default_vlan_id: 100
kube_ovn_vlan_name: product
## pod nic type, support: veth-pair or internal-port
kube_ovn_pod_nic_type: veth_pair
## Enable load balancer
kube_ovn_enable_lb: true
## Enable network policy support
kube_ovn_enable_np: true
## Enable external vpc support
kube_ovn_enable_external_vpc: true
## Enable checksum
kube_ovn_encap_checksum: true
## enable ssl
kube_ovn_enable_ssl: false
## dpdk
kube_ovn_dpdk_enabled: false
kube_ovn_dpdk_tunnel_iface: br-phy
## eip snat
kube_ovn_eip_snat_enabled: true
## keep vm ip
kube_ovn_keep_vm_ip: true
## cni config priority, default: 01
kube_ovn_cni_config_priority: "01"
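## Example (illustrative inventory override, not a default): pin Kube-OVN to a
## dedicated NIC and lower the pod MTU; the interface name and MTU value below
## are placeholders for your own environment.
# kube_ovn_iface: ens192
# kube_ovn_mtu: 1400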

View File

@@ -0,0 +1,17 @@
---
- name: Kube-OVN | Label ovn-db node
command: >-
{{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kube-OVN | Create Kube-OVN manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- {name: kube-ovn-crd, file: cni-kube-ovn-crd.yml}
- {name: ovn, file: cni-ovn.yml}
- {name: kube-ovn, file: cni-kube-ovn.yml}
register: kube_ovn_node_manifests

File diff suppressed because it is too large

View File

@@ -0,0 +1,610 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: kube-ovn-controller
namespace: kube-system
annotations:
kubernetes.io/description: |
kube-ovn controller
spec:
replicas: {{ kube_ovn_controller_replics }}
selector:
matchLabels:
app: kube-ovn-controller
strategy:
rollingUpdate:
maxSurge: 0%
maxUnavailable: 100%
type: RollingUpdate
template:
metadata:
labels:
app: kube-ovn-controller
component: network
type: infra
spec:
tolerations:
- operator: Exists
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: kube-ovn-controller
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
containers:
- name: kube-ovn-controller
image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- /kube-ovn/start-controller.sh
args:
- --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{''}}
- --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{''}}
- --default-gateway-check={{ kube_ovn_default_gateway_check|string }}
- --default-logical-gateway={{ kube_ovn_default_logical_gateway|string }}
- --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{''}}
- --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{''}}
- --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}}
- --network-type={{ kube_ovn_network_type }}
- --default-interface-name={{ kube_ovn_default_interface_name|default('') }}
- --default-vlan-id={{ kube_ovn_default_vlan_id }}
- --pod-nic-type={{ kube_ovn_pod_nic_type }}
- --enable-lb={{ kube_ovn_enable_lb|string }}
- --enable-np={{ kube_ovn_enable_np|string }}
- --enable-eip-snat={{ kube_ovn_eip_snat_enabled }}
- --enable-external-vpc={{ kube_ovn_enable_external_vpc|string }}
- --logtostderr=false
- --alsologtostderr=true
- --log_file=/var/log/kube-ovn/kube-ovn-controller.log
- --log_file_max_size=0
- --keep-vm-ip={{ kube_ovn_keep_vm_ip }}
env:
- name: ENABLE_SSL
value: "{{ kube_ovn_enable_ssl | lower }}"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: KUBE_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- /kube-ovn/kube-ovn-controller-healthcheck
periodSeconds: 3
timeoutSeconds: 45
livenessProbe:
exec:
command:
- /kube-ovn/kube-ovn-controller-healthcheck
initialDelaySeconds: 300
periodSeconds: 7
failureThreshold: 5
timeoutSeconds: 45
resources:
requests:
cpu: {{ kube_ovn_controller_cpu_request }}
memory: {{ kube_ovn_controller_memory_request }}
limits:
cpu: {{ kube_ovn_controller_cpu_limit }}
memory: {{ kube_ovn_controller_memory_limit }}
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-log
hostPath:
path: /var/log/kube-ovn
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: kube-ovn-cni
namespace: kube-system
annotations:
kubernetes.io/description: |
This daemon set launches the kube-ovn cni daemon.
spec:
selector:
matchLabels:
app: kube-ovn-cni
template:
metadata:
labels:
app: kube-ovn-cni
component: network
type: infra
spec:
tolerations:
- operator: Exists
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
hostPID: true
initContainers:
- name: install-cni
image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command: ["/kube-ovn/install-cni.sh"]
securityContext:
runAsUser: 0
privileged: true
volumeMounts:
- mountPath: /opt/cni/bin
name: cni-bin
containers:
- name: cni-server
image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- bash
- /kube-ovn/start-cniserver.sh
args:
- --enable-mirror={{ kube_ovn_traffic_mirror | lower }}
- --encap-checksum={{ kube_ovn_encap_checksum | lower }}
- --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}}
- --iface={{ kube_ovn_iface|default('') }}
- --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }}
- --network-type={{ kube_ovn_network_type }}
- --default-interface-name={{ kube_ovn_default_interface_name|default('') }}
{% if kube_ovn_mtu is defined %}
- --mtu={{ kube_ovn_mtu }}
{% endif %}
- --cni-conf-name={{ kube_ovn_cni_config_priority }}-kube-ovn.conflist
- --logtostderr=false
- --alsologtostderr=true
- --log_file=/var/log/kube-ovn/kube-ovn-cni.log
- --log_file_max_size=0
securityContext:
runAsUser: 0
privileged: true
env:
- name: ENABLE_SSL
value: "{{ kube_ovn_enable_ssl | lower }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MODULES
value: kube_ovn_fastpath.ko
- name: RPMS
value: openvswitch-kmod
volumeMounts:
- name: host-modules
mountPath: /lib/modules
readOnly: true
- name: shared-dir
mountPath: /var/lib/kubelet/pods
- mountPath: /etc/openvswitch
name: systemid
- mountPath: /etc/cni/net.d
name: cni-conf
- mountPath: /run/openvswitch
name: host-run-ovs
mountPropagation: Bidirectional
- mountPath: /run/ovn
name: host-run-ovn
- mountPath: /var/run/netns
name: host-ns
mountPropagation: HostToContainer
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
- mountPath: /tmp
name: tmp
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 7
successThreshold: 1
tcpSocket:
port: 10665
timeoutSeconds: 3
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 7
successThreshold: 1
tcpSocket:
port: 10665
timeoutSeconds: 3
resources:
requests:
cpu: {{ kube_ovn_cni_server_cpu_request }}
memory: {{ kube_ovn_cni_server_memory_request }}
limits:
cpu: {{ kube_ovn_cni_server_cpu_limit }}
memory: {{ kube_ovn_cni_server_memory_limit }}
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
path: /lib/modules
- name: shared-dir
hostPath:
path: /var/lib/kubelet/pods
- name: systemid
hostPath:
path: /etc/origin/openvswitch
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: cni-conf
hostPath:
path: /etc/cni/net.d
- name: cni-bin
hostPath:
path: /opt/cni/bin
- name: host-ns
hostPath:
path: /var/run/netns
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: kube-ovn-log
hostPath:
path: /var/log/kube-ovn
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: tmp
hostPath:
path: /tmp
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: kube-ovn-pinger
namespace: kube-system
annotations:
kubernetes.io/description: |
This daemon set launches the kube-ovn-pinger daemon.
spec:
selector:
matchLabels:
app: kube-ovn-pinger
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: kube-ovn-pinger
component: network
type: infra
spec:
serviceAccountName: ovn
hostPID: true
containers:
- name: pinger
image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- /kube-ovn/kube-ovn-pinger
args:
- --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{''}}
- --external-dns={{ kube_ovn_external_dns }}
- --logtostderr=false
- --alsologtostderr=true
- --log_file=/var/log/kube-ovn/kube-ovn-pinger.log
- --log_file_max_size=0
securityContext:
runAsUser: 0
privileged: false
env:
- name: ENABLE_SSL
value: "{{ kube_ovn_enable_ssl | lower }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /lib/modules
name: host-modules
readOnly: true
- mountPath: /run/openvswitch
name: host-run-ovs
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
resources:
requests:
cpu: {{ kube_ovn_pinger_cpu_request }}
memory: {{ kube_ovn_pinger_memory_request }}
limits:
cpu: {{ kube_ovn_pinger_cpu_limit }}
memory: {{ kube_ovn_pinger_memory_limit }}
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
path: /lib/modules
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: kube-ovn-log
hostPath:
path: /var/log/kube-ovn
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: kube-ovn-monitor
namespace: kube-system
annotations:
kubernetes.io/description: |
Metrics for OVN components: northd, nb and sb.
spec:
replicas: 1
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: kube-ovn-monitor
template:
metadata:
labels:
app: kube-ovn-monitor
component: network
type: infra
spec:
tolerations:
- operator: Exists
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: kube-ovn-monitor
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
containers:
- name: kube-ovn-monitor
image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command: ["/kube-ovn/start-ovn-monitor.sh"]
securityContext:
runAsUser: 0
privileged: false
env:
- name: ENABLE_SSL
value: "{{ kube_ovn_enable_ssl | lower }}"
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
resources:
requests:
cpu: {{ kube_ovn_monitor_cpu_request }}
memory: {{ kube_ovn_monitor_memory_request }}
limits:
cpu: {{ kube_ovn_monitor_cpu_limit }}
memory: {{ kube_ovn_monitor_memory_limit }}
volumeMounts:
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- cat
- /var/run/ovn/ovn-controller.pid
periodSeconds: 10
timeoutSeconds: 45
livenessProbe:
exec:
command:
- cat
- /var/run/ovn/ovn-controller.pid
initialDelaySeconds: 30
periodSeconds: 10
failureThreshold: 5
timeoutSeconds: 45
nodeSelector:
kubernetes.io/os: "linux"
kube-ovn/role: "master"
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-config-ovn
hostPath:
path: /etc/origin/ovn
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
---
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-monitor
namespace: kube-system
labels:
app: kube-ovn-monitor
spec:
ports:
- name: metrics
port: 10661
type: ClusterIP
{% if enable_dual_stack_networks %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
app: kube-ovn-monitor
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-pinger
namespace: kube-system
labels:
app: kube-ovn-pinger
spec:
{% if enable_dual_stack_networks %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
app: kube-ovn-pinger
ports:
- port: 8080
name: metrics
---
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-controller
namespace: kube-system
labels:
app: kube-ovn-controller
spec:
{% if enable_dual_stack_networks %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
app: kube-ovn-controller
ports:
- port: 10660
name: metrics
---
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-cni
namespace: kube-system
labels:
app: kube-ovn-cni
spec:
{% if enable_dual_stack_networks %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
app: kube-ovn-cni
ports:
- port: 10665
name: metrics

View File

@@ -0,0 +1,513 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ovn
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
name: system:ovn
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- kube-ovn
- apiGroups:
- "kubeovn.io"
resources:
- vpcs
- vpcs/status
- vpc-nat-gateways
- subnets
- subnets/status
- ips
- vips
- vips/status
- vlans
- vlans/status
- provider-networks
- provider-networks/status
- security-groups
- security-groups/status
- htbqoses
- iptables-eips
- iptables-fip-rules
- iptables-dnat-rules
- iptables-snat-rules
- iptables-eips/status
- iptables-fip-rules/status
- iptables-dnat-rules/status
- iptables-snat-rules/status
verbs:
- "*"
- apiGroups:
- ""
resources:
- pods
- pods/exec
- namespaces
- nodes
- configmaps
verbs:
- create
- get
- list
- watch
- patch
- update
- apiGroups:
- "k8s.cni.cncf.io"
resources:
- network-attachment-definitions
verbs:
- create
- delete
- get
- list
- update
- apiGroups:
- ""
- networking.k8s.io
- apps
- extensions
resources:
- networkpolicies
- services
- endpoints
- statefulsets
- daemonsets
- deployments
- deployments/scale
verbs:
- create
- delete
- update
- patch
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- "*"
- apiGroups:
- "k8s.cni.cncf.io"
resources:
- network-attachment-definitions
verbs:
- create
- delete
- get
- list
- update
- apiGroups:
- "kubevirt.io"
resources:
- virtualmachines
- virtualmachineinstances
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ovn
roleRef:
name: system:ovn
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: ovn
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
name: ovn-nb
namespace: kube-system
spec:
ports:
- name: ovn-nb
protocol: TCP
port: 6641
targetPort: 6641
type: ClusterIP
{% if enable_dual_stack_networks %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
app: ovn-central
ovn-nb-leader: "true"
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: ovn-sb
namespace: kube-system
spec:
ports:
- name: ovn-sb
protocol: TCP
port: 6642
targetPort: 6642
type: ClusterIP
{% if enable_dual_stack_networks %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
app: ovn-central
ovn-sb-leader: "true"
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: ovn-northd
namespace: kube-system
spec:
ports:
- name: ovn-northd
protocol: TCP
port: 6643
targetPort: 6643
type: ClusterIP
{% if enable_dual_stack_networks %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
app: ovn-central
ovn-northd-leader: "true"
sessionAffinity: None
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: ovn-central
namespace: kube-system
annotations:
kubernetes.io/description: |
OVN components: northd, nb and sb.
spec:
replicas: {{ kube_ovn_central_replics }}
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: ovn-central
template:
metadata:
labels:
app: ovn-central
component: network
type: infra
spec:
tolerations:
- operator: Exists
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: ovn-central
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
containers:
- name: ovn-central
image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command: ["/kube-ovn/start-db.sh"]
securityContext:
capabilities:
add: ["SYS_NICE"]
env:
- name: ENABLE_SSL
value: "{{ kube_ovn_enable_ssl | lower }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
requests:
cpu: {{ kube_ovn_db_cpu_request }}
memory: {{ kube_ovn_db_memory_request }}
limits:
cpu: {{ kube_ovn_db_cpu_limit }}
memory: {{ kube_ovn_db_memory_limit }}
volumeMounts:
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- bash
- /kube-ovn/ovn-healthcheck.sh
periodSeconds: 15
timeoutSeconds: 45
livenessProbe:
exec:
command:
- bash
- /kube-ovn/ovn-healthcheck.sh
initialDelaySeconds: 30
periodSeconds: 15
failureThreshold: 5
timeoutSeconds: 45
nodeSelector:
kubernetes.io/os: "linux"
kube-ovn/role: "master"
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-config-ovn
hostPath:
path: /etc/origin/ovn
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ovs-ovn
namespace: kube-system
annotations:
kubernetes.io/description: |
This daemon set launches the openvswitch daemon.
spec:
selector:
matchLabels:
app: ovs
updateStrategy:
type: OnDelete
template:
metadata:
labels:
app: ovs
component: network
type: infra
spec:
tolerations:
- operator: Exists
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
hostPID: true
containers:
- name: openvswitch
image: {% if kube_ovn_dpdk_enabled %}{{ kube_ovn_dpdk_container_image_repo }}:{{ kube_ovn_dpdk_container_image_tag }}{% else %}{{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}{% endif %}
imagePullPolicy: {{ k8s_image_pull_policy }}
command: [{% if kube_ovn_dpdk_enabled %}"/kube-ovn/start-ovs-dpdk.sh"{% else %}"/kube-ovn/start-ovs.sh"{% endif %}]
securityContext:
runAsUser: 0
privileged: true
env:
- name: ENABLE_SSL
value: "{{ kube_ovn_enable_ssl | lower }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
{% if not kube_ovn_dpdk_enabled %}
- name: HW_OFFLOAD
value: "{{ kube_ovn_hw_offload | string | lower }}"
- name: TUNNEL_TYPE
value: "{{ kube_ovn_tunnel_type }}"
{% endif %}
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /var/run/netns
name: host-ns
mountPropagation: HostToContainer
- mountPath: /lib/modules
name: host-modules
readOnly: true
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/cni/net.d
name: cni-conf
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
{% if kube_ovn_dpdk_enabled %}
- mountPath: /opt/ovs-config
name: host-config-ovs
- mountPath: /dev/hugepages
name: hugepage
{% endif %}
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- bash
{% if kube_ovn_dpdk_enabled %}
- /kube-ovn/ovs-dpdk-healthcheck.sh
{% else %}
- /kube-ovn/ovs-healthcheck.sh
{% endif %}
periodSeconds: 5
timeoutSeconds: 45
livenessProbe:
exec:
command:
- bash
{% if kube_ovn_dpdk_enabled %}
- /kube-ovn/ovs-dpdk-healthcheck.sh
{% else %}
- /kube-ovn/ovs-healthcheck.sh
{% endif %}
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 5
timeoutSeconds: 45
resources:
{% if kube_ovn_dpdk_enabled %}
requests:
cpu: {{ kube_ovn_dpdk_node_cpu_request }}
memory: {{ kube_ovn_dpdk_node_memory_request }}
limits:
cpu: {{ kube_ovn_dpdk_node_cpu_limit }}
memory: {{ kube_ovn_dpdk_node_memory_limit }}
hugepages-1Gi: 1Gi
{% else %}
requests:
cpu: {{ kube_ovn_node_cpu_request }}
memory: {{ kube_ovn_node_memory_request }}
limits:
cpu: {{ kube_ovn_node_cpu_limit }}
memory: {{ kube_ovn_node_memory_limit }}
{% endif %}
nodeSelector:
kubernetes.io/os: "linux"
ovn.kubernetes.io/ovs_dp_type: "kernel"
volumes:
- name: host-modules
hostPath:
path: /lib/modules
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-sys
hostPath:
path: /sys
- name: host-ns
hostPath:
path: /var/run/netns
- name: cni-conf
hostPath:
path: /etc/cni/net.d
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-config-ovn
hostPath:
path: /etc/origin/ovn
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: host-log-ovn
hostPath:
path: /var/log/ovn
{% if kube_ovn_dpdk_enabled %}
- name: host-config-ovs
hostPath:
path: /opt/ovs-config
type: DirectoryOrCreate
- name: hugepage
emptyDir:
medium: HugePages
{% endif %}
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls

View File

@@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- bozzo
reviewers:
- bozzo

View File

@@ -0,0 +1,66 @@
---
# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
kube_router_run_router: true
# Enables Network Policy -- sets up iptables to provide ingress firewall for pods
kube_router_run_firewall: true
# Enables Service Proxy -- sets up IPVS for Kubernetes Services
# see docs/kube-router.md "Caveats" section
kube_router_run_service_proxy: false
# Add the Cluster IP of the service to the RIB so that it gets advertised to the BGP peers.
kube_router_advertise_cluster_ip: false
# Add External IP of service to the RIB so that it gets advertised to the BGP peers.
kube_router_advertise_external_ip: false
# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
kube_router_advertise_loadbalancer_ip: false
# Adjust the kube-router daemonset manifest with the changes needed for DSR
kube_router_enable_dsr: false
# Array of arbitrary extra arguments to kube-router, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
kube_router_extra_args: []
# ASN of the cluster, used when communicating with external BGP routers
kube_router_cluster_asn: ~
# ASNs of the BGP peers to which cluster nodes will advertise the cluster IP and each node's pod CIDR.
kube_router_peer_router_asns: ~
# The IP addresses of the external routers with which all nodes will peer and to which they advertise the cluster IP and pod CIDRs.
kube_router_peer_router_ips: ~
# The remote port of the external BGP peers. If not set, the default BGP port (179) will be used.
kube_router_peer_router_ports: ~
# Sets up the node CNI to allow hairpin mode; requires node reboots, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
kube_router_support_hairpin_mode: false
# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
kube_router_dns_policy: ClusterFirstWithHostNet
# Adds annotations to kubernetes nodes for advanced configuration of BGP Peers.
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/bgp.md
# Array of annotations for master nodes
kube_router_annotations_master: []
# Array of annotations for every (non-master) node
kube_router_annotations_node: []
# Array of annotations common to all nodes
kube_router_annotations_all: []
# Enables scraping kube-router metrics with Prometheus
kube_router_enable_metrics: false
# Path to serve Prometheus metrics on
kube_router_metrics_path: /metrics
# Prometheus metrics port to use
kube_router_metrics_port: 9255
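## Example (illustrative inventory override, not a default): peer every node with an
## upstream router and advertise service Cluster IPs; the ASNs and router address
## below are placeholders for your own environment.
# kube_router_cluster_asn: 65001
# kube_router_peer_router_asns: 65000
# kube_router_peer_router_ips: 10.0.0.1
# kube_router_advertise_cluster_ip: true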

View File

@@ -0,0 +1,20 @@
---
- name: reset_kube_router
command: /bin/true
notify:
- Kube-router | delete kube-router docker containers
- Kube-router | delete kube-router crio/containerd containers
- name: Kube-router | delete kube-router docker containers
shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f"
register: docker_kube_router_remove
until: docker_kube_router_remove is succeeded
retries: 5
when: container_manager in ["docker"]
- name: Kube-router | delete kube-router crio/containerd containers
shell: '{{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
register: crictl_kube_router_remove
until: crictl_kube_router_remove is succeeded
retries: 5
when: container_manager in ["crio", "containerd"]

Some files were not shown because too many files have changed in this diff