dsk-dev kubespray 이동
This commit is contained in:
256
ansible/kubespray/roles/network_plugin/cilium/defaults/main.yml
Normal file
256
ansible/kubespray/roles/network_plugin/cilium/defaults/main.yml
Normal file
# ansible/kubespray/roles/network_plugin/cilium/defaults/main.yml
---
cilium_min_version_required: "1.10"
# Log-level
cilium_debug: false

cilium_mtu: ""
cilium_enable_ipv4: true
cilium_enable_ipv6: false

# Cilium agent health port
cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879 {%- else -%} 9876 {%- endif -%}"

# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
#   These can be queried with:
#     `kubectl get ciliumid`
# - "kvstore" stores identities in an etcd kvstore.
# - In order to support External Workloads, "crd" is required
#   - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
# - KVStore operations are only required when cilium-operator is running with any of the below options:
#   - --synchronize-k8s-services
#   - --synchronize-k8s-nodes
#   - --identity-allocation-mode=kvstore
#   - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
cilium_identity_allocation_mode: kvstore

# Etcd SSL dirs
cilium_cert_dir: /etc/cilium/certs
kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem

# Limits for apps
cilium_memory_limit: 500M
cilium_cpu_limit: 500m
cilium_memory_requests: 64M
cilium_cpu_requests: 100m

# Overlay Network Mode
cilium_tunnel_mode: vxlan
# Optional features
cilium_enable_prometheus: false
# Enable if you want to make use of hostPort mappings
cilium_enable_portmap: false
# Monitor aggregation level (none/low/medium/maximum)
cilium_monitor_aggregation: medium
# Kube Proxy Replacement mode (strict/probe/partial)
cilium_kube_proxy_replacement: probe

# If upgrading from Cilium < 1.5, you may want to override some of these options
# to prevent service disruptions. See also:
# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
cilium_preallocate_bpf_maps: false

# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
cilium_tofqdns_enable_poller: false

# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
cilium_enable_legacy_services: false

# Deploy cilium even if kube_network_plugin is not cilium.
# This enables to deploy cilium alongside another CNI to replace kube-proxy.
cilium_deploy_additionally: false

# Auto direct nodes routes can be used to advertise pods routes in your cluster
# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`).
# This works only if you have a L2 connectivity between all your nodes.
# You will also have to specify the variable `cilium_native_routing_cidr` to
# make this work. Please refer to the cilium documentation for more
# information about this kind of setups.
cilium_auto_direct_node_routes: false

# Allows to explicitly specify the IPv4 CIDR for native routing.
# When specified, Cilium assumes networking for this CIDR is preconfigured and
# hands traffic destined for that range to the Linux network stack without
# applying any SNAT.
# Generally speaking, specifying a native routing CIDR implies that Cilium can
# depend on the underlying networking stack to route packets to their
# destination. To offer a concrete example, if Cilium is configured to use
# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
# the user must configure the routes to reach pods, either manually or by
# setting the auto-direct-node-routes flag.
cilium_native_routing_cidr: ""

# Allows to explicitly specify the IPv6 CIDR for native routing.
cilium_native_routing_cidr_ipv6: ""

# Enable transparent network encryption.
cilium_encryption_enabled: false

# Encryption method. Can be either ipsec or wireguard.
# Only effective when `cilium_encryption_enabled` is set to true.
cilium_encryption_type: "ipsec"

# Enable encryption for pure node to node traffic.
# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
cilium_ipsec_node_encryption: false

# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation.
# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard,
# it will fallback on the wireguard-go user-space implementation of WireGuard.
# This option is only effective when `cilium_encryption_type` is set to `wireguard`.
cilium_wireguard_userspace_fallback: false

# Enable Bandwidth Manager
# Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
# Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies.
# In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods.
# Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
cilium_enable_bandwidth_manager: false

# IP Masquerade Agent
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
cilium_ip_masq_agent_enable: false

### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
cilium_non_masquerade_cidrs:
  - 10.0.0.0/8
  - 172.16.0.0/12
  - 192.168.0.0/16
  - 100.64.0.0/10
  - 192.0.0.0/24
  - 192.0.2.0/24
  - 192.88.99.0/24
  - 198.18.0.0/15
  - 198.51.100.0/24
  - 203.0.113.0/24
  - 240.0.0.0/4
### Indicates whether to masquerade traffic to the link local prefix.
### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
cilium_masq_link_local: false
### A time interval at which the agent attempts to reload config from disk
cilium_ip_masq_resync_interval: 60s

# Hubble
### Enable Hubble without install
cilium_enable_hubble: false
### Enable Hubble Metrics
cilium_enable_hubble_metrics: false
### if cilium_enable_hubble_metrics: true
cilium_hubble_metrics: {}
# - dns
# - drop
# - tcp
# - flow
# - icmp
# - http
### Enable Hubble install
cilium_hubble_install: false
### Enable auto generate certs if cilium_hubble_install: true
cilium_hubble_tls_generate: false

# IP address management mode for v1.9+.
# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
cilium_ipam_mode: kubernetes

# Extra arguments for the Cilium agent
cilium_agent_custom_args: []

# For adding and mounting extra volumes to the cilium agent
cilium_agent_extra_volumes: []
cilium_agent_extra_volume_mounts: []

cilium_agent_extra_env_vars: []

cilium_operator_replicas: 2

# The address at which the cilium operator binds the health check api
cilium_operator_api_serve_addr: "127.0.0.1:9234"

## A dictionary of extra config variables to add to cilium-config, formatted like:
##  cilium_config_extra_vars:
##    var1: "value1"
##    var2: "value2"
cilium_config_extra_vars: {}

# For adding and mounting extra volumes to the cilium operator
cilium_operator_extra_volumes: []
cilium_operator_extra_volume_mounts: []

# Extra arguments for the Cilium Operator
cilium_operator_custom_args: []

# Name of the cluster. Only relevant when building a mesh of clusters.
cilium_cluster_name: default

# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime.
# Available for Cilium v1.10 and up.
cilium_cni_exclusive: true

# Configure the log file for CNI logging with retention policy of 7 days.
# Disable CNI file logging by setting this field to empty explicitly.
# Available for Cilium v1.12 and up.
cilium_cni_log_file: "/var/run/cilium/cilium-cni.log"

# -- Configure cgroup related configuration
# -- Enable auto mount of cgroup2 filesystem.
# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at
# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod.
# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted
# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the
# volume will be mounted inside the cilium agent pod at the same path.
# Available for Cilium v1.11 and up
cilium_cgroup_auto_mount: true
# -- Configure cgroup root where cgroup2 filesystem is mounted on the host
cilium_cgroup_host_root: "/run/cilium/cgroupv2"

# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
cilium_bpf_map_dynamic_size_ratio: "0.0025"

# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
cilium_enable_ipv4_masquerade: true
# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
cilium_enable_ipv6_masquerade: true

# -- Enable native IP masquerade support in eBPF
cilium_enable_bpf_masquerade: false

# -- Configure whether direct routing mode should route traffic via
# host stack (true) or directly and more efficiently out of BPF (false) if
# the kernel supports it. The latter has the implication that it will also
# bypass netfilter in the host namespace.
cilium_enable_host_legacy_routing: true

# -- Enable use of the remote node identity.
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
cilium_enable_remote_node_identity: true

# -- Enable the use of well-known identities.
cilium_enable_well_known_identities: false

# The monitor aggregation flags determine which TCP flags which, upon the
# first observation, cause monitor notifications to be generated.
#
# Only effective when monitor aggregation is set to "medium" or higher.
cilium_monitor_aggregation_flags: "all"

cilium_enable_bpf_clock_probe: true

# -- Whether to enable CNP status updates.
cilium_disable_cnp_status_updates: true

# Configure how long to wait for the Cilium DaemonSet to be ready again
cilium_rolling_restart_wait_retries_count: 30
cilium_rolling_restart_wait_retries_delay_seconds: 10

# Cilium changed the default metrics exporter ports in 1.12
cilium_agent_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9962', '9090') }}"
cilium_operator_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9963', '6942') }}"
cilium_hubble_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9965', '9091') }}"
# ansible/kubespray/roles/network_plugin/cilium/meta/main.yml
# Role dependency: the generic CNI role must run before this role.
---
dependencies:
  - role: network_plugin/cni
# ansible/kubespray/roles/network_plugin/cilium/tasks/apply.yml
# Applies the rendered Cilium (and optionally Hubble) manifests from the first
# control-plane node, then waits for the Cilium DaemonSet pods to become ready.
---
- name: Cilium | Start Resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.name }}-{{ item.item.file }}"
    state: "latest"
  loop: "{{ cilium_node_manifests.results }}"
  # Only the first control-plane node applies; skipped template results are ignored.
  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

- name: Cilium | Wait for pods to run
  # jsonpath selects pods whose first container is NOT ready; loop until none remain.
  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
  register: pods_not_ready
  until: pods_not_ready.stdout.find("cilium")==-1
  retries: "{{ cilium_rolling_restart_wait_retries_count | int }}"
  delay: "{{ cilium_rolling_restart_wait_retries_delay_seconds | int }}"
  # Best-effort wait: do not fail the play if pods never become ready here.
  failed_when: false
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: Cilium | Hubble install
  kube:
    name: "{{ item.item.name }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/addons/hubble/{{ item.item.name }}-{{ item.item.file }}"
    state: "latest"
  loop: "{{ cilium_hubble_manifests.results }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
    - cilium_enable_hubble and cilium_hubble_install
# ansible/kubespray/roles/network_plugin/cilium/tasks/check.yml
# Pre-flight validation of Cilium-related variables before install.
---
- name: Cilium | Check Cilium encryption `cilium_ipsec_key` for ipsec
  assert:
    that:
      - "cilium_ipsec_key is defined"
    msg: "cilium_ipsec_key should be defined to enable encryption using ipsec"
  when:
    - cilium_encryption_enabled
    - cilium_encryption_type == "ipsec"
    - cilium_tunnel_mode in ['vxlan']

# TODO: Clean this task up when we drop backward compatibility support for `cilium_ipsec_enabled`
- name: Stop if `cilium_ipsec_enabled` is defined and `cilium_encryption_type` is not `ipsec`
  assert:
    that: cilium_encryption_type == 'ipsec'
    msg: >
      It is not possible to use `cilium_ipsec_enabled` when `cilium_encryption_type` is set to {{ cilium_encryption_type }}.
  when:
    - cilium_ipsec_enabled is defined
    - cilium_ipsec_enabled
    - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool

- name: Stop if kernel version is too low for Cilium Wireguard encryption
  assert:
    that: ansible_kernel.split('-')[0] is version('5.6.0', '>=')
  when:
    - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool
    - cilium_encryption_enabled
    - cilium_encryption_type == "wireguard"
    - not ignore_assert_errors

- name: Stop if bad Cilium identity allocation mode
  assert:
    that: cilium_identity_allocation_mode in ['crd', 'kvstore']
    msg: "cilium_identity_allocation_mode must be either 'crd' or 'kvstore'"

- name: Stop if bad Cilium Cluster ID
  assert:
    that:
      - cilium_cluster_id <= 255
      - cilium_cluster_id >= 0
    # NOTE: the assertion above permits 0, so the message says 0-255.
    msg: "'cilium_cluster_id' must be between 0 and 255"
  when: cilium_cluster_id is defined

- name: Stop if bad encryption type
  assert:
    that: cilium_encryption_type in ['ipsec', 'wireguard']
    msg: "cilium_encryption_type must be either 'ipsec' or 'wireguard'"
  when: cilium_encryption_enabled

- name: Stop if cilium_version is < v1.10.0
  assert:
    that: cilium_version | regex_replace('v') is version(cilium_min_version_required, '>=')
    msg: "cilium_version is too low. Minimum version {{ cilium_min_version_required }}"

# TODO: Clean this task up when we drop backward compatibility support for `cilium_ipsec_enabled`
- name: Set `cilium_encryption_type` to "ipsec" if `cilium_ipsec_enabled` is true
  set_fact:
    cilium_encryption_type: ipsec
    cilium_encryption_enabled: true
  when:
    - cilium_ipsec_enabled is defined
    - cilium_ipsec_enabled
# ansible/kubespray/roles/network_plugin/cilium/tasks/install.yml
# Prepares the node (BPF filesystem, etcd certs) and renders all Cilium /
# Hubble manifests on the control-plane nodes.
---
- name: Cilium | Ensure BPFFS mounted
  mount:
    fstype: bpf
    path: /sys/fs/bpf
    src: bpffs
    state: mounted

- name: Cilium | Create Cilium certs directory
  file:
    dest: "{{ cilium_cert_dir }}"
    state: directory
    # Octal modes quoted to avoid YAML 1.1 octal-integer misparsing.
    mode: "0750"
    owner: root
    group: root
  when:
    - cilium_identity_allocation_mode == "kvstore"

- name: Cilium | Link etcd certificates for cilium
  file:
    src: "{{ etcd_cert_dir }}/{{ item.s }}"
    dest: "{{ cilium_cert_dir }}/{{ item.d }}"
    mode: "0644"
    state: hard
    force: true
  loop:
    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
  when:
    - cilium_identity_allocation_mode == "kvstore"

- name: Cilium | Create hubble dir
  file:
    path: "{{ kube_config_dir }}/addons/hubble"
    state: directory
    owner: root
    group: root
    mode: "0755"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - cilium_hubble_install

- name: Cilium | Create Cilium node manifests
  template:
    src: "{{ item.name }}/{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}"
    mode: "0644"
  loop:
    - {name: cilium, file: config.yml, type: cm}
    - {name: cilium-operator, file: crb.yml, type: clusterrolebinding}
    - {name: cilium-operator, file: cr.yml, type: clusterrole}
    - {name: cilium, file: crb.yml, type: clusterrolebinding}
    - {name: cilium, file: cr.yml, type: clusterrole}
    # Secret only rendered when IPsec encryption is enabled.
    - {name: cilium, file: secret.yml, type: secret, when: "{{ cilium_encryption_enabled and cilium_encryption_type == 'ipsec' }}"}
    - {name: cilium, file: ds.yml, type: ds}
    - {name: cilium-operator, file: deploy.yml, type: deploy}
    - {name: cilium-operator, file: sa.yml, type: sa}
    - {name: cilium, file: sa.yml, type: sa}
  register: cilium_node_manifests
  when:
    - inventory_hostname in groups['kube_control_plane']
    - item.when | default(True) | bool

- name: Cilium | Create Cilium Hubble manifests
  template:
    src: "{{ item.name }}/{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/addons/hubble/{{ item.name }}-{{ item.file }}"
    mode: "0644"
  loop:
    - {name: hubble, file: config.yml, type: cm}
    - {name: hubble, file: crb.yml, type: clusterrolebinding}
    - {name: hubble, file: cr.yml, type: clusterrole}
    - {name: hubble, file: cronjob.yml, type: cronjob, when: "{{ cilium_hubble_tls_generate }}"}
    - {name: hubble, file: deploy.yml, type: deploy}
    - {name: hubble, file: job.yml, type: job, when: "{{ cilium_hubble_tls_generate }}"}
    - {name: hubble, file: sa.yml, type: sa}
    - {name: hubble, file: service.yml, type: service}
  register: cilium_hubble_manifests
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - cilium_enable_hubble and cilium_hubble_install
    - item.when | default(True) | bool

- name: Cilium | Enable portmap addon
  template:
    src: 000-cilium-portmap.conflist.j2
    dest: /etc/cni/net.d/000-cilium-portmap.conflist
    mode: "0644"
  when: cilium_enable_portmap

- name: Cilium | Copy Ciliumcli binary from download dir
  copy:
    src: "{{ local_release_dir }}/cilium"
    dest: "{{ bin_dir }}/cilium"
    mode: "0755"
    remote_src: true
# ansible/kubespray/roles/network_plugin/cilium/tasks/main.yml
# Entry point: validate variables, render manifests, then apply them.
---
- import_tasks: check.yml

- include_tasks: install.yml

- include_tasks: apply.yml
# ansible/kubespray/roles/network_plugin/cilium/tasks/reset.yml
# Removes the network interfaces Cilium creates, if they are still present.
---
- name: reset | check and remove devices if still present
  include_tasks: reset_iface.yml
  vars:
    iface: "{{ item }}"
  loop:
    - cilium_host
    - cilium_net
    - cilium_vxlan
# ansible/kubespray/roles/network_plugin/cilium/tasks/reset_iface.yml
# Deletes a single network interface `iface` when it exists on the node.
---
- name: "reset | check if network device {{ iface }} is present"
  stat:
    path: "/sys/class/net/{{ iface }}"
    # Skip expensive stat extras; only existence is needed.
    get_attributes: false
    get_checksum: false
    get_mime: false
  register: device_remains

- name: "reset | remove network device {{ iface }}"
  command: "ip link del {{ iface }}"
  when: device_remains.stat.exists
{
  "cniVersion": "0.3.1",
  "name": "cilium-portmap",
  "plugins": [
    {
      "type": "cilium-cni"
    },
    {
      "type": "portmap",
      "capabilities": { "portMappings": true }
    }
  ]
}
# cilium-operator ClusterRole template (Jinja2)
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium-operator
rules:
  - apiGroups:
      - ""
    resources:
      # to automatically delete [core|kube]dns pods so that they start being
      # managed by Cilium
      - pods
    verbs:
      - get
      - list
      - watch
      - delete
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      # To remove node taints
      - nodes
      # To set NetworkUnavailable false on startup
      - nodes/status
    verbs:
      - patch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      # to perform LB IP allocation for BGP
      - services/status
    verbs:
      - update
  - apiGroups:
      - ""
    resources:
      # to perform the translation of a CNP that contains `ToGroup` to its endpoints
      - services
      - endpoints
      # to check apiserver connectivity
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - cilium.io
    resources:
      - ciliumnetworkpolicies
      - ciliumnetworkpolicies/status
      - ciliumnetworkpolicies/finalizers
      - ciliumclusterwidenetworkpolicies
      - ciliumclusterwidenetworkpolicies/status
      - ciliumclusterwidenetworkpolicies/finalizers
      - ciliumendpoints
      - ciliumendpoints/status
      - ciliumendpoints/finalizers
      - ciliumnodes
      - ciliumnodes/status
      - ciliumnodes/finalizers
      - ciliumidentities
      - ciliumidentities/status
      - ciliumidentities/finalizers
      - ciliumlocalredirectpolicies
      - ciliumlocalredirectpolicies/status
      - ciliumlocalredirectpolicies/finalizers
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
      - ciliumendpointslices
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
      - ciliumbgploadbalancerippools
      - ciliumbgppeeringpolicies
      - ciliumenvoyconfigs
{% endif %}
    verbs:
      - '*'
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - create
      - get
      - list
      - update
      - watch
  # For cilium-operator running in HA mode.
  #
  # Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
  # between multiple running instances.
  # The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
  # common and fewer objects in the cluster watch "all Leases".
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - create
      - get
      - update
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - update
    resourceNames:
      - ciliumbgploadbalancerippools.cilium.io
      - ciliumbgppeeringpolicies.cilium.io
      - ciliumclusterwideenvoyconfigs.cilium.io
      - ciliumclusterwidenetworkpolicies.cilium.io
      - ciliumegressgatewaypolicies.cilium.io
      - ciliumegressnatpolicies.cilium.io
      - ciliumendpoints.cilium.io
      - ciliumendpointslices.cilium.io
      - ciliumenvoyconfigs.cilium.io
      - ciliumexternalworkloads.cilium.io
      - ciliumidentities.cilium.io
      - ciliumlocalredirectpolicies.cilium.io
      - ciliumnetworkpolicies.cilium.io
      - ciliumnodes.cilium.io
{% endif %}
# cilium-operator ClusterRoleBinding template
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium-operator
subjects:
  - kind: ServiceAccount
    name: cilium-operator
    namespace: kube-system
# cilium-operator Deployment template (Jinja2)
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cilium-operator
  namespace: kube-system
  labels:
    io.cilium/app: operator
    name: cilium-operator
spec:
  replicas: {{ cilium_operator_replicas }}
  selector:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
{% if cilium_enable_prometheus %}
      annotations:
        prometheus.io/port: "{{ cilium_operator_scrape_port }}"
        prometheus.io/scrape: "true"
{% endif %}
      labels:
        io.cilium/app: operator
        name: cilium-operator
    spec:
      containers:
        - name: cilium-operator
          image: "{{ cilium_operator_image_repo }}:{{ cilium_operator_image_tag }}"
          imagePullPolicy: {{ k8s_image_pull_policy }}
          command:
            - cilium-operator
          args:
            - --config-dir=/tmp/cilium/config-map
            - --debug=$(CILIUM_DEBUG)
{% if cilium_operator_custom_args is string %}
            - {{ cilium_operator_custom_args }}
{% else %}
{% for flag in cilium_operator_custom_args %}
            - {{ flag }}
{% endfor %}
{% endif %}
          env:
            - name: K8S_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: CILIUM_K8S_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: CILIUM_DEBUG
              valueFrom:
                configMapKeyRef:
                  key: debug
                  name: cilium-config
                  optional: true
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  name: cilium-aws
                  key: AWS_ACCESS_KEY_ID
                  optional: true
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: cilium-aws
                  key: AWS_SECRET_ACCESS_KEY
                  optional: true
            - name: AWS_DEFAULT_REGION
              valueFrom:
                secretKeyRef:
                  name: cilium-aws
                  key: AWS_DEFAULT_REGION
                  optional: true
{% if cilium_kube_proxy_replacement == 'strict' %}
            - name: KUBERNETES_SERVICE_HOST
              value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
            - name: KUBERNETES_SERVICE_PORT
              value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
{% if cilium_enable_prometheus %}
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
          ports:
            - name: prometheus
              containerPort: {{ cilium_operator_scrape_port }}
              hostPort: {{ cilium_operator_scrape_port }}
              protocol: TCP
{% endif %}
          livenessProbe:
            httpGet:
{% if cilium_enable_ipv4 %}
              host: 127.0.0.1
{% else %}
              host: '::1'
{% endif %}
              path: /healthz
              port: 9234
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            timeoutSeconds: 3
          volumeMounts:
            - name: cilium-config-path
              mountPath: /tmp/cilium/config-map
              readOnly: true
{% if cilium_identity_allocation_mode == "kvstore" %}
            - name: etcd-config-path
              mountPath: /var/lib/etcd-config
              readOnly: true
            - name: etcd-secrets
              mountPath: "{{ cilium_cert_dir }}"
              readOnly: true
{% endif %}
{% for volume_mount in cilium_operator_extra_volume_mounts %}
            - {{ volume_mount | to_nice_yaml(indent=2) | indent(14) }}
{% endfor %}
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      restartPolicy: Always
      priorityClassName: system-node-critical
      serviceAccount: cilium-operator
      serviceAccountName: cilium-operator
      # In HA mode, cilium-operator pods must not be scheduled on the same
      # node as they will clash with each other.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: kubernetes.io/hostname
              labelSelector:
                matchLabels:
                  io.cilium/app: operator
      tolerations:
        - operator: Exists
      volumes:
        - name: cilium-config-path
          configMap:
            name: cilium-config
{% if cilium_identity_allocation_mode == "kvstore" %}
        # To read the etcd config stored in config maps
        - name: etcd-config-path
          configMap:
            name: cilium-config
            defaultMode: 420
            items:
              - key: etcd-config
                path: etcd.config
        # To read the k8s etcd secrets in case the user might want to use TLS
        - name: etcd-secrets
          hostPath:
            path: "{{ cilium_cert_dir }}"
{% endif %}
{% for volume in cilium_operator_extra_volumes %}
        - {{ volume | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}
# cilium-operator ServiceAccount template
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cilium-operator
  namespace: kube-system
@@ -0,0 +1,248 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cilium-config
|
||||
namespace: kube-system
|
||||
data:
|
||||
identity-allocation-mode: {{ cilium_identity_allocation_mode }}
|
||||
|
||||
{% if cilium_identity_allocation_mode == "kvstore" %}
|
||||
# This etcd-config contains the etcd endpoints of your cluster. If you use
|
||||
# TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
|
||||
etcd-config: |-
|
||||
---
|
||||
endpoints:
|
||||
{% for ip_addr in etcd_access_addresses.split(',') %}
|
||||
- {{ ip_addr }}
|
||||
{% endfor %}
|
||||
|
||||
# In case you want to use TLS in etcd, uncomment the 'ca-file' line
|
||||
# and create a kubernetes secret by following the tutorial in
|
||||
# https://cilium.link/etcd-config
|
||||
ca-file: "{{ cilium_cert_dir }}/ca_cert.crt"
|
||||
|
||||
# In case you want client to server authentication, uncomment the following
|
||||
# lines and create a kubernetes secret by following the tutorial in
|
||||
# https://cilium.link/etcd-config
|
||||
key-file: "{{ cilium_cert_dir }}/key.pem"
|
||||
cert-file: "{{ cilium_cert_dir }}/cert.crt"
|
||||
|
||||
# kvstore
|
||||
# https://docs.cilium.io/en/latest/cmdref/kvstore/
|
||||
kvstore: etcd
|
||||
kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
|
||||
{% endif %}
|
||||
|
||||
# If you want metrics enabled in all of your Cilium agents, set the port for
|
||||
# which the Cilium agents will have their metrics exposed.
|
||||
# This option deprecates the "prometheus-serve-addr" in the
|
||||
# "cilium-metrics-config" ConfigMap
|
||||
# NOTE that this will open the port on ALL nodes where Cilium pods are
|
||||
# scheduled.
|
||||
{% if cilium_enable_prometheus %}
|
||||
prometheus-serve-addr: ":{{ cilium_agent_scrape_port }}"
|
||||
operator-prometheus-serve-addr: ":{{ cilium_operator_scrape_port }}"
|
||||
enable-metrics: "true"
|
||||
{% endif %}
|
||||
|
||||
# If you want to run cilium in debug mode change this value to true
|
||||
debug: "{{ cilium_debug }}"
|
||||
enable-ipv4: "{{ cilium_enable_ipv4 }}"
|
||||
enable-ipv6: "{{ cilium_enable_ipv6 }}"
|
||||
# If a serious issue occurs during Cilium startup, this
|
||||
# invasive option may be set to true to remove all persistent
|
||||
# state. Endpoints will not be restored using knowledge from a
|
||||
# prior Cilium run, so they may receive new IP addresses upon
|
||||
# restart. This also triggers clean-cilium-bpf-state.
|
||||
clean-cilium-state: "false"
|
||||
# If you want to clean cilium BPF state, set this to true;
|
||||
# Removes all BPF maps from the filesystem. Upon restart,
|
||||
# endpoints are restored with the same IP addresses, however
|
||||
# any ongoing connections may be disrupted briefly.
|
||||
# Loadbalancing decisions will be reset, so any ongoing
|
||||
# connections via a service may be loadbalanced to a different
|
||||
# backend after restart.
|
||||
clean-cilium-bpf-state: "false"
|
||||
|
||||
# Users who wish to specify their own custom CNI configuration file must set
|
||||
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
|
||||
custom-cni-conf: "false"
|
||||
|
||||
# If you want cilium monitor to aggregate tracing for packets, set this level
|
||||
# to "low", "medium", or "maximum". The higher the level, the less packets
|
||||
# that will be seen in monitor output.
|
||||
monitor-aggregation: "{{ cilium_monitor_aggregation }}"
|
||||
|
||||
# ct-global-max-entries-* specifies the maximum number of connections
|
||||
# supported across all endpoints, split by protocol: tcp or other. One pair
|
||||
# of maps uses these values for IPv4 connections, and another pair of maps
|
||||
# use these values for IPv6 connections.
|
||||
#
|
||||
# If these values are modified, then during the next Cilium startup the
|
||||
# tracking of ongoing connections may be disrupted. This may lead to brief
|
||||
# policy drops or a change in loadbalancing decisions for a connection.
|
||||
#
|
||||
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption
|
||||
# during the upgrade process, comment out these options.
|
||||
bpf-ct-global-tcp-max: "524288"
|
||||
bpf-ct-global-any-max: "262144"
|
||||
|
||||
# Pre-allocation of map entries allows per-packet latency to be reduced, at
|
||||
# the expense of up-front memory allocation for the entries in the maps. The
|
||||
# default value below will minimize memory usage in the default installation;
|
||||
# users who are sensitive to latency may consider setting this to "true".
|
||||
#
|
||||
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
|
||||
# this option and behave as though it is set to "true".
|
||||
#
|
||||
# If this value is modified, then during the next Cilium startup the restore
|
||||
# of existing endpoints and tracking of ongoing connections may be disrupted.
|
||||
# This may lead to policy drops or a change in loadbalancing decisions for a
|
||||
# connection for some time. Endpoints may need to be recreated to restore
|
||||
# connectivity.
|
||||
#
|
||||
# If this option is set to "false" during an upgrade from 1.3 or earlier to
|
||||
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
|
||||
preallocate-bpf-maps: "{{cilium_preallocate_bpf_maps}}"
|
||||
|
||||
# Regular expression matching compatible Istio sidecar istio-proxy
|
||||
# container image names
|
||||
sidecar-istio-proxy-image: "cilium/istio_proxy"
|
||||
|
||||
# Encapsulation mode for communication between nodes
|
||||
# Possible values:
|
||||
# - disabled
|
||||
# - vxlan (default)
|
||||
# - geneve
|
||||
tunnel: "{{ cilium_tunnel_mode }}"
|
||||
|
||||
# Enable Bandwidth Manager
|
||||
# Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
|
||||
# Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies.
|
||||
# In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods.
|
||||
# Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
|
||||
{% if cilium_enable_bandwidth_manager %}
|
||||
enable-bandwidth-manager: "true"
|
||||
{% endif %}
|
||||
|
||||
# Name of the cluster. Only relevant when building a mesh of clusters.
|
||||
cluster-name: "{{ cilium_cluster_name }}"
|
||||
|
||||
# Unique ID of the cluster. Must be unique across all conneted clusters and
|
||||
# in the range of 1 and 255. Only relevant when building a mesh of clusters.
|
||||
#cluster-id: 1
|
||||
{% if cilium_cluster_id is defined %}
|
||||
cluster-id: "{{ cilium_cluster_id }}"
|
||||
{% endif %}
|
||||
|
||||
# `wait-bpf-mount` is removed after v1.10.4
|
||||
# https://github.com/cilium/cilium/commit/d2217045cb3726a7f823174e086913b69b8090da
|
||||
{% if cilium_version | regex_replace('v') is version('1.10.4', '<') %}
|
||||
# wait-bpf-mount makes init container wait until bpf filesystem is mounted
|
||||
wait-bpf-mount: "false"
|
||||
{% endif %}
|
||||
|
||||
kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}"
|
||||
|
||||
# `native-routing-cidr` is deprecated in 1.10, removed in 1.12.
|
||||
# Replaced by `ipv4-native-routing-cidr`
|
||||
# https://github.com/cilium/cilium/pull/16695
|
||||
{% if cilium_version | regex_replace('v') is version('1.12', '<') %}
|
||||
native-routing-cidr: "{{ cilium_native_routing_cidr }}"
|
||||
{% else %}
|
||||
{% if cilium_native_routing_cidr | length %}
|
||||
ipv4-native-routing-cidr: "{{ cilium_native_routing_cidr }}"
|
||||
{% endif %}
|
||||
{% if cilium_native_routing_cidr_ipv6 | length %}
|
||||
ipv6-native-routing-cidr: "{{ cilium_native_routing_cidr_ipv6 }}"
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}"
|
||||
|
||||
operator-api-serve-addr: "{{ cilium_operator_api_serve_addr }}"
|
||||
|
||||
# Hubble settings
|
||||
{% if cilium_enable_hubble %}
|
||||
enable-hubble: "true"
|
||||
{% if cilium_enable_hubble_metrics %}
|
||||
hubble-metrics-server: ":{{ cilium_hubble_scrape_port }}"
|
||||
hubble-metrics:
|
||||
{% for hubble_metrics_cycle in cilium_hubble_metrics %}
|
||||
{{ hubble_metrics_cycle }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
hubble-listen-address: ":4244"
|
||||
{% if cilium_enable_hubble and cilium_hubble_install %}
|
||||
hubble-disable-tls: "{% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}"
|
||||
hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
|
||||
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
|
||||
hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
# IP Masquerade Agent
|
||||
enable-ip-masq-agent: "{{ cilium_ip_masq_agent_enable }}"
|
||||
|
||||
{% for key, value in cilium_config_extra_vars.items() %}
|
||||
{{ key }}: "{{ value }}"
|
||||
{% endfor %}
|
||||
|
||||
# Enable transparent network encryption
|
||||
{% if cilium_encryption_enabled %}
|
||||
{% if cilium_encryption_type == "ipsec" %}
|
||||
enable-ipsec: "true"
|
||||
ipsec-key-file: /etc/ipsec/keys
|
||||
encrypt-node: "{{ cilium_ipsec_node_encryption }}"
|
||||
{% endif %}
|
||||
|
||||
{% if cilium_encryption_type == "wireguard" %}
|
||||
enable-wireguard: "true"
|
||||
enable-wireguard-userspace-fallback: "{{ cilium_wireguard_userspace_fallback }}"
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
# IPAM settings
|
||||
ipam: "{{ cilium_ipam_mode }}"
|
||||
|
||||
agent-health-port: "{{ cilium_agent_health_port }}"
|
||||
|
||||
{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_host_root != '' %}
|
||||
cgroup-root: "{{ cilium_cgroup_host_root }}"
|
||||
{% endif %}
|
||||
|
||||
bpf-map-dynamic-size-ratio: "{{ cilium_bpf_map_dynamic_size_ratio }}"
|
||||
|
||||
enable-ipv4-masquerade: "{{ cilium_enable_ipv4_masquerade }}"
|
||||
enable-ipv6-masquerade: "{{ cilium_enable_ipv6_masquerade }}"
|
||||
|
||||
enable-bpf-masquerade: "{{ cilium_enable_bpf_masquerade }}"
|
||||
|
||||
enable-host-legacy-routing: "{{ cilium_enable_host_legacy_routing }}"
|
||||
|
||||
enable-remote-node-identity: "{{ cilium_enable_remote_node_identity }}"
|
||||
|
||||
enable-well-known-identities: "{{ cilium_enable_well_known_identities }}"
|
||||
|
||||
monitor-aggregation-flags: "{{ cilium_monitor_aggregation_flags }}"
|
||||
|
||||
enable-bpf-clock-probe: "{{ cilium_enable_bpf_clock_probe }}"
|
||||
|
||||
disable-cnp-status-updates: "{{ cilium_disable_cnp_status_updates }}"
|
||||
{% if cilium_ip_masq_agent_enable %}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: ip-masq-agent
|
||||
namespace: kube-system
|
||||
data:
|
||||
config: |
|
||||
nonMasqueradeCIDRs:
|
||||
{% for cidr in cilium_non_masquerade_cidrs %}
|
||||
- {{ cidr }}
|
||||
{% endfor %}
|
||||
masqLinkLocal: {{ cilium_masq_link_local|bool }}
|
||||
resyncInterval: "{{ cilium_ip_masq_resync_interval }}"
|
||||
{% endif %}
|
||||
@@ -0,0 +1,122 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: cilium
|
||||
rules:
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- discovery.k8s.io
|
||||
resources:
|
||||
- endpointslices
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
- services
|
||||
- pods
|
||||
- endpoints
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
{% if cilium_version | regex_replace('v') is version('1.12', '<') %}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
{% endif %}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
- nodes/status
|
||||
verbs:
|
||||
- patch
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
# Deprecated for removal in v1.10
|
||||
- create
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
|
||||
# This is used when validating policies in preflight. This will need to stay
|
||||
# until we figure out how to avoid "get" inside the preflight, and then
|
||||
# should be removed ideally.
|
||||
- get
|
||||
- apiGroups:
|
||||
- cilium.io
|
||||
resources:
|
||||
- ciliumnetworkpolicies
|
||||
- ciliumnetworkpolicies/status
|
||||
- ciliumclusterwidenetworkpolicies
|
||||
- ciliumclusterwidenetworkpolicies/status
|
||||
- ciliumendpoints
|
||||
- ciliumendpoints/status
|
||||
- ciliumnodes
|
||||
- ciliumnodes/status
|
||||
- ciliumidentities
|
||||
- ciliumlocalredirectpolicies
|
||||
- ciliumlocalredirectpolicies/status
|
||||
- ciliumegressnatpolicies
|
||||
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
|
||||
- ciliumendpointslices
|
||||
{% endif %}
|
||||
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
|
||||
- ciliumbgploadbalancerippools
|
||||
- ciliumbgppeeringpolicies
|
||||
{% endif %}
|
||||
{% if cilium_version | regex_replace('v') is version('1.11.5', '<') %}
|
||||
- ciliumnetworkpolicies/finalizers
|
||||
- ciliumclusterwidenetworkpolicies/finalizers
|
||||
- ciliumendpoints/finalizers
|
||||
- ciliumnodes/finalizers
|
||||
- ciliumidentities/finalizers
|
||||
- ciliumlocalredirectpolicies/finalizers
|
||||
{% endif %}
|
||||
verbs:
|
||||
- '*'
|
||||
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
|
||||
- apiGroups:
|
||||
- cilium.io
|
||||
resources:
|
||||
- ciliumclusterwideenvoyconfigs
|
||||
- ciliumenvoyconfigs
|
||||
- ciliumegressgatewaypolicies
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
{% endif %}
|
||||
@@ -0,0 +1,13 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: cilium
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cilium
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: cilium
|
||||
namespace: kube-system
|
||||
@@ -0,0 +1,424 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: cilium
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: cilium
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: cilium
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
# Specifies the maximum number of Pods that can be unavailable during the update process.
|
||||
maxUnavailable: 2
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
{% if cilium_enable_prometheus %}
|
||||
prometheus.io/port: "{{ cilium_agent_scrape_port }}"
|
||||
prometheus.io/scrape: "true"
|
||||
{% endif %}
|
||||
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
|
||||
labels:
|
||||
k8s-app: cilium
|
||||
spec:
|
||||
containers:
|
||||
- name: cilium-agent
|
||||
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
command:
|
||||
- cilium-agent
|
||||
args:
|
||||
- --config-dir=/tmp/cilium/config-map
|
||||
{% if cilium_mtu != "" %}
|
||||
- --mtu={{ cilium_mtu }}
|
||||
{% endif %}
|
||||
{% if cilium_agent_custom_args is string %}
|
||||
- {{ cilium_agent_custom_args }}
|
||||
{% else %}
|
||||
{% for flag in cilium_agent_custom_args %}
|
||||
- {{ flag }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
startupProbe:
|
||||
httpGet:
|
||||
host: '127.0.0.1'
|
||||
path: /healthz
|
||||
port: {{ cilium_agent_health_port }}
|
||||
scheme: HTTP
|
||||
httpHeaders:
|
||||
- name: "brief"
|
||||
value: "true"
|
||||
failureThreshold: 105
|
||||
periodSeconds: 2
|
||||
successThreshold: 1
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: '127.0.0.1'
|
||||
path: /healthz
|
||||
port: {{ cilium_agent_health_port }}
|
||||
scheme: HTTP
|
||||
httpHeaders:
|
||||
- name: "brief"
|
||||
value: "true"
|
||||
failureThreshold: 10
|
||||
periodSeconds: 30
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /healthz
|
||||
port: {{ cilium_agent_health_port }}
|
||||
scheme: HTTP
|
||||
httpHeaders:
|
||||
- name: "brief"
|
||||
value: "true"
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 30
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
timeoutSeconds: 5
|
||||
env:
|
||||
- name: K8S_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
- name: CILIUM_K8S_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.namespace
|
||||
- name: CILIUM_CLUSTERMESH_CONFIG
|
||||
value: /var/lib/cilium/clustermesh/
|
||||
{% if cilium_kube_proxy_replacement == 'strict' %}
|
||||
- name: KUBERNETES_SERVICE_HOST
|
||||
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
|
||||
- name: KUBERNETES_SERVICE_PORT
|
||||
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
|
||||
{% endif %}
|
||||
{% for env_var in cilium_agent_extra_env_vars %}
|
||||
- {{ env_var | to_nice_yaml(indent=2) | indent(10) }}
|
||||
{% endfor %}
|
||||
lifecycle:
|
||||
postStart:
|
||||
exec:
|
||||
command:
|
||||
- "/cni-install.sh"
|
||||
- "--cni-exclusive={{ cilium_cni_exclusive | string | lower }}"
|
||||
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
|
||||
- "--enable-debug={{ cilium_debug | string | lower }}"
|
||||
- "--log-file={{ cilium_cni_log_file }}"
|
||||
{% endif %}
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /cni-uninstall.sh
|
||||
resources:
|
||||
limits:
|
||||
cpu: {{ cilium_cpu_limit }}
|
||||
memory: {{ cilium_memory_limit }}
|
||||
requests:
|
||||
cpu: {{ cilium_cpu_requests }}
|
||||
memory: {{ cilium_memory_requests }}
|
||||
{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %}
|
||||
ports:
|
||||
{% endif %}
|
||||
{% if cilium_enable_prometheus %}
|
||||
- name: prometheus
|
||||
containerPort: {{ cilium_agent_scrape_port }}
|
||||
hostPort: {{ cilium_agent_scrape_port }}
|
||||
protocol: TCP
|
||||
{% endif %}
|
||||
{% if cilium_enable_hubble_metrics %}
|
||||
- name: hubble-metrics
|
||||
containerPort: {{ cilium_hubble_scrape_port }}
|
||||
hostPort: {{ cilium_hubble_scrape_port }}
|
||||
protocol: TCP
|
||||
{% endif %}
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: bpf-maps
|
||||
mountPath: /sys/fs/bpf
|
||||
mountPropagation: Bidirectional
|
||||
- name: cilium-run
|
||||
mountPath: /var/run/cilium
|
||||
- name: cni-path
|
||||
mountPath: /host/opt/cni/bin
|
||||
- name: etc-cni-netd
|
||||
mountPath: /host/etc/cni/net.d
|
||||
{% if cilium_identity_allocation_mode == "kvstore" %}
|
||||
- name: etcd-config-path
|
||||
mountPath: /var/lib/etcd-config
|
||||
readOnly: true
|
||||
- name: etcd-secrets
|
||||
mountPath: "{{cilium_cert_dir}}"
|
||||
readOnly: true
|
||||
{% endif %}
|
||||
- name: clustermesh-secrets
|
||||
mountPath: /var/lib/cilium/clustermesh
|
||||
readOnly: true
|
||||
- name: cilium-config-path
|
||||
mountPath: /tmp/cilium/config-map
|
||||
readOnly: true
|
||||
{% if cilium_ip_masq_agent_enable %}
|
||||
- name: ip-masq-agent
|
||||
mountPath: /etc/config
|
||||
readOnly: true
|
||||
{% endif %}
|
||||
# Needed to be able to load kernel modules
|
||||
- name: lib-modules
|
||||
mountPath: /lib/modules
|
||||
readOnly: true
|
||||
- name: xtables-lock
|
||||
mountPath: /run/xtables.lock
|
||||
{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %}
|
||||
- name: cilium-ipsec-secrets
|
||||
mountPath: /etc/ipsec
|
||||
readOnly: true
|
||||
{% endif %}
|
||||
{% if cilium_hubble_install %}
|
||||
- name: hubble-tls
|
||||
mountPath: /var/lib/cilium/tls/hubble
|
||||
readOnly: true
|
||||
{% endif %}
|
||||
{% for volume_mount in cilium_agent_extra_volume_mounts %}
|
||||
- {{ volume_mount | to_nice_yaml(indent=2) | indent(10) }}
|
||||
{% endfor %}
|
||||
# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service
|
||||
{% if cilium_identity_allocation_mode == "kvstore" %}
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
{% endif %}
|
||||
hostNetwork: true
|
||||
initContainers:
|
||||
{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_auto_mount %}
|
||||
- name: mount-cgroup
|
||||
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
env:
|
||||
- name: CGROUP_ROOT
|
||||
value: {{ cilium_cgroup_host_root }}
|
||||
- name: BIN_PATH
|
||||
value: /opt/cni/bin
|
||||
command:
|
||||
- sh
|
||||
- -ec
|
||||
# The statically linked Go program binary is invoked to avoid any
|
||||
# dependency on utilities like sh and mount that can be missing on certain
|
||||
# distros installed on the underlying host. Copy the binary to the
|
||||
# same directory where we install cilium cni plugin so that exec permissions
|
||||
# are available.
|
||||
- |
|
||||
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
|
||||
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
|
||||
rm /hostbin/cilium-mount
|
||||
volumeMounts:
|
||||
- name: hostproc
|
||||
mountPath: /hostproc
|
||||
- name: cni-path
|
||||
mountPath: /hostbin
|
||||
securityContext:
|
||||
privileged: true
|
||||
{% endif %}
|
||||
{% if cilium_version | regex_replace('v') is version('1.11.7', '>=') %}
|
||||
- name: apply-sysctl-overwrites
|
||||
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
env:
|
||||
- name: BIN_PATH
|
||||
value: /opt/cni/bin
|
||||
command:
|
||||
- sh
|
||||
- -ec
|
||||
# The statically linked Go program binary is invoked to avoid any
|
||||
# dependency on utilities like sh that can be missing on certain
|
||||
# distros installed on the underlying host. Copy the binary to the
|
||||
# same directory where we install cilium cni plugin so that exec permissions
|
||||
# are available.
|
||||
- |
|
||||
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
|
||||
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
|
||||
rm /hostbin/cilium-sysctlfix
|
||||
volumeMounts:
|
||||
- name: hostproc
|
||||
mountPath: /hostproc
|
||||
- name: cni-path
|
||||
mountPath: /hostbin
|
||||
securityContext:
|
||||
privileged: true
|
||||
{% endif %}
|
||||
- name: clean-cilium-state
|
||||
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
command:
|
||||
- /init-container.sh
|
||||
env:
|
||||
- name: CILIUM_ALL_STATE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: cilium-config
|
||||
key: clean-cilium-state
|
||||
optional: true
|
||||
- name: CILIUM_BPF_STATE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: cilium-config
|
||||
key: clean-cilium-bpf-state
|
||||
optional: true
|
||||
# Removed in 1.11 and up.
|
||||
# https://github.com/cilium/cilium/commit/f7a3f59fd74983c600bfce9cac364b76d20849d9
|
||||
{% if cilium_version | regex_replace('v') is version('1.11', '<') %}
|
||||
- name: CILIUM_WAIT_BPF_MOUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: wait-bpf-mount
|
||||
name: cilium-config
|
||||
optional: true
|
||||
{% endif %}
|
||||
{% if cilium_kube_proxy_replacement == 'strict' %}
|
||||
- name: KUBERNETES_SERVICE_HOST
|
||||
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
|
||||
- name: KUBERNETES_SERVICE_PORT
|
||||
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
|
||||
{% endif %}
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: bpf-maps
|
||||
mountPath: /sys/fs/bpf
|
||||
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
|
||||
# Required to mount cgroup filesystem from the host to cilium agent pod
|
||||
- name: cilium-cgroup
|
||||
mountPath: {{ cilium_cgroup_host_root }}
|
||||
mountPropagation: HostToContainer
|
||||
{% endif %}
|
||||
- name: cilium-run
|
||||
mountPath: /var/run/cilium
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
restartPolicy: Always
|
||||
priorityClassName: system-node-critical
|
||||
serviceAccount: cilium
|
||||
serviceAccountName: cilium
|
||||
terminationGracePeriodSeconds: 1
|
||||
hostNetwork: true
|
||||
# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service
|
||||
{% if cilium_identity_allocation_mode == "kvstore" %}
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
{% endif %}
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- topologyKey: kubernetes.io/hostname
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
k8s-app: cilium
|
||||
tolerations:
|
||||
- operator: Exists
|
||||
volumes:
|
||||
# To keep state between restarts / upgrades
|
||||
- name: cilium-run
|
||||
hostPath:
|
||||
path: /var/run/cilium
|
||||
type: DirectoryOrCreate
|
||||
# To keep state between restarts / upgrades for bpf maps
|
||||
- name: bpf-maps
|
||||
hostPath:
|
||||
path: /sys/fs/bpf
|
||||
type: DirectoryOrCreate
|
||||
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
|
||||
# To mount cgroup2 filesystem on the host
|
||||
- name: hostproc
|
||||
hostPath:
|
||||
path: /proc
|
||||
type: Directory
|
||||
# To keep state between restarts / upgrades for cgroup2 filesystem
|
||||
- name: cilium-cgroup
|
||||
hostPath:
|
||||
path: {{ cilium_cgroup_host_root }}
|
||||
type: DirectoryOrCreate
|
||||
{% endif %}
|
||||
# To install cilium cni plugin in the host
|
||||
- name: cni-path
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
type: DirectoryOrCreate
|
||||
# To install cilium cni configuration in the host
|
||||
- name: etc-cni-netd
|
||||
hostPath:
|
||||
path: /etc/cni/net.d
|
||||
type: DirectoryOrCreate
|
||||
# To be able to load kernel modules
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
# To access iptables concurrently with other processes (e.g. kube-proxy)
|
||||
- name: xtables-lock
|
||||
hostPath:
|
||||
path: /run/xtables.lock
|
||||
type: FileOrCreate
|
||||
{% if cilium_identity_allocation_mode == "kvstore" %}
|
||||
# To read the etcd config stored in config maps
|
||||
- name: etcd-config-path
|
||||
configMap:
|
||||
name: cilium-config
|
||||
# note: the leading zero means this number is in octal representation: do not remove it
|
||||
defaultMode: 0400
|
||||
items:
|
||||
- key: etcd-config
|
||||
path: etcd.config
|
||||
# To read the k8s etcd secrets in case the user might want to use TLS
|
||||
- name: etcd-secrets
|
||||
hostPath:
|
||||
path: "{{cilium_cert_dir}}"
|
||||
{% endif %}
|
||||
# To read the clustermesh configuration
|
||||
- name: clustermesh-secrets
|
||||
secret:
|
||||
secretName: cilium-clustermesh
|
||||
# note: the leading zero means this number is in octal representation: do not remove it
|
||||
defaultMode: 0400
|
||||
optional: true
|
||||
# To read the configuration from the config map
|
||||
- name: cilium-config-path
|
||||
configMap:
|
||||
name: cilium-config
|
||||
{% if cilium_ip_masq_agent_enable %}
|
||||
- name: ip-masq-agent
|
||||
configMap:
|
||||
name: ip-masq-agent
|
||||
optional: true
|
||||
items:
|
||||
- key: config
|
||||
path: ip-masq-agent
|
||||
{% endif %}
|
||||
{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %}
|
||||
- name: cilium-ipsec-secrets
|
||||
secret:
|
||||
secretName: cilium-ipsec-keys
|
||||
{% endif %}
|
||||
{% if cilium_hubble_install %}
|
||||
- name: hubble-tls
|
||||
projected:
|
||||
# note: the leading zero means this number is in octal representation: do not remove it
|
||||
defaultMode: 0400
|
||||
sources:
|
||||
- secret:
|
||||
name: hubble-server-certs
|
||||
optional: true
|
||||
items:
|
||||
- key: ca.crt
|
||||
path: client-ca.crt
|
||||
- key: tls.crt
|
||||
path: server.crt
|
||||
- key: tls.key
|
||||
path: server.key
|
||||
{% endif %}
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: cilium
|
||||
namespace: kube-system
|
||||
@@ -0,0 +1,9 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
keys: {{ cilium_ipsec_key }}
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: cilium-ipsec-keys
|
||||
namespace: kube-system
|
||||
type: Opaque
|
||||
@@ -0,0 +1,87 @@
|
||||
---
|
||||
# Source: cilium/templates/hubble-relay-configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: hubble-relay-config
|
||||
namespace: kube-system
|
||||
data:
|
||||
config.yaml: |
|
||||
peer-service: unix:///var/run/cilium/hubble.sock
|
||||
listen-address: :4245
|
||||
dial-timeout:
|
||||
retry-timeout:
|
||||
sort-buffer-len-max:
|
||||
sort-buffer-drain-timeout:
|
||||
tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt
|
||||
tls-client-key-file: /var/lib/hubble-relay/tls/client.key
|
||||
tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt
|
||||
disable-server-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}
|
||||
disable-client-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}
|
||||
---
|
||||
# Source: cilium/templates/hubble-ui-configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: hubble-ui-envoy
|
||||
namespace: kube-system
|
||||
data:
|
||||
envoy.yaml: |
|
||||
static_resources:
|
||||
listeners:
|
||||
- name: listener_hubble_ui
|
||||
address:
|
||||
socket_address:
|
||||
address: 0.0.0.0
|
||||
port_value: 8081
|
||||
filter_chains:
|
||||
- filters:
|
||||
- name: envoy.filters.network.http_connection_manager
|
||||
config:
|
||||
codec_type: auto
|
||||
stat_prefix: ingress_http
|
||||
route_config:
|
||||
name: local_route
|
||||
virtual_hosts:
|
||||
- name: local_service
|
||||
domains: ['*']
|
||||
routes:
|
||||
- match:
|
||||
prefix: '/api/'
|
||||
route:
|
||||
cluster: backend
|
||||
max_grpc_timeout: 0s
|
||||
prefix_rewrite: '/'
|
||||
- match:
|
||||
prefix: '/'
|
||||
route:
|
||||
cluster: frontend
|
||||
cors:
|
||||
allow_origin_string_match:
|
||||
- prefix: '*'
|
||||
allow_methods: GET, PUT, DELETE, POST, OPTIONS
|
||||
allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout
|
||||
max_age: '1728000'
|
||||
expose_headers: grpc-status,grpc-message
|
||||
http_filters:
|
||||
- name: envoy.filters.http.grpc_web
|
||||
- name: envoy.filters.http.cors
|
||||
- name: envoy.filters.http.router
|
||||
clusters:
|
||||
- name: frontend
|
||||
connect_timeout: 0.25s
|
||||
type: strict_dns
|
||||
lb_policy: round_robin
|
||||
hosts:
|
||||
- socket_address:
|
||||
address: 127.0.0.1
|
||||
port_value: 8080
|
||||
- name: backend
|
||||
connect_timeout: 0.25s
|
||||
type: logical_dns
|
||||
lb_policy: round_robin
|
||||
http2_protocol_options: {}
|
||||
hosts:
|
||||
- socket_address:
|
||||
address: 127.0.0.1
|
||||
port_value: 8090
|
||||
@@ -0,0 +1,106 @@
|
||||
{% if cilium_hubble_tls_generate %}
|
||||
---
|
||||
# Source: cilium/templates/hubble-generate-certs-clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: hubble-generate-certs
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
resourceNames:
|
||||
- hubble-server-certs
|
||||
- hubble-relay-client-certs
|
||||
- hubble-relay-server-certs
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
resourceNames:
|
||||
- hubble-ca-cert
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
resourceNames:
|
||||
- hubble-ca-secret
|
||||
verbs:
|
||||
- get
|
||||
{% endif %}
|
||||
---
|
||||
# Source: cilium/templates/hubble-relay-clusterrole.yaml
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: hubble-relay
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- componentstatuses
|
||||
- endpoints
|
||||
- namespaces
|
||||
- nodes
|
||||
- pods
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
# Source: cilium/templates/hubble-ui-clusterrole.yaml
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: hubble-ui
|
||||
rules:
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- componentstatuses
|
||||
- endpoints
|
||||
- namespaces
|
||||
- nodes
|
||||
- pods
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- cilium.io
|
||||
resources:
|
||||
- "*"
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
@@ -0,0 +1,44 @@
|
||||
{% if cilium_hubble_tls_generate %}
|
||||
---
|
||||
# Source: cilium/templates/hubble-generate-certs-clusterrolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: hubble-generate-certs
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: hubble-generate-certs
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: hubble-generate-certs
|
||||
namespace: kube-system
|
||||
{% endif %}
|
||||
---
|
||||
# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: hubble-relay
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: hubble-relay
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
namespace: kube-system
|
||||
name: hubble-relay
|
||||
---
|
||||
# Source: cilium/templates/hubble-ui-clusterrolebinding.yaml
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: hubble-ui
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: hubble-ui
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
namespace: kube-system
|
||||
name: hubble-ui
|
||||
@@ -0,0 +1,49 @@
|
||||
---
|
||||
# Source: cilium/templates/hubble-generate-certs-cronjob.yaml
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: hubble-generate-certs
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: hubble-generate-certs
|
||||
spec:
|
||||
schedule: "0 0 1 */4 *"
|
||||
concurrencyPolicy: Forbid
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: hubble-generate-certs
|
||||
spec:
|
||||
serviceAccount: hubble-generate-certs
|
||||
serviceAccountName: hubble-generate-certs
|
||||
containers:
|
||||
- name: certgen
|
||||
image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}"
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
command:
|
||||
- "/usr/bin/cilium-certgen"
|
||||
# Because this is executed as a job, we pass the values as command
|
||||
# line args instead of via config map. This allows users to inspect
|
||||
# the values used in past runs by inspecting the completed pod.
|
||||
args:
|
||||
- "--cilium-namespace=kube-system"
|
||||
- "--hubble-ca-reuse-secret=true"
|
||||
- "--hubble-ca-secret-name=hubble-ca-secret"
|
||||
- "--hubble-ca-generate=true"
|
||||
- "--hubble-ca-validity-duration=94608000s"
|
||||
- "--hubble-ca-config-map-create=true"
|
||||
- "--hubble-ca-config-map-name=hubble-ca-cert"
|
||||
- "--hubble-server-cert-generate=true"
|
||||
- "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io"
|
||||
- "--hubble-server-cert-validity-duration=94608000s"
|
||||
- "--hubble-server-cert-secret-name=hubble-server-certs"
|
||||
- "--hubble-relay-client-cert-generate=true"
|
||||
- "--hubble-relay-client-cert-validity-duration=94608000s"
|
||||
- "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs"
|
||||
- "--hubble-relay-server-cert-generate=false"
|
||||
hostNetwork: true
|
||||
restartPolicy: OnFailure
|
||||
ttlSecondsAfterFinished: 1800
|
||||
@@ -0,0 +1,161 @@
|
||||
---
|
||||
# Source: cilium/templates/hubble-relay-deployment.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: hubble-relay
|
||||
labels:
|
||||
k8s-app: hubble-relay
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: hubble-relay
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
labels:
|
||||
k8s-app: hubble-relay
|
||||
spec:
|
||||
affinity:
|
||||
podAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: "k8s-app"
|
||||
operator: In
|
||||
values:
|
||||
- cilium
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
containers:
|
||||
- name: hubble-relay
|
||||
image: "{{ cilium_hubble_relay_image_repo }}:{{ cilium_hubble_relay_image_tag }}"
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
command:
|
||||
- hubble-relay
|
||||
args:
|
||||
- serve
|
||||
ports:
|
||||
- name: grpc
|
||||
containerPort: 4245
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: grpc
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: grpc
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/cilium
|
||||
name: hubble-sock-dir
|
||||
readOnly: true
|
||||
- mountPath: /etc/hubble-relay
|
||||
name: config
|
||||
readOnly: true
|
||||
- mountPath: /var/lib/hubble-relay/tls
|
||||
name: tls
|
||||
readOnly: true
|
||||
restartPolicy: Always
|
||||
serviceAccount: hubble-relay
|
||||
serviceAccountName: hubble-relay
|
||||
terminationGracePeriodSeconds: 0
|
||||
volumes:
|
||||
- configMap:
|
||||
name: hubble-relay-config
|
||||
items:
|
||||
- key: config.yaml
|
||||
path: config.yaml
|
||||
name: config
|
||||
- hostPath:
|
||||
path: /var/run/cilium
|
||||
type: Directory
|
||||
name: hubble-sock-dir
|
||||
- projected:
|
||||
sources:
|
||||
- secret:
|
||||
name: hubble-relay-client-certs
|
||||
items:
|
||||
- key: tls.crt
|
||||
path: client.crt
|
||||
- key: tls.key
|
||||
path: client.key
|
||||
- configMap:
|
||||
name: hubble-ca-cert
|
||||
items:
|
||||
- key: ca.crt
|
||||
path: hubble-server-ca.crt
|
||||
name: tls
|
||||
---
|
||||
# Source: cilium/templates/hubble-ui-deployment.yaml
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: hubble-ui
|
||||
name: hubble-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: hubble-ui
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
labels:
|
||||
k8s-app: hubble-ui
|
||||
spec:
|
||||
securityContext:
|
||||
runAsUser: 1001
|
||||
serviceAccount: hubble-ui
|
||||
serviceAccountName: hubble-ui
|
||||
containers:
|
||||
- name: frontend
|
||||
image: "{{ cilium_hubble_ui_image_repo }}:{{ cilium_hubble_ui_image_tag }}"
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: http
|
||||
resources:
|
||||
{}
|
||||
- name: backend
|
||||
image: "{{ cilium_hubble_ui_backend_image_repo }}:{{ cilium_hubble_ui_backend_image_tag }}"
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
env:
|
||||
- name: EVENTS_SERVER_PORT
|
||||
value: "8090"
|
||||
- name: FLOWS_API_ADDR
|
||||
value: "hubble-relay:80"
|
||||
ports:
|
||||
- containerPort: 8090
|
||||
name: grpc
|
||||
resources:
|
||||
{}
|
||||
- name: proxy
|
||||
image: "{{ cilium_hubble_envoy_image_repo }}:{{ cilium_hubble_envoy_image_tag }}"
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
ports:
|
||||
- containerPort: 8081
|
||||
name: http
|
||||
resources:
|
||||
{}
|
||||
command: ["envoy"]
|
||||
args:
|
||||
[
|
||||
"-c",
|
||||
"/etc/envoy.yaml",
|
||||
"-l",
|
||||
"info"
|
||||
]
|
||||
volumeMounts:
|
||||
- name: hubble-ui-envoy-yaml
|
||||
mountPath: /etc/envoy.yaml
|
||||
subPath: envoy.yaml
|
||||
volumes:
|
||||
- name: hubble-ui-envoy-yaml
|
||||
configMap:
|
||||
name: hubble-ui-envoy
|
||||
@@ -0,0 +1,45 @@
|
||||
---
|
||||
# Source: cilium/templates/hubble-generate-certs-job.yaml
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: hubble-generate-certs
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: hubble-generate-certs
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: hubble-generate-certs
|
||||
spec:
|
||||
serviceAccount: hubble-generate-certs
|
||||
serviceAccountName: hubble-generate-certs
|
||||
containers:
|
||||
- name: certgen
|
||||
image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}"
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
command:
|
||||
- "/usr/bin/cilium-certgen"
|
||||
# Because this is executed as a job, we pass the values as command
|
||||
# line args instead of via config map. This allows users to inspect
|
||||
# the values used in past runs by inspecting the completed pod.
|
||||
args:
|
||||
- "--cilium-namespace=kube-system"
|
||||
- "--hubble-ca-reuse-secret=true"
|
||||
- "--hubble-ca-secret-name=hubble-ca-secret"
|
||||
- "--hubble-ca-generate=true"
|
||||
- "--hubble-ca-validity-duration=94608000s"
|
||||
- "--hubble-ca-config-map-create=true"
|
||||
- "--hubble-ca-config-map-name=hubble-ca-cert"
|
||||
- "--hubble-server-cert-generate=true"
|
||||
- "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io"
|
||||
- "--hubble-server-cert-validity-duration=94608000s"
|
||||
- "--hubble-server-cert-secret-name=hubble-server-certs"
|
||||
- "--hubble-relay-client-cert-generate=true"
|
||||
- "--hubble-relay-client-cert-validity-duration=94608000s"
|
||||
- "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs"
|
||||
- "--hubble-relay-server-cert-generate=false"
|
||||
hostNetwork: true
|
||||
restartPolicy: OnFailure
|
||||
ttlSecondsAfterFinished: 1800
|
||||
@@ -0,0 +1,23 @@
|
||||
{% if cilium_hubble_tls_generate %}
|
||||
---
|
||||
# Source: cilium/templates/hubble-generate-certs-serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: hubble-generate-certs
|
||||
namespace: kube-system
|
||||
{% endif %}
|
||||
---
|
||||
# Source: cilium/templates/hubble-relay-serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: hubble-relay
|
||||
namespace: kube-system
|
||||
---
|
||||
# Source: cilium/templates/hubble-ui-serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: hubble-ui
|
||||
namespace: kube-system
|
||||
@@ -0,0 +1,58 @@
|
||||
{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %}
|
||||
---
|
||||
# Source: cilium/templates/cilium-agent-service.yaml
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: hubble-metrics
|
||||
namespace: kube-system
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
prometheus.io/port: "9091"
|
||||
labels:
|
||||
k8s-app: hubble
|
||||
spec:
|
||||
clusterIP: None
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: hubble-metrics
|
||||
port: 9091
|
||||
protocol: TCP
|
||||
targetPort: hubble-metrics
|
||||
selector:
|
||||
k8s-app: cilium
|
||||
{% endif %}
|
||||
---
|
||||
# Source: cilium/templates/hubble-relay-service.yaml
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: hubble-relay
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: hubble-relay
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
k8s-app: hubble-relay
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
targetPort: 4245
|
||||
---
|
||||
# Source: cilium/templates/hubble-ui-service.yaml
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: hubble-ui
|
||||
labels:
|
||||
k8s-app: hubble-ui
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: hubble-ui
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
targetPort: 8081
|
||||
type: ClusterIP
|
||||
Reference in New Issue
Block a user