havelight-ee
2023-05-30 14:44:26 +09:00
parent 9a3174deef
commit 4c32a7239d
2598 changed files with 164595 additions and 487 deletions


@@ -0,0 +1,13 @@
{
"cniVersion": "0.3.1",
"name": "cilium-portmap",
"plugins": [
{
"type": "cilium-cni"
},
{
"type": "portmap",
"capabilities": { "portMappings": true }
}
]
}
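The portmap chain above is what makes hostPort work with Cilium as the CNI. A minimal sketch of a Pod that relies on it (name, image, and ports are illustrative, not part of this commit):

---
apiVersion: v1
kind: Pod
metadata:
  name: hostport-demo            # hypothetical example
  namespace: default
spec:
  containers:
    - name: web
      image: nginx:1.25
      ports:
        - containerPort: 80
          hostPort: 8080         # published on the node by the chained portmap plugin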


@@ -0,0 +1,146 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
rules:
- apiGroups:
- ""
resources:
# to automatically delete [core|kube]dns pods so that they start being
# managed by Cilium
- pods
verbs:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
# To remove node taints
- nodes
# To set NetworkUnavailable false on startup
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform LB IP allocation for BGP
- services/status
verbs:
- update
- apiGroups:
- ""
resources:
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
# to check apiserver connectivity
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/status
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
- ciliumendpointslices
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- ciliumbgploadbalancerippools
- ciliumbgppeeringpolicies
- ciliumenvoyconfigs
{% endif %}
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- update
- watch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock, as edits to
# Leases are less common and fewer objects in the cluster watch "all Leases".
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- ciliumbgploadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- ciliumexternalworkloads.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
{% endif %}
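The {% if %} gates above strip the leading "v" and compare with Ansible's version test. A sketch of how they evaluate, assuming a hypothetical cilium_version of "v1.12.1":

# cilium_version | regex_replace('v')   -> "1.12.1"
# "1.12.1" is version('1.11', '>=')     -> true, so ciliumendpointslices is included
# "1.12.1" is version('1.12', '>=')     -> true, so the BGP/Envoy resources and the
#                                          resourceNames-scoped CRD update rule render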


@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: cilium-operator
namespace: kube-system


@@ -0,0 +1,166 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cilium-operator
namespace: kube-system
labels:
io.cilium/app: operator
name: cilium-operator
spec:
replicas: {{ cilium_operator_replicas }}
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
{% if cilium_enable_prometheus %}
annotations:
prometheus.io/port: "{{ cilium_operator_scrape_port }}"
prometheus.io/scrape: "true"
{% endif %}
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- name: cilium-operator
image: "{{ cilium_operator_image_repo }}:{{ cilium_operator_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- cilium-operator
args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
{% if cilium_operator_custom_args is string %}
- {{ cilium_operator_custom_args }}
{% else %}
{% for flag in cilium_operator_custom_args %}
- {{ flag }}
{% endfor %}
{% endif %}
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: cilium-aws
key: AWS_ACCESS_KEY_ID
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: cilium-aws
key: AWS_SECRET_ACCESS_KEY
optional: true
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
name: cilium-aws
key: AWS_DEFAULT_REGION
optional: true
{% if cilium_kube_proxy_replacement == 'strict' %}
- name: KUBERNETES_SERVICE_HOST
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
{% if cilium_enable_prometheus %}
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
ports:
- name: prometheus
containerPort: {{ cilium_operator_scrape_port }}
hostPort: {{ cilium_operator_scrape_port }}
protocol: TCP
{% endif %}
livenessProbe:
httpGet:
{% if cilium_enable_ipv4 %}
host: 127.0.0.1
{% else %}
host: '::1'
{% endif %}
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
{% if cilium_identity_allocation_mode == "kvstore" %}
- name: etcd-config-path
mountPath: /var/lib/etcd-config
readOnly: true
- name: etcd-secrets
mountPath: "{{cilium_cert_dir}}"
readOnly: true
{% endif %}
{% for volume_mount in cilium_operator_extra_volume_mounts %}
- {{ volume_mount | to_nice_yaml(indent=2) | indent(14) }}
{% endfor %}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
io.cilium/app: operator
tolerations:
- operator: Exists
volumes:
- name: cilium-config-path
configMap:
name: cilium-config
{% if cilium_identity_allocation_mode == "kvstore" %}
# To read the etcd config stored in config maps
- name: etcd-config-path
configMap:
name: cilium-config
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: "{{cilium_cert_dir}}"
{% endif %}
{% for volume in cilium_operator_extra_volumes %}
- {{ volume | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}
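The deployment template above accepts cilium_operator_custom_args as either a plain string or a list, and renders cilium_operator_extra_volume_mounts / cilium_operator_extra_volumes with to_nice_yaml. A hedged group_vars sketch (all values illustrative):

cilium_operator_replicas: 2
cilium_operator_custom_args:
  - "--identity-gc-interval=15m0s"       # illustrative operator flag
cilium_operator_extra_volume_mounts:
  - name: extra-config                   # hypothetical mount
    mountPath: /etc/extra
    readOnly: true
cilium_operator_extra_volumes:
  - name: extra-config
    configMap:
      name: my-extra-config              # hypothetical ConfigMap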


@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-operator
namespace: kube-system


@@ -0,0 +1,248 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
data:
identity-allocation-mode: {{ cilium_identity_allocation_mode }}
{% if cilium_identity_allocation_mode == "kvstore" %}
# This etcd-config contains the etcd endpoints of your cluster. If you use
# TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
etcd-config: |-
---
endpoints:
{% for ip_addr in etcd_access_addresses.split(',') %}
- {{ ip_addr }}
{% endfor %}
# TLS for etcd: the 'ca-file' below points at the CA certificate; create
# the corresponding kubernetes secret by following the tutorial in
# https://cilium.link/etcd-config
ca-file: "{{ cilium_cert_dir }}/ca_cert.crt"
# For client-to-server authentication, 'key-file' and 'cert-file' below
# provide the client credentials; create the kubernetes secret by following
# the tutorial in https://cilium.link/etcd-config
key-file: "{{ cilium_cert_dir }}/key.pem"
cert-file: "{{ cilium_cert_dir }}/cert.crt"
# kvstore
# https://docs.cilium.io/en/latest/cmdref/kvstore/
kvstore: etcd
kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
{% endif %}
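With cilium_identity_allocation_mode: kvstore, the block above renders roughly as follows (endpoints and cert dir are illustrative values, not defaults):

etcd-config: |-
  ---
  endpoints:
    - https://10.0.0.11:2379
    - https://10.0.0.12:2379
  ca-file: "/etc/cilium/certs/ca_cert.crt"
  key-file: "/etc/cilium/certs/key.pem"
  cert-file: "/etc/cilium/certs/cert.crt"
kvstore: etcd
kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'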
# If you want metrics enabled in all of your Cilium agents, set the port
# on which the Cilium agents will expose their metrics.
# This option deprecates the "prometheus-serve-addr" in the
# "cilium-metrics-config" ConfigMap
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled.
{% if cilium_enable_prometheus %}
prometheus-serve-addr: ":{{ cilium_agent_scrape_port }}"
operator-prometheus-serve-addr: ":{{ cilium_operator_scrape_port }}"
enable-metrics: "true"
{% endif %}
# If you want to run cilium in debug mode change this value to true
debug: "{{ cilium_debug }}"
enable-ipv4: "{{ cilium_enable_ipv4 }}"
enable-ipv6: "{{ cilium_enable_ipv6 }}"
# If a serious issue occurs during Cilium startup, this
# invasive option may be set to true to remove all persistent
# state. Endpoints will not be restored using knowledge from a
# prior Cilium run, so they may receive new IP addresses upon
# restart. This also triggers clean-cilium-bpf-state.
clean-cilium-state: "false"
# If you want to clean cilium BPF state, set this to true;
# Removes all BPF maps from the filesystem. Upon restart,
# endpoints are restored with the same IP addresses, however
# any ongoing connections may be disrupted briefly.
# Loadbalancing decisions will be reset, so any ongoing
# connections via a service may be loadbalanced to a different
# backend after restart.
clean-cilium-bpf-state: "false"
# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the fewer packets
# will be seen in monitor output.
monitor-aggregation: "{{ cilium_monitor_aggregation }}"
# bpf-ct-global-*-max specifies the maximum number of connections
# supported across all endpoints, split by protocol: tcp or other. One pair
# of maps uses these values for IPv4 connections, and another pair of maps
# uses these values for IPv6 connections.
#
# If these values are modified, then during the next Cilium startup the
# tracking of ongoing connections may be disrupted. This may lead to brief
# policy drops or a change in loadbalancing decisions for a connection.
#
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption
# during the upgrade process, comment out these options.
bpf-ct-global-tcp-max: "524288"
bpf-ct-global-any-max: "262144"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# This may lead to policy drops or a change in loadbalancing decisions for a
# connection for some time. Endpoints may need to be recreated to restore
# connectivity.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "{{ cilium_preallocate_bpf_maps }}"
# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "cilium/istio_proxy"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
tunnel: "{{ cilium_tunnel_mode }}"
# Enable Bandwidth Manager
# Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
# Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies:
# if an L7 policy selects a Pod at egress, bandwidth enforcement is disabled for that Pod.
# Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
{% if cilium_enable_bandwidth_manager %}
enable-bandwidth-manager: "true"
{% endif %}
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: "{{ cilium_cluster_name }}"
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
#cluster-id: 1
{% if cilium_cluster_id is defined %}
cluster-id: "{{ cilium_cluster_id }}"
{% endif %}
# `wait-bpf-mount` is removed after v1.10.4
# https://github.com/cilium/cilium/commit/d2217045cb3726a7f823174e086913b69b8090da
{% if cilium_version | regex_replace('v') is version('1.10.4', '<') %}
# wait-bpf-mount makes the init container wait until the bpf filesystem is mounted
wait-bpf-mount: "false"
{% endif %}
kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}"
# `native-routing-cidr` is deprecated in 1.10, removed in 1.12.
# Replaced by `ipv4-native-routing-cidr`
# https://github.com/cilium/cilium/pull/16695
{% if cilium_version | regex_replace('v') is version('1.12', '<') %}
native-routing-cidr: "{{ cilium_native_routing_cidr }}"
{% else %}
{% if cilium_native_routing_cidr | length %}
ipv4-native-routing-cidr: "{{ cilium_native_routing_cidr }}"
{% endif %}
{% if cilium_native_routing_cidr_ipv6 | length %}
ipv6-native-routing-cidr: "{{ cilium_native_routing_cidr_ipv6 }}"
{% endif %}
{% endif %}
auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}"
operator-api-serve-addr: "{{ cilium_operator_api_serve_addr }}"
# Hubble settings
{% if cilium_enable_hubble %}
enable-hubble: "true"
{% if cilium_enable_hubble_metrics %}
hubble-metrics-server: ":{{ cilium_hubble_scrape_port }}"
hubble-metrics:
{% for hubble_metrics_cycle in cilium_hubble_metrics %}
{{ hubble_metrics_cycle }}
{% endfor %}
{% endif %}
hubble-listen-address: ":4244"
{% if cilium_enable_hubble and cilium_hubble_install %}
hubble-disable-tls: "{% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}"
hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
{% endif %}
{% endif %}
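A hedged sketch of the variables driving the Hubble block above (the metric names are standard Hubble metrics; the selection is illustrative):

cilium_enable_hubble: true
cilium_enable_hubble_metrics: true
cilium_hubble_metrics:
  - dns
  - drop
  - tcp
  - flow
cilium_hubble_install: true
cilium_hubble_tls_generate: true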
# IP Masquerade Agent
enable-ip-masq-agent: "{{ cilium_ip_masq_agent_enable }}"
{% for key, value in cilium_config_extra_vars.items() %}
{{ key }}: "{{ value }}"
{% endfor %}
# Enable transparent network encryption
{% if cilium_encryption_enabled %}
{% if cilium_encryption_type == "ipsec" %}
enable-ipsec: "true"
ipsec-key-file: /etc/ipsec/keys
encrypt-node: "{{ cilium_ipsec_node_encryption }}"
{% endif %}
{% if cilium_encryption_type == "wireguard" %}
enable-wireguard: "true"
enable-wireguard-userspace-fallback: "{{ cilium_wireguard_userspace_fallback }}"
{% endif %}
{% endif %}
# IPAM settings
ipam: "{{ cilium_ipam_mode }}"
agent-health-port: "{{ cilium_agent_health_port }}"
{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_host_root != '' %}
cgroup-root: "{{ cilium_cgroup_host_root }}"
{% endif %}
bpf-map-dynamic-size-ratio: "{{ cilium_bpf_map_dynamic_size_ratio }}"
enable-ipv4-masquerade: "{{ cilium_enable_ipv4_masquerade }}"
enable-ipv6-masquerade: "{{ cilium_enable_ipv6_masquerade }}"
enable-bpf-masquerade: "{{ cilium_enable_bpf_masquerade }}"
enable-host-legacy-routing: "{{ cilium_enable_host_legacy_routing }}"
enable-remote-node-identity: "{{ cilium_enable_remote_node_identity }}"
enable-well-known-identities: "{{ cilium_enable_well_known_identities }}"
monitor-aggregation-flags: "{{ cilium_monitor_aggregation_flags }}"
enable-bpf-clock-probe: "{{ cilium_enable_bpf_clock_probe }}"
disable-cnp-status-updates: "{{ cilium_disable_cnp_status_updates }}"
{% if cilium_ip_masq_agent_enable %}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ip-masq-agent
namespace: kube-system
data:
config: |
nonMasqueradeCIDRs:
{% for cidr in cilium_non_masquerade_cidrs %}
- {{ cidr }}
{% endfor %}
masqLinkLocal: {{ cilium_masq_link_local | bool }}
resyncInterval: "{{ cilium_ip_masq_resync_interval }}"
{% endif %}
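When cilium_ip_masq_agent_enable is true, the second document above renders an ip-masq-agent config along these lines (CIDRs and interval are illustrative):

config: |
  nonMasqueradeCIDRs:
    - 10.233.64.0/18
    - 10.233.0.0/18
  masqLinkLocal: false
  resyncInterval: "60s"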


@@ -0,0 +1,122 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- pods
- endpoints
- nodes
verbs:
- get
- list
- watch
{% if cilium_version | regex_replace('v') is version('1.12', '<') %}
- apiGroups:
- ""
resources:
- pods
- pods/finalizers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
- update
{% endif %}
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
# Deprecated for removal in v1.10
- create
- list
- watch
- update
# This is used when validating policies in preflight. This will need to stay
# until we figure out how to avoid "get" inside the preflight, and should
# ideally be removed then.
- get
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
- ciliumnodes
- ciliumnodes/status
- ciliumidentities
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumegressnatpolicies
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
- ciliumendpointslices
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- ciliumbgploadbalancerippools
- ciliumbgppeeringpolicies
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.11.5', '<') %}
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints/finalizers
- ciliumnodes/finalizers
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies/finalizers
{% endif %}
verbs:
- '*'
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- apiGroups:
- cilium.io
resources:
- ciliumclusterwideenvoyconfigs
- ciliumenvoyconfigs
- ciliumegressgatewaypolicies
verbs:
- list
- watch
{% endif %}


@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system


@@ -0,0 +1,424 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium
namespace: kube-system
labels:
k8s-app: cilium
spec:
selector:
matchLabels:
k8s-app: cilium
updateStrategy:
rollingUpdate:
# Specifies the maximum number of Pods that can be unavailable during the update process.
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
{% if cilium_enable_prometheus %}
prometheus.io/port: "{{ cilium_agent_scrape_port }}"
prometheus.io/scrape: "true"
{% endif %}
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
labels:
k8s-app: cilium
spec:
containers:
- name: cilium-agent
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- cilium-agent
args:
- --config-dir=/tmp/cilium/config-map
{% if cilium_mtu != "" %}
- --mtu={{ cilium_mtu }}
{% endif %}
{% if cilium_agent_custom_args is string %}
- {{ cilium_agent_custom_args }}
{% else %}
{% for flag in cilium_agent_custom_args %}
- {{ flag }}
{% endfor %}
{% endif %}
startupProbe:
httpGet:
host: '127.0.0.1'
path: /healthz
port: {{ cilium_agent_health_port }}
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
livenessProbe:
httpGet:
host: '127.0.0.1'
path: /healthz
port: {{ cilium_agent_health_port }}
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: {{ cilium_agent_health_port }}
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
{% if cilium_kube_proxy_replacement == 'strict' %}
- name: KUBERNETES_SERVICE_HOST
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
{% for env_var in cilium_agent_extra_env_vars %}
- {{ env_var | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}
lifecycle:
postStart:
exec:
command:
- "/cni-install.sh"
- "--cni-exclusive={{ cilium_cni_exclusive | string | lower }}"
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- "--enable-debug={{ cilium_debug | string | lower }}"
- "--log-file={{ cilium_cni_log_file }}"
{% endif %}
preStop:
exec:
command:
- /cni-uninstall.sh
resources:
limits:
cpu: {{ cilium_cpu_limit }}
memory: {{ cilium_memory_limit }}
requests:
cpu: {{ cilium_cpu_requests }}
memory: {{ cilium_memory_requests }}
{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %}
ports:
{% endif %}
{% if cilium_enable_prometheus %}
- name: prometheus
containerPort: {{ cilium_agent_scrape_port }}
hostPort: {{ cilium_agent_scrape_port }}
protocol: TCP
{% endif %}
{% if cilium_enable_hubble_metrics %}
- name: hubble-metrics
containerPort: {{ cilium_hubble_scrape_port }}
hostPort: {{ cilium_hubble_scrape_port }}
protocol: TCP
{% endif %}
securityContext:
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
- name: cilium-run
mountPath: /var/run/cilium
- name: cni-path
mountPath: /host/opt/cni/bin
- name: etc-cni-netd
mountPath: /host/etc/cni/net.d
{% if cilium_identity_allocation_mode == "kvstore" %}
- name: etcd-config-path
mountPath: /var/lib/etcd-config
readOnly: true
- name: etcd-secrets
mountPath: "{{cilium_cert_dir}}"
readOnly: true
{% endif %}
- name: clustermesh-secrets
mountPath: /var/lib/cilium/clustermesh
readOnly: true
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
{% if cilium_ip_masq_agent_enable %}
- name: ip-masq-agent
mountPath: /etc/config
readOnly: true
{% endif %}
# Needed to be able to load kernel modules
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %}
- name: cilium-ipsec-secrets
mountPath: /etc/ipsec
readOnly: true
{% endif %}
{% if cilium_hubble_install %}
- name: hubble-tls
mountPath: /var/lib/cilium/tls/hubble
readOnly: true
{% endif %}
{% for volume_mount in cilium_agent_extra_volume_mounts %}
- {{ volume_mount | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}
# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service
{% if cilium_identity_allocation_mode == "kvstore" %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}
hostNetwork: true
initContainers:
{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_auto_mount %}
- name: mount-cgroup
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: CGROUP_ROOT
value: {{ cilium_cgroup_host_root }}
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -ec
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh and mount that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install cilium cni plugin so that exec permissions
# are available.
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
securityContext:
privileged: true
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.11.7', '>=') %}
- name: apply-sysctl-overwrites
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -ec
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install cilium cni plugin so that exec permissions
# are available.
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
securityContext:
privileged: true
{% endif %}
- name: clean-cilium-state
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-state
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-bpf-state
optional: true
# Removed in 1.11 and up.
# https://github.com/cilium/cilium/commit/f7a3f59fd74983c600bfce9cac364b76d20849d9
{% if cilium_version | regex_replace('v') is version('1.11', '<') %}
- name: CILIUM_WAIT_BPF_MOUNT
valueFrom:
configMapKeyRef:
key: wait-bpf-mount
name: cilium-config
optional: true
{% endif %}
{% if cilium_kube_proxy_replacement == 'strict' %}
- name: KUBERNETES_SERVICE_HOST
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
securityContext:
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
# Required to mount cgroup filesystem from the host to cilium agent pod
- name: cilium-cgroup
mountPath: {{ cilium_cgroup_host_root }}
mountPropagation: HostToContainer
{% endif %}
- name: cilium-run
mountPath: /var/run/cilium
resources:
requests:
cpu: 100m
memory: 100Mi
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
hostNetwork: true
# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service
{% if cilium_identity_allocation_mode == "kvstore" %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
k8s-app: cilium
tolerations:
- operator: Exists
volumes:
# To keep state between restarts / upgrades
- name: cilium-run
hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
# To mount cgroup2 filesystem on the host
- name: hostproc
hostPath:
path: /proc
type: Directory
# To keep state between restarts / upgrades for cgroup2 filesystem
- name: cilium-cgroup
hostPath:
path: {{ cilium_cgroup_host_root }}
type: DirectoryOrCreate
{% endif %}
# To install cilium cni plugin in the host
- name: cni-path
hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
# To install cilium cni configuration in the host
- name: etc-cni-netd
hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
# To be able to load kernel modules
- name: lib-modules
hostPath:
path: /lib/modules
# To access iptables concurrently with other processes (e.g. kube-proxy)
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
{% if cilium_identity_allocation_mode == "kvstore" %}
# To read the etcd config stored in config maps
- name: etcd-config-path
configMap:
name: cilium-config
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
items:
- key: etcd-config
path: etcd.config
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: "{{cilium_cert_dir}}"
{% endif %}
# To read the clustermesh configuration
- name: clustermesh-secrets
secret:
secretName: cilium-clustermesh
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
optional: true
# To read the configuration from the config map
- name: cilium-config-path
configMap:
name: cilium-config
{% if cilium_ip_masq_agent_enable %}
- name: ip-masq-agent
configMap:
name: ip-masq-agent
optional: true
items:
- key: config
path: ip-masq-agent
{% endif %}
{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %}
- name: cilium-ipsec-secrets
secret:
secretName: cilium-ipsec-keys
{% endif %}
{% if cilium_hubble_install %}
- name: hubble-tls
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: hubble-server-certs
optional: true
items:
- key: ca.crt
path: client-ca.crt
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
{% endif %}
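A hedged group_vars sketch for the agent-side knobs the DaemonSet template above consumes (the flag and env var are illustrative, not defaults):

cilium_mtu: 1450
cilium_agent_custom_args:
  - "--bpf-lb-map-max=65536"          # illustrative agent flag
cilium_agent_extra_env_vars:
  - name: GODEBUG                     # hypothetical env var
    value: "madvdontneed=1"
cilium_agent_extra_volume_mounts: []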


@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium
namespace: kube-system


@@ -0,0 +1,9 @@
---
apiVersion: v1
data:
keys: {{ cilium_ipsec_key }}
kind: Secret
metadata:
name: cilium-ipsec-keys
namespace: kube-system
type: Opaque
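The Secret above expects cilium_ipsec_key to already be base64-encoded key material in Cilium's keys format ("<key-id> <algo> <hex-key> <key-size>"). A hedged sketch — do not reuse a published value:

# Plaintext before base64 encoding, e.g.:
#   3 rfc4106(gcm(aes)) <40 hex chars of random key material> 128
cilium_encryption_enabled: true
cilium_encryption_type: ipsec
cilium_ipsec_key: "<base64 of the line above>"   # placeholder, generate your own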


@@ -0,0 +1,87 @@
---
# Source: cilium/templates/hubble-relay-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: hubble-relay-config
namespace: kube-system
data:
config.yaml: |
peer-service: unix:///var/run/cilium/hubble.sock
listen-address: :4245
dial-timeout:
retry-timeout:
sort-buffer-len-max:
sort-buffer-drain-timeout:
tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt
tls-client-key-file: /var/lib/hubble-relay/tls/client.key
tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt
disable-server-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}
disable-client-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}
---
# Source: cilium/templates/hubble-ui-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: hubble-ui-envoy
namespace: kube-system
data:
envoy.yaml: |
static_resources:
listeners:
- name: listener_hubble_ui
address:
socket_address:
address: 0.0.0.0
port_value: 8081
filter_chains:
- filters:
- name: envoy.filters.network.http_connection_manager
config:
codec_type: auto
stat_prefix: ingress_http
route_config:
name: local_route
virtual_hosts:
- name: local_service
domains: ['*']
routes:
- match:
prefix: '/api/'
route:
cluster: backend
max_grpc_timeout: 0s
prefix_rewrite: '/'
- match:
prefix: '/'
route:
cluster: frontend
cors:
allow_origin_string_match:
- prefix: '*'
allow_methods: GET, PUT, DELETE, POST, OPTIONS
allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout
max_age: '1728000'
expose_headers: grpc-status,grpc-message
http_filters:
- name: envoy.filters.http.grpc_web
- name: envoy.filters.http.cors
- name: envoy.filters.http.router
clusters:
- name: frontend
connect_timeout: 0.25s
type: strict_dns
lb_policy: round_robin
hosts:
- socket_address:
address: 127.0.0.1
port_value: 8080
- name: backend
connect_timeout: 0.25s
type: logical_dns
lb_policy: round_robin
http2_protocol_options: {}
hosts:
- socket_address:
address: 127.0.0.1
port_value: 8090
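Request flow implied by the Envoy listener above, using only ports defined in this file:

#   client -> proxy :8081
#     /api/  -> cluster "backend"  (127.0.0.1:8090, grpc-web, prefix rewritten to /)
#     /      -> cluster "frontend" (127.0.0.1:8080, static UI)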


@@ -0,0 +1,106 @@
{% if cilium_hubble_tls_generate %}
---
# Source: cilium/templates/hubble-generate-certs-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: hubble-generate-certs
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- secrets
resourceNames:
- hubble-server-certs
- hubble-relay-client-certs
- hubble-relay-server-certs
verbs:
- update
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- hubble-ca-cert
verbs:
- update
- apiGroups:
- ""
resources:
- secrets
resourceNames:
- hubble-ca-secret
verbs:
- get
{% endif %}
---
# Source: cilium/templates/hubble-relay-clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-relay
rules:
- apiGroups:
- ""
resources:
- componentstatuses
- endpoints
- namespaces
- nodes
- pods
- services
verbs:
- get
- list
- watch
---
# Source: cilium/templates/hubble-ui-clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-ui
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- componentstatuses
- endpoints
- namespaces
- nodes
- pods
- services
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- "*"
verbs:
- get
- list
- watch


@@ -0,0 +1,44 @@
{% if cilium_hubble_tls_generate %}
---
# Source: cilium/templates/hubble-generate-certs-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: hubble-generate-certs
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-generate-certs
subjects:
- kind: ServiceAccount
name: hubble-generate-certs
namespace: kube-system
{% endif %}
---
# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-relay
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-relay
subjects:
- kind: ServiceAccount
namespace: kube-system
name: hubble-relay
---
# Source: cilium/templates/hubble-ui-clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-ui
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-ui
subjects:
- kind: ServiceAccount
namespace: kube-system
name: hubble-ui


@@ -0,0 +1,49 @@
---
# Source: cilium/templates/hubble-generate-certs-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: hubble-generate-certs
namespace: kube-system
labels:
k8s-app: hubble-generate-certs
spec:
schedule: "0 0 1 */4 *"
concurrencyPolicy: Forbid
jobTemplate:
spec:
template:
metadata:
labels:
k8s-app: hubble-generate-certs
spec:
serviceAccount: hubble-generate-certs
serviceAccountName: hubble-generate-certs
containers:
- name: certgen
image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- "/usr/bin/cilium-certgen"
# Because this is executed as a job, we pass the values as command
# line args instead of via config map. This allows users to inspect
# the values used in past runs by inspecting the completed pod.
args:
- "--cilium-namespace=kube-system"
- "--hubble-ca-reuse-secret=true"
- "--hubble-ca-secret-name=hubble-ca-secret"
- "--hubble-ca-generate=true"
- "--hubble-ca-validity-duration=94608000s"
- "--hubble-ca-config-map-create=true"
- "--hubble-ca-config-map-name=hubble-ca-cert"
- "--hubble-server-cert-generate=true"
- "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io"
- "--hubble-server-cert-validity-duration=94608000s"
- "--hubble-server-cert-secret-name=hubble-server-certs"
- "--hubble-relay-client-cert-generate=true"
- "--hubble-relay-client-cert-validity-duration=94608000s"
- "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs"
- "--hubble-relay-server-cert-generate=false"
hostNetwork: true
restartPolicy: OnFailure
ttlSecondsAfterFinished: 1800
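For reference, the schedule and validity above line up as follows:

#   schedule "0 0 1 */4 *"  -> run at 00:00 on day 1 of every 4th month
#   94608000s               -> 1095 days (~3 years) of certificate validity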


@@ -0,0 +1,161 @@
---
# Source: cilium/templates/hubble-relay-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: hubble-relay
labels:
k8s-app: hubble-relay
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-relay
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: hubble-relay
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "k8s-app"
operator: In
values:
- cilium
topologyKey: "kubernetes.io/hostname"
containers:
- name: hubble-relay
image: "{{ cilium_hubble_relay_image_repo }}:{{ cilium_hubble_relay_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- hubble-relay
args:
- serve
ports:
- name: grpc
containerPort: 4245
readinessProbe:
tcpSocket:
port: grpc
livenessProbe:
tcpSocket:
port: grpc
volumeMounts:
- mountPath: /var/run/cilium
name: hubble-sock-dir
readOnly: true
- mountPath: /etc/hubble-relay
name: config
readOnly: true
- mountPath: /var/lib/hubble-relay/tls
name: tls
readOnly: true
restartPolicy: Always
serviceAccount: hubble-relay
serviceAccountName: hubble-relay
terminationGracePeriodSeconds: 0
volumes:
- configMap:
name: hubble-relay-config
items:
- key: config.yaml
path: config.yaml
name: config
- hostPath:
path: /var/run/cilium
type: Directory
name: hubble-sock-dir
- projected:
sources:
- secret:
name: hubble-relay-client-certs
items:
- key: tls.crt
path: client.crt
- key: tls.key
path: client.key
- configMap:
name: hubble-ca-cert
items:
- key: ca.crt
path: hubble-server-ca.crt
name: tls
---
# Source: cilium/templates/hubble-ui-deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: kube-system
labels:
k8s-app: hubble-ui
name: hubble-ui
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-ui
template:
metadata:
annotations:
labels:
k8s-app: hubble-ui
spec:
securityContext:
runAsUser: 1001
serviceAccount: hubble-ui
serviceAccountName: hubble-ui
containers:
- name: frontend
image: "{{ cilium_hubble_ui_image_repo }}:{{ cilium_hubble_ui_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
ports:
- containerPort: 8080
name: http
resources:
{}
- name: backend
image: "{{ cilium_hubble_ui_backend_image_repo }}:{{ cilium_hubble_ui_backend_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: EVENTS_SERVER_PORT
value: "8090"
- name: FLOWS_API_ADDR
value: "hubble-relay:80"
ports:
- containerPort: 8090
name: grpc
resources:
{}
- name: proxy
image: "{{ cilium_hubble_envoy_image_repo }}:{{ cilium_hubble_envoy_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
ports:
- containerPort: 8081
name: http
resources:
{}
command: ["envoy"]
args:
[
"-c",
"/etc/envoy.yaml",
"-l",
"info"
]
volumeMounts:
- name: hubble-ui-envoy-yaml
mountPath: /etc/envoy.yaml
subPath: envoy.yaml
volumes:
- name: hubble-ui-envoy-yaml
configMap:
name: hubble-ui-envoy


@@ -0,0 +1,45 @@
---
# Source: cilium/templates/hubble-generate-certs-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: hubble-generate-certs
namespace: kube-system
labels:
k8s-app: hubble-generate-certs
spec:
template:
metadata:
labels:
k8s-app: hubble-generate-certs
spec:
serviceAccount: hubble-generate-certs
serviceAccountName: hubble-generate-certs
containers:
- name: certgen
image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- "/usr/bin/cilium-certgen"
# Because this is executed as a job, we pass the values as command
# line args instead of via config map. This allows users to inspect
# the values used in past runs by inspecting the completed pod.
args:
- "--cilium-namespace=kube-system"
- "--hubble-ca-reuse-secret=true"
- "--hubble-ca-secret-name=hubble-ca-secret"
- "--hubble-ca-generate=true"
- "--hubble-ca-validity-duration=94608000s"
- "--hubble-ca-config-map-create=true"
- "--hubble-ca-config-map-name=hubble-ca-cert"
- "--hubble-server-cert-generate=true"
- "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io"
- "--hubble-server-cert-validity-duration=94608000s"
- "--hubble-server-cert-secret-name=hubble-server-certs"
- "--hubble-relay-client-cert-generate=true"
- "--hubble-relay-client-cert-validity-duration=94608000s"
- "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs"
- "--hubble-relay-server-cert-generate=false"
hostNetwork: true
restartPolicy: OnFailure
ttlSecondsAfterFinished: 1800


@@ -0,0 +1,23 @@
{% if cilium_hubble_tls_generate %}
---
# Source: cilium/templates/hubble-generate-certs-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hubble-generate-certs
namespace: kube-system
{% endif %}
---
# Source: cilium/templates/hubble-relay-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hubble-relay
namespace: kube-system
---
# Source: cilium/templates/hubble-ui-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hubble-ui
namespace: kube-system


@@ -0,0 +1,58 @@
{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %}
---
# Source: cilium/templates/cilium-agent-service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-metrics
namespace: kube-system
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: "9091"
labels:
k8s-app: hubble
spec:
clusterIP: None
type: ClusterIP
ports:
- name: hubble-metrics
port: 9091
protocol: TCP
targetPort: hubble-metrics
selector:
k8s-app: cilium
{% endif %}
---
# Source: cilium/templates/hubble-relay-service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-relay
namespace: kube-system
labels:
k8s-app: hubble-relay
spec:
type: ClusterIP
selector:
k8s-app: hubble-relay
ports:
- protocol: TCP
port: 80
targetPort: 4245
---
# Source: cilium/templates/hubble-ui-service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-ui
labels:
k8s-app: hubble-ui
namespace: kube-system
spec:
selector:
k8s-app: hubble-ui
ports:
- name: http
port: 80
targetPort: 8081
type: ClusterIP
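The hubble-ui Service above is ClusterIP-only. A hedged sketch of exposing it with an Ingress (host and name are illustrative, not part of this commit):

---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: hubble-ui                  # hypothetical
  namespace: kube-system
spec:
  rules:
    - host: hubble.example.local   # illustrative host
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: hubble-ui
                port:
                  number: 80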