Commit 4c32a7239d by havelight-ee on 2023-05-30 14:44:26 +09:00 (parent 9a3174deef)
2598 changed files with 164595 additions and 487 deletions

View File: defaults/main.yml

@@ -0,0 +1,93 @@
---
# Limits for coredns
dns_memory_limit: 300Mi
dns_cpu_requests: 100m
dns_memory_requests: 70Mi
dns_min_replicas: "{{ [ 2, groups['k8s_cluster'] | length ] | min }}"
dns_nodes_per_replica: 16
dns_cores_per_replica: 256
dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas|int > 1 else 'false' }}"
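# The dns-autoscaler runs the cluster-proportional-autoscaler in "linear" mode, so the
# effective replica count is
#   max(ceil(cores / dns_cores_per_replica), ceil(nodes / dns_nodes_per_replica), dns_min_replicas)
# e.g. a hypothetical 40-node cluster with 8 cores per node gives
# max(ceil(320/256), ceil(40/16), 2) = max(2, 3, 2) = 3 CoreDNS replicas.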
enable_coredns_reverse_dns_lookups: true
coredns_ordinal_suffix: ""
# dns_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
coredns_deployment_nodeselector: "kubernetes.io/os: linux"
coredns_default_zone_cache_block: |
cache 30
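# coredns_external_zones (and nodelocaldns_external_zones for the node-local cache)
# may be set to forward extra zones to dedicated resolvers. A minimal sketch with
# hypothetical values, matching the keys the config templates read:
# coredns_external_zones:
#   - zones:
#       - example.internal.
#     nameservers:
#       - 10.0.0.53
#       - 10.0.0.54
#     cache: 10
#     rewrite:
#       - name substring example.internal example.local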
# dns_upstream_forward_extra_opts apply to coredns forward section as well as nodelocaldns upstream target forward section
# dns_upstream_forward_extra_opts:
# policy: sequential
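# dns_etchosts feeds the CoreDNS "hosts" plugin in both the coredns and
# nodelocaldns Corefiles; hypothetical entries:
# dns_etchosts: |
#   192.168.100.10 mirror.example.internal
#   192.168.100.11 registry.example.internal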
# nodelocaldns
nodelocaldns_cpu_requests: 100m
nodelocaldns_memory_limit: 200Mi
nodelocaldns_memory_requests: 70Mi
nodelocaldns_ds_nodeselector: "kubernetes.io/os: linux"
nodelocaldns_prometheus_port: 9253
nodelocaldns_secondary_prometheus_port: 9255
# Limits for dns-autoscaler
dns_autoscaler_cpu_requests: 20m
dns_autoscaler_memory_requests: 10Mi
dns_autoscaler_deployment_nodeselector: "kubernetes.io/os: linux"
# dns_autoscaler_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
# etcd metrics
# etcd_metrics_service_labels:
# k8s-app: etcd
# app.kubernetes.io/managed-by: Kubespray
# app: kube-prometheus-stack-kube-etcd
# release: prometheus-stack
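# The etcd_metrics templates are only rendered when both etcd_metrics_port and
# etcd_metrics_service_labels are defined (see tasks/main.yml); e.g. with a
# hypothetical metrics listener:
# etcd_metrics_port: 2381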
# Netchecker
deploy_netchecker: false
netchecker_port: 31081
agent_report_interval: 15
netcheck_namespace: default
# Limits for netchecker apps
netchecker_agent_cpu_limit: 30m
netchecker_agent_memory_limit: 100M
netchecker_agent_cpu_requests: 15m
netchecker_agent_memory_requests: 64M
netchecker_server_cpu_limit: 100m
netchecker_server_memory_limit: 256M
netchecker_server_cpu_requests: 50m
netchecker_server_memory_requests: 64M
netchecker_etcd_cpu_limit: 200m
netchecker_etcd_memory_limit: 256M
netchecker_etcd_cpu_requests: 100m
netchecker_etcd_memory_requests: 128M
# SecurityContext when PodSecurityPolicy is enabled
netchecker_agent_user: 1000
netchecker_server_user: 1000
netchecker_agent_group: 1000
netchecker_server_group: 1000
# Dashboard
dashboard_replicas: 1
# Namespace for dashboard
dashboard_namespace: kube-system
# Limits for dashboard
dashboard_cpu_limit: 100m
dashboard_memory_limit: 256M
dashboard_cpu_requests: 50m
dashboard_memory_requests: 64M
# Set dashboard_use_custom_certs to true if overriding dashboard_certs_secret_name with a secret that
# contains dashboard_tls_key_file and dashboard_tls_cert_file instead of using the initContainer provisioned certs
dashboard_use_custom_certs: false
dashboard_certs_secret_name: kubernetes-dashboard-certs
dashboard_tls_key_file: dashboard.key
dashboard_tls_cert_file: dashboard.crt
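# One way to provision such a secret by hand (hypothetical paths, namespace per
# dashboard_namespace above):
# kubectl -n kube-system create secret generic kubernetes-dashboard-certs \
#   --from-file=dashboard.key=./dashboard.key --from-file=dashboard.crt=./dashboard.crt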
dashboard_master_toleration: true
# Override dashboard default settings
dashboard_token_ttl: 900
dashboard_skip_login: false
# Policy Controllers
# policy_controller_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]

View File: tasks/cleanup_dns.yml

@@ -0,0 +1,44 @@
---
- name: Kubernetes Apps | Register coredns deployment annotation `createdby`
command: "{{ kubectl }} get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
register: createdby_annotation_deploy
changed_when: false
check_mode: false
ignore_errors: true # noqa ignore-errors
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Register coredns service annotation `createdby`
command: "{{ kubectl }} get svc -n kube-system coredns -o jsonpath='{ .metadata.annotations.createdby }'"
register: createdby_annotation_svc
changed_when: false
check_mode: false
ignore_errors: true # noqa ignore-errors
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Delete kubeadm CoreDNS
kube:
name: "coredns"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "deploy"
state: absent
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
- createdby_annotation_deploy.stdout != 'kubespray'
- name: Kubernetes Apps | Delete kubeadm Kube-DNS service
kube:
name: "kube-dns"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "svc"
state: absent
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
- createdby_annotation_svc.stdout != 'kubespray'
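# The createdby checks above ensure only kubeadm-provisioned objects get removed:
# the CoreDNS Deployment and Service that Kubespray lays down are annotated
# createdby: kubespray (see the coredns templates) and are left untouched.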

View File: tasks/coredns.yml

@@ -0,0 +1,44 @@
---
- name: Kubernetes Apps | Lay Down CoreDNS templates
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
loop:
- { name: coredns, file: coredns-clusterrole.yml, type: clusterrole }
- { name: coredns, file: coredns-clusterrolebinding.yml, type: clusterrolebinding }
- { name: coredns, file: coredns-config.yml, type: configmap }
- { name: coredns, file: coredns-deployment.yml, type: deployment }
- { name: coredns, file: coredns-sa.yml, type: sa }
- { name: coredns, file: coredns-svc.yml, type: svc }
- { name: dns-autoscaler, file: dns-autoscaler.yml, type: deployment }
- { name: dns-autoscaler, file: dns-autoscaler-clusterrole.yml, type: clusterrole }
- { name: dns-autoscaler, file: dns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding }
- { name: dns-autoscaler, file: dns-autoscaler-sa.yml, type: sa }
register: coredns_manifests
vars:
clusterIP: "{{ skydns_server }}"
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns
- name: Kubernetes Apps | Lay Down Secondary CoreDNS Template
template:
src: "{{ item.src }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- { name: coredns, src: coredns-deployment.yml, file: coredns-deployment-secondary.yml, type: deployment }
- { name: coredns, src: coredns-svc.yml, file: coredns-svc-secondary.yml, type: svc }
- { name: dns-autoscaler, src: dns-autoscaler.yml, file: coredns-autoscaler-secondary.yml, type: deployment }
register: coredns_secondary_manifests
vars:
clusterIP: "{{ skydns_server_secondary }}"
coredns_ordinal_suffix: "-secondary"
when:
- dns_mode == 'coredns_dual'
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns
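# Rendering the same templates a second time with coredns_ordinal_suffix overridden
# to "-secondary" is what produces the distinct coredns-secondary Deployment,
# Service (bound to skydns_server_secondary) and autoscaler when dns_mode == 'coredns_dual'.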

View File: tasks/dashboard.yml

@@ -0,0 +1,21 @@
---
- name: Kubernetes Apps | Lay down dashboard template
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- { file: dashboard.yml, type: deploy, name: kubernetes-dashboard }
register: manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Start dashboard
kube:
name: "{{ item.item.name }}"
namespace: "{{ dashboard_namespace }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0]

View File: tasks/etcd_metrics.yml

@@ -0,0 +1,22 @@
---
- name: Kubernetes Apps | Lay down etcd_metrics templates
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- { file: etcd_metrics-endpoints.yml, type: endpoints, name: etcd-metrics }
- { file: etcd_metrics-service.yml, type: service, name: etcd-metrics }
register: manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Start etcd_metrics
kube:
name: "{{ item.item.name }}"
namespace: kube-system
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0]

View File: tasks/main.yml

@@ -0,0 +1,82 @@
---
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
client_cert: "{{ kube_apiserver_client_cert }}"
client_key: "{{ kube_apiserver_client_key }}"
register: result
until: result.status == 200
retries: 20
delay: 1
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Cleanup DNS
import_tasks: cleanup_dns.yml
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
- coredns
- nodelocaldns
- name: Kubernetes Apps | CoreDNS
import_tasks: "coredns.yml"
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns
- name: Kubernetes Apps | nodelocalDNS
import_tasks: "nodelocaldns.yml"
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- name: Kubernetes Apps | Start Resources
kube:
name: "{{ item.item.name }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items:
- "{{ coredns_manifests.results | default({}) }}"
- "{{ coredns_secondary_manifests.results | default({}) }}"
- "{{ nodelocaldns_manifests.results | default({}) }}"
- "{{ nodelocaldns_second_manifests.results | default({}) }}"
when:
- dns_mode != 'none'
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
register: resource_result
until: resource_result is succeeded
retries: 4
delay: 5
tags:
- coredns
- nodelocaldns
loop_control:
label: "{{ item.item.file }}"
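# Each entry of the registered *_manifests.results wraps the original template
# loop item under .item, which is why the task above reads item.item.name,
# item.item.type and item.item.file.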
- name: Kubernetes Apps | Etcd metrics endpoints
import_tasks: etcd_metrics.yml
when: etcd_metrics_port is defined and etcd_metrics_service_labels is defined
tags:
- etcd_metrics
- name: Kubernetes Apps | Netchecker
import_tasks: netchecker.yml
when: deploy_netchecker
tags:
- netchecker
- name: Kubernetes Apps | Dashboard
import_tasks: dashboard.yml
when: dashboard_enabled
tags:
- dashboard

View File: tasks/netchecker.yml

@@ -0,0 +1,56 @@
---
- name: Kubernetes Apps | Check AppArmor status
command: which apparmor_parser
register: apparmor_status
when:
- inventory_hostname == groups['kube_control_plane'][0]
failed_when: false
- name: Kubernetes Apps | Set apparmor_enabled
set_fact:
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Netchecker Templates list
set_fact:
netchecker_templates:
- {file: netchecker-ns.yml, type: ns, name: netchecker-namespace}
- {file: netchecker-agent-sa.yml, type: sa, name: netchecker-agent}
- {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent}
- {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet}
- {file: netchecker-server-sa.yml, type: sa, name: netchecker-server}
- {file: netchecker-server-clusterrole.yml, type: clusterrole, name: netchecker-server}
- {file: netchecker-server-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-server}
- {file: netchecker-server-deployment.yml, type: deployment, name: netchecker-server}
- {file: netchecker-server-svc.yml, type: svc, name: netchecker-service}
netchecker_templates_for_psp:
- {file: netchecker-agent-hostnet-psp.yml, type: podsecuritypolicy, name: netchecker-agent-hostnet-policy}
- {file: netchecker-agent-hostnet-clusterrole.yml, type: clusterrole, name: netchecker-agent}
- {file: netchecker-agent-hostnet-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-agent}
- name: Kubernetes Apps | Append extra templates to Netchecker Templates list for PodSecurityPolicy
set_fact:
netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates }}"
when: podsecuritypolicy_enabled
- name: Kubernetes Apps | Lay Down Netchecker Template
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items: "{{ netchecker_templates }}"
register: manifests
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Start Netchecker Resources
kube:
name: "{{ item.item.name }}"
namespace: "{{ netcheck_namespace }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

View File: tasks/nodelocaldns.yml

@@ -0,0 +1,75 @@
---
- name: Kubernetes Apps | Set up necessary nodelocaldns parameters
set_fact:
primaryClusterIP: >-
{%- if dns_mode in ['coredns', 'coredns_dual'] -%}
{{ skydns_server }}
{%- elif dns_mode == 'manual' -%}
{{ manual_dns_server }}
{%- endif -%}
secondaryclusterIP: "{{ skydns_server_secondary }}"
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- coredns
- name: Kubernetes Apps | Lay Down nodelocaldns Template
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- { name: nodelocaldns, file: nodelocaldns-config.yml, type: configmap }
- { name: nodelocaldns, file: nodelocaldns-sa.yml, type: sa }
- { name: nodelocaldns, file: nodelocaldns-daemonset.yml, type: daemonset }
register: nodelocaldns_manifests
vars:
forwardTarget: >-
{%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%}
{{ primaryClusterIP }} {{ secondaryclusterIP }}
{%- else -%}
{{ primaryClusterIP }}
{%- endif -%}
upstreamForwardTarget: >-
{%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
{{ upstream_dns_servers|join(' ') }}
{%- else -%}
/etc/resolv.conf
{%- endif -%}
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- coredns
- name: Kubernetes Apps | Lay Down nodelocaldns-secondary Template
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0644
with_items:
- { name: nodelocaldns, file: nodelocaldns-second-daemonset.yml, type: daemonset }
register: nodelocaldns_second_manifests
vars:
forwardTarget: >-
{%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%}
{{ primaryClusterIP }} {{ secondaryclusterIP }}
{%- else -%}
{{ primaryClusterIP }}
{%- endif -%}
upstreamForwardTarget: >-
{%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
{{ upstream_dns_servers|join(' ') }}
{%- else -%}
/etc/resolv.conf
{%- endif -%}
when:
- enable_nodelocaldns
- enable_nodelocaldns_secondary
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- coredns

View File: templates/coredns-clusterrole.yml.j2

@@ -0,0 +1,32 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch

View File: templates/coredns-clusterrolebinding.yml.j2

@@ -0,0 +1,18 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system

View File: templates/coredns-config.yml.j2

@@ -0,0 +1,74 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
{% if coredns_external_zones is defined and coredns_external_zones|length > 0 %}
{% for block in coredns_external_zones %}
{{ block['zones'] | join(' ') }} {
log
errors
{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
{% for rewrite_match in block['rewrite'] %}
rewrite {{ rewrite_match }}
{% endfor %}
{% endif %}
forward . {{ block['nameservers'] | join(' ') }}
loadbalance
cache {{ block['cache'] | default(5) }}
reload
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% endfor %}
{% endif %}
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes {{ dns_domain }} {% if enable_coredns_reverse_dns_lookups %}in-addr.arpa ip6.arpa {% endif %}{
pods insecure
{% if enable_coredns_k8s_endpoint_pod_names %}
endpoint_pod_names
{% endif %}
{% if enable_coredns_reverse_dns_lookups %}
fallthrough in-addr.arpa ip6.arpa
{% endif %}
}
prometheus :9153
forward . {{ upstream_dns_servers|join(' ') if upstream_dns_servers is defined and upstream_dns_servers|length > 0 else '/etc/resolv.conf' }} {
prefer_udp
max_concurrent 1000
{% if dns_upstream_forward_extra_opts is defined %}
{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %}
{{ optname }} {{ optvalue }}
{% endfor %}
{% endif %}
}
{% if enable_coredns_k8s_external %}
k8s_external {{ coredns_k8s_external_zone }}
{% endif %}
{{ coredns_default_zone_cache_block | indent(width=8, first=False) }}
loop
reload
loadbalance
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% if dns_etchosts | default(None) %}
hosts: |
{{ dns_etchosts | indent(width=4, first=False) }}
{% endif %}

View File: templates/coredns-deployment.yml.j2

@@ -0,0 +1,119 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: "coredns{{ coredns_ordinal_suffix }}"
namespace: kube-system
labels:
k8s-app: "kube-dns{{ coredns_ordinal_suffix }}"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "coredns{{ coredns_ordinal_suffix }}"
spec:
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
maxSurge: 10%
selector:
matchLabels:
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
template:
metadata:
labels:
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
createdby: 'kubespray'
spec:
nodeSelector:
{{ coredns_deployment_nodeselector }}
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{% if dns_extra_tolerations is defined %}
{{ dns_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }}
{% endif %}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: In
values:
- ""
containers:
- name: coredns
image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
{% if dns_etchosts | default(None) %}
- key: hosts
path: hosts
{% endif %}

View File: templates/coredns-sa.yml.j2

@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile

View File: templates/coredns-svc.yml.j2

@@ -0,0 +1,28 @@
---
apiVersion: v1
kind: Service
metadata:
name: coredns{{ coredns_ordinal_suffix }}
namespace: kube-system
labels:
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
kubernetes.io/name: "coredns{{ coredns_ordinal_suffix }}"
addonmanager.kubernetes.io/mode: Reconcile
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
createdby: 'kubespray'
spec:
selector:
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
clusterIP: {{ clusterIP }}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP

View File: templates/dashboard.yml.j2

@@ -0,0 +1,339 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
{% if dashboard_namespace != "kube-system" %}
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ dashboard_namespace }}
labels:
name: {{ dashboard_namespace }}
{% endif %}
---
# ------------------- Dashboard Secrets ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: {{ dashboard_namespace }}
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: {{ dashboard_namespace }}
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: {{ dashboard_namespace }}
type: Opaque
---
# ------------------- Dashboard ConfigMap ------------------- #
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: {{ dashboard_namespace }}
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
spec:
replicas: {{ dashboard_replicas }}
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
priorityClassName: system-cluster-critical
containers:
- name: kubernetes-dashboard
image: {{ dashboard_image_repo }}:{{ dashboard_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ dashboard_cpu_limit }}
memory: {{ dashboard_memory_limit }}
requests:
cpu: {{ dashboard_cpu_requests }}
memory: {{ dashboard_memory_requests }}
ports:
- containerPort: 8443
protocol: TCP
args:
- --namespace={{ dashboard_namespace }}
{% if dashboard_use_custom_certs %}
- --tls-key-file={{ dashboard_tls_key_file }}
- --tls-cert-file={{ dashboard_tls_cert_file }}
{% else %}
- --auto-generate-certificates
{% endif %}
{% if dashboard_skip_login %}
- --enable-skip-login
{% endif %}
- --authentication-mode=token
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
- --token-ttl={{ dashboard_token_ttl }}
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: {{ dashboard_certs_secret_name }}
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
{% if dashboard_master_toleration %}
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{% endif %}
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ dashboard_namespace }}
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
# ------------------- Metrics Scraper ClusterRole ------------------- #
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# ------------------- Metrics Scraper Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-metrics-scraper
name: dashboard-metrics-scraper
namespace: {{ dashboard_namespace }}
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: kubernetes-metrics-scraper
---
# ------------------- Metrics Scraper Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-metrics-scraper
name: kubernetes-metrics-scraper
namespace: {{ dashboard_namespace }}
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-metrics-scraper
template:
metadata:
labels:
k8s-app: kubernetes-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
priorityClassName: system-cluster-critical
containers:
- name: kubernetes-metrics-scraper
image: {{ dashboard_metrics_scraper_repo }}:{{ dashboard_metrics_scraper_tag }}
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
serviceAccountName: kubernetes-dashboard
volumes:
- name: tmp-volume
emptyDir: {}
{% if dashboard_master_toleration %}
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{% endif %}

View File: templates/dns-autoscaler-clusterrole.yml.j2

@@ -0,0 +1,34 @@
---
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:dns-autoscaler
labels:
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions", "apps"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]

View File: templates/dns-autoscaler-clusterrolebinding.yml.j2

@@ -0,0 +1,29 @@
---
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:dns-autoscaler
labels:
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: dns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:dns-autoscaler
apiGroup: rbac.authorization.k8s.io

View File: templates/dns-autoscaler-sa.yml.j2

@@ -0,0 +1,22 @@
---
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ServiceAccount
apiVersion: v1
metadata:
name: dns-autoscaler
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile

View File: templates/dns-autoscaler.yml.j2

@@ -0,0 +1,87 @@
---
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: dns-autoscaler{{ coredns_ordinal_suffix }}
namespace: kube-system
labels:
k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }}
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }}
template:
metadata:
labels:
k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }}
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
      nodeSelector:
        {{ dns_autoscaler_deployment_nodeselector }}
      priorityClassName: system-cluster-critical
      securityContext:
        supplementalGroups: [ 65534 ]
        fsGroup: 65534
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
{% if dns_autoscaler_extra_tolerations is defined %}
{{ dns_autoscaler_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }}
{% endif %}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }}
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: In
values:
- ""
containers:
- name: autoscaler
image: "{{ dnsautoscaler_image_repo }}:{{ dnsautoscaler_image_tag }}"
resources:
requests:
cpu: {{ dns_autoscaler_cpu_requests }}
memory: {{ dns_autoscaler_memory_requests }}
readinessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --default-params={"linear":{"preventSinglePointFailure":{{ dns_prevent_single_point_failure }},"coresPerReplica":{{ dns_cores_per_replica }},"nodesPerReplica":{{ dns_nodes_per_replica }},"min":{{ dns_min_replicas }}}}
- --logtostderr=true
- --v=2
- --configmap=dns-autoscaler{{ coredns_ordinal_suffix }}
- --target=Deployment/coredns{{ coredns_ordinal_suffix }}
serviceAccountName: dns-autoscaler

View File: templates/etcd_metrics-endpoints.yml.j2

@@ -0,0 +1,20 @@
apiVersion: v1
kind: Endpoints
metadata:
name: etcd-metrics
namespace: kube-system
labels:
k8s-app: etcd
app.kubernetes.io/managed-by: Kubespray
subsets:
{% for etcd_metrics_address, etcd_host in etcd_metrics_addresses.split(',') | zip(etcd_hosts) %}
- addresses:
- ip: {{ etcd_metrics_address | urlsplit('hostname') }}
targetRef:
kind: Node
name: {{ etcd_host }}
ports:
- name: http-metrics
port: {{ etcd_metrics_address | urlsplit('port') }}
protocol: TCP
{% endfor %}
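{# The zip above assumes etcd_metrics_addresses is a comma-separated list of
   metrics URLs aligned one-to-one with etcd_hosts, e.g. (hypothetical):
   etcd_metrics_addresses: "http://10.0.0.11:2381,http://10.0.0.12:2381" #}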

View File: templates/etcd_metrics-service.yml.j2

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: etcd-metrics
namespace: kube-system
labels:
{{ etcd_metrics_service_labels | to_yaml(indent=2, width=1337) | indent(width=4) }}
spec:
ports:
- name: http-metrics
protocol: TCP
port: {{ etcd_metrics_port }}
# targetPort:

View File: templates/netchecker-agent-ds.yml.j2

@@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: netchecker-agent
name: netchecker-agent
namespace: {{ netcheck_namespace }}
spec:
selector:
matchLabels:
app: netchecker-agent
template:
metadata:
name: netchecker-agent
labels:
app: netchecker-agent
spec:
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
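{# The trailing {{''}} ends the line with an expression rather than a block tag,
   so Jinja (with Ansible's trim_blocks default) keeps the newline after the
   inline if/else above #}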
tolerations:
- effect: NoSchedule
operator: Exists
nodeSelector:
kubernetes.io/os: linux
containers:
- name: netchecker-agent
image: "{{ netcheck_agent_image_repo }}:{{ netcheck_agent_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
args:
- "-v=5"
- "-alsologtostderr=true"
- "-serverendpoint=netchecker-service:8081"
- "-reportinterval={{ agent_report_interval }}"
resources:
limits:
cpu: {{ netchecker_agent_cpu_limit }}
memory: {{ netchecker_agent_memory_limit }}
requests:
cpu: {{ netchecker_agent_cpu_requests }}
memory: {{ netchecker_agent_memory_requests }}
securityContext:
runAsUser: {{ netchecker_agent_user | default('0') }}
runAsGroup: {{ netchecker_agent_group | default('0') }}
serviceAccountName: netchecker-agent
updateStrategy:
rollingUpdate:
maxUnavailable: 100%
type: RollingUpdate

View File: templates/netchecker-agent-hostnet-clusterrole.yml.j2

@@ -0,0 +1,14 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp:netchecker-agent-hostnet
rules:
- apiGroups:
- policy
resourceNames:
- netchecker-agent-hostnet
resources:
- podsecuritypolicies
verbs:
- use

View File: templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2

@@ -0,0 +1,13 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp:netchecker-agent-hostnet
namespace: {{ netcheck_namespace }}
subjects:
- kind: ServiceAccount
name: netchecker-agent
namespace: {{ netcheck_namespace }}
roleRef:
kind: ClusterRole
name: psp:netchecker-agent-hostnet
apiGroup: rbac.authorization.k8s.io

View File: templates/netchecker-agent-hostnet-ds.yml.j2

@@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: netchecker-agent-hostnet
name: netchecker-agent-hostnet
namespace: {{ netcheck_namespace }}
spec:
selector:
matchLabels:
app: netchecker-agent-hostnet
template:
metadata:
name: netchecker-agent-hostnet
labels:
app: netchecker-agent-hostnet
spec:
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/os: linux
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: netchecker-agent
image: "{{ netcheck_agent_image_repo }}:{{ netcheck_agent_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
args:
- "-v=5"
- "-alsologtostderr=true"
- "-serverendpoint=netchecker-service:8081"
- "-reportinterval={{ agent_report_interval }}"
resources:
limits:
cpu: {{ netchecker_agent_cpu_limit }}
memory: {{ netchecker_agent_memory_limit }}
requests:
cpu: {{ netchecker_agent_cpu_requests }}
memory: {{ netchecker_agent_memory_requests }}
securityContext:
runAsUser: {{ netchecker_agent_user | default('0') }}
runAsGroup: {{ netchecker_agent_group | default('0') }}
serviceAccountName: netchecker-agent
updateStrategy:
rollingUpdate:
maxUnavailable: 100%
type: RollingUpdate

View File: templates/netchecker-agent-hostnet-psp.yml.j2

@@ -0,0 +1,44 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: netchecker-agent-hostnet
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: true
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false

View File: templates/netchecker-agent-sa.yml.j2

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: netchecker-agent
namespace: {{ netcheck_namespace }}

View File: templates/netchecker-ns.yml.j2

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: "{{ netcheck_namespace }}"
labels:
name: "{{ netcheck_namespace }}"

View File: templates/netchecker-server-clusterrole.yml.j2

@@ -0,0 +1,9 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: netchecker-server
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "get"]

View File: templates/netchecker-server-clusterrolebinding.yml.j2

@@ -0,0 +1,13 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: netchecker-server
subjects:
- kind: ServiceAccount
name: netchecker-server
namespace: {{ netcheck_namespace }}
roleRef:
kind: ClusterRole
name: netchecker-server
apiGroup: rbac.authorization.k8s.io

View File: templates/netchecker-server-deployment.yml.j2

@@ -0,0 +1,83 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: netchecker-server
namespace: {{ netcheck_namespace }}
labels:
app: netchecker-server
spec:
replicas: 1
selector:
matchLabels:
app: netchecker-server
template:
metadata:
name: netchecker-server
labels:
app: netchecker-server
spec:
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
volumes:
- name: etcd-data
emptyDir: {}
containers:
- name: netchecker-server
image: "{{ netcheck_server_image_repo }}:{{ netcheck_server_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ netchecker_server_cpu_limit }}
memory: {{ netchecker_server_memory_limit }}
requests:
cpu: {{ netchecker_server_cpu_requests }}
memory: {{ netchecker_server_memory_requests }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
runAsUser: {{ netchecker_server_user | default('0') }}
runAsGroup: {{ netchecker_server_group | default('0') }}
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ports:
- containerPort: 8081
args:
- -v=5
- -logtostderr
- -kubeproxyinit=false
- -endpoint=0.0.0.0:8081
- -etcd-endpoints=http://127.0.0.1:2379
- name: etcd
image: "{{ etcd_image_repo }}:{{ netcheck_etcd_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- etcd
- --listen-client-urls=http://127.0.0.1:2379
- --advertise-client-urls=http://127.0.0.1:2379
- --data-dir=/var/lib/etcd
- --enable-v2
- --force-new-cluster
volumeMounts:
- mountPath: /var/lib/etcd
name: etcd-data
resources:
limits:
cpu: {{ netchecker_etcd_cpu_limit }}
memory: {{ netchecker_etcd_memory_limit }}
requests:
cpu: {{ netchecker_etcd_cpu_requests }}
memory: {{ netchecker_etcd_memory_requests }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
runAsUser: {{ netchecker_server_user | default('0') }}
runAsGroup: {{ netchecker_server_group | default('0') }}
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
tolerations:
- effect: NoSchedule
operator: Exists
serviceAccountName: netchecker-server

View File: templates/netchecker-server-sa.yml.j2

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: netchecker-server
namespace: {{ netcheck_namespace }}

View File: templates/netchecker-server-svc.yml.j2

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: netchecker-service
namespace: {{ netcheck_namespace }}
spec:
selector:
app: netchecker-server
ports:
    - protocol: TCP
      port: 8081
      targetPort: 8081
      nodePort: {{ netchecker_port }}
type: NodePort

View File: templates/nodelocaldns-config.yml.j2

@@ -0,0 +1,182 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nodelocaldns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %}
{% for block in nodelocaldns_external_zones %}
{{ block['zones'] | join(' ') }} {
errors
cache {{ block['cache'] | default(30) }}
reload
{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
{% for rewrite_match in block['rewrite'] %}
rewrite {{ rewrite_match }}
{% endfor %}
{% endif %}
loop
bind {{ nodelocaldns_ip }}
forward . {{ block['nameservers'] | join(' ') }}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
log
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% endfor %}
{% endif %}
{{ dns_domain }}:53 {
errors
cache {
success 9984 30
denial 9984 5
}
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
health {{ nodelocaldns_ip }}:{{ nodelocaldns_health_port }}
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
in-addr.arpa:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
}
ip6.arpa:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
}
.:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} {
{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %}
{{ optname }} {{ optvalue }}
{% endfor %}
}{% endif %}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% if enable_nodelocaldns_secondary %}
Corefile-second: |
{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %}
{% for block in nodelocaldns_external_zones %}
{{ block['zones'] | join(' ') }} {
errors
cache {{ block['cache'] | default(30) }}
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ block['nameservers'] | join(' ') }}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
log
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% endfor %}
{% endif %}
{{ dns_domain }}:53 {
errors
cache {
success 9984 30
denial 9984 5
}
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
health {{ nodelocaldns_ip }}:{{ nodelocaldns_second_health_port }}
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
in-addr.arpa:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
}
ip6.arpa:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ forwardTarget }} {
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
}
.:53 {
errors
cache 30
reload
loop
bind {{ nodelocaldns_ip }}
forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} {
{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %}
{{ optname }} {{ optvalue }}
{% endfor %}
}{% endif %}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
}
{% endif %}
{% if dns_etchosts | default(None) %}
hosts: |
{{ dns_etchosts | indent(width=4, first=False) }}
{% endif %}

View File: templates/nodelocaldns-daemonset.yml.j2

@@ -0,0 +1,115 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nodelocaldns
namespace: kube-system
labels:
k8s-app: kube-dns
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: nodelocaldns
template:
metadata:
labels:
k8s-app: nodelocaldns
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '{{ nodelocaldns_prometheus_port }}'
spec:
nodeSelector:
{{ nodelocaldns_ds_nodeselector }}
priorityClassName: system-cluster-critical
serviceAccountName: nodelocaldns
hostNetwork: true
dnsPolicy: Default # Don't use cluster DNS.
tolerations:
- effect: NoSchedule
operator: "Exists"
- effect: NoExecute
operator: "Exists"
containers:
- name: node-cache
image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}"
resources:
limits:
memory: {{ nodelocaldns_memory_limit }}
requests:
cpu: {{ nodelocaldns_cpu_requests }}
memory: {{ nodelocaldns_memory_requests }}
args:
- -localip
- {{ nodelocaldns_ip }}
- -conf
- /etc/coredns/Corefile
- -upstreamsvc
- coredns
{% if enable_nodelocaldns_secondary %}
- -skipteardown
{% else %}
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9253
name: metrics
protocol: TCP
{% endif %}
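{# -skipteardown keeps the nodelocaldns dummy interface and iptables rules in
   place on exit, so the secondary daemonset can keep serving :53 while this
   pod restarts #}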
securityContext:
privileged: true
{% if nodelocaldns_bind_metrics_host_ip %}
env:
- name: MY_HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
{% endif %}
livenessProbe:
httpGet:
host: {{ nodelocaldns_ip }}
path: /health
port: {{ nodelocaldns_health_port }}
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
host: {{ nodelocaldns_ip }}
path: /health
port: {{ nodelocaldns_health_port }}
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: config-volume
configMap:
name: nodelocaldns
items:
- key: Corefile
path: Corefile
{% if dns_etchosts | default(None) %}
- key: hosts
path: hosts
{% endif %}
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
updateStrategy:
rollingUpdate:
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate

View File: templates/nodelocaldns-sa.yml.j2

@@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: nodelocaldns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile

View File: templates/nodelocaldns-second-daemonset.yml.j2

@@ -0,0 +1,103 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nodelocaldns-second
namespace: kube-system
labels:
k8s-app: kube-dns
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: nodelocaldns-second
template:
metadata:
labels:
k8s-app: nodelocaldns-second
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '{{ nodelocaldns_secondary_prometheus_port }}'
spec:
nodeSelector:
{{ nodelocaldns_ds_nodeselector }}
priorityClassName: system-cluster-critical
serviceAccountName: nodelocaldns
hostNetwork: true
dnsPolicy: Default # Don't use cluster DNS.
tolerations:
- effect: NoSchedule
operator: "Exists"
- effect: NoExecute
operator: "Exists"
containers:
- name: node-cache
image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}"
resources:
limits:
memory: {{ nodelocaldns_memory_limit }}
requests:
cpu: {{ nodelocaldns_cpu_requests }}
memory: {{ nodelocaldns_memory_requests }}
args: [ "-localip", "{{ nodelocaldns_ip }}", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns", "-skipteardown" ]
securityContext:
privileged: true
{% if nodelocaldns_bind_metrics_host_ip %}
env:
- name: MY_HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
{% endif %}
livenessProbe:
httpGet:
host: {{ nodelocaldns_ip }}
path: /health
port: {{ nodelocaldns_health_port }}
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
host: {{ nodelocaldns_ip }}
path: /health
port: {{ nodelocaldns_health_port }}
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
- name: xtables-lock
mountPath: /run/xtables.lock
lifecycle:
preStop:
exec:
command:
- sh
- -c
- sleep {{ nodelocaldns_secondary_skew_seconds }} && kill -9 1
volumes:
- name: config-volume
configMap:
name: nodelocaldns
items:
- key: Corefile-second
path: Corefile
{% if dns_etchosts | default(None) %}
- key: hosts
path: hosts
{% endif %}
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Implement a time skew between the main nodelocaldns and this secondary.
# Since the two nodelocaldns instances share the :53 port, we want to keep
      # at least one running at any time, even if the manifests are replaced simultaneously.
terminationGracePeriodSeconds: {{ nodelocaldns_secondary_skew_seconds }}
updateStrategy:
rollingUpdate:
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate