Clean Code

Commit 0273450ff6 by dsk-minchulahn, 2023-12-19 13:03:29 +09:00 (parent 947561ce1d)
4237 changed files with 0 additions and 7447 deletions

01-old/.gitignore

@@ -0,0 +1,5 @@
.terraform/
aws_kubernetes/aws_kops_prod/terraform/.terraform.lock.hcl
aws_kubernetes/aws_kops_prod/terraform/terraform.tfstate
aws_kubernetes/aws_kops_prod/terraform/99_variables.tf
aws_kubernetes/aws_kops_prod/terraform/terraform.tfstate.backup


@@ -0,0 +1,14 @@
cd terraform/tf-datasaker/
terraform init
terraform plan
terraform apply
// terraform destroy
# associate the EIP with the bastion in AWS
# bastion
1. copy authorized_keys to the bastion
2. install kubectl
3. install helm
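Steps 2 and 3 can be scripted on the bastion; a minimal sketch, assuming an Ubuntu bastion and pinning kubectl to the cluster's 1.23.x line (the exact version choice is an assumption):
```
# download a kubectl matching the cluster version and install it
curl -LO "https://dl.k8s.io/release/v1.23.10/bin/linux/amd64/kubectl"
sudo install -m 0755 kubectl /usr/local/bin/kubectl
# install helm 3 via its official installer script
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
```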


@@ -0,0 +1,421 @@
export KOPS_STATE_STORE=s3://clusters.dev.datasaker.io
kops create cluster \
--name dev.datasaker.io \
--vpc vpc-03cbb88e181ccb46e \
--cloud aws \
--state s3://clusters.dev.datasaker.io \
--ssh-public-key /home/hsgahm/id_rsa_k8s.pub \
--topology private --kubernetes-version "1.23.10" \
--network-cidr "172.21.0.0/16" \
--networking calico \
--container-runtime containerd \
--image ami-0ea5eb4b05645aa8a \
--zones ap-northeast-2a,ap-northeast-2b,ap-northeast-2c \
--master-count 3 \
--master-size t3.small \
--master-volume-size 50 \
--node-count 3 \
--node-size t3.small \
--node-volume-size 100 \
--utility-subnets "subnet-0d762a41fb41d63e5,subnet-0b4f418020349fb84,subnet-05b9f4f02955c3307" \
--subnets "subnet-021536c4f12971c74,subnet-0c90842daa15aa7c7,subnet-0ae3ab7ae241fe761" \
-v 10
kops get instancegroups --name=dev.datasaker.io --state s3://clusters.dev.datasaker.io
kops --name=dev.datasaker.io delete instancegroup nodes-ap-northeast-2a
kops --name=dev.datasaker.io delete instancegroup nodes-ap-northeast-2b
kops --name=dev.datasaker.io delete instancegroup nodes-ap-northeast-2c
kops edit instancegroup --name=dev.datasaker.io master-ap-northeast-2a
kops edit instancegroup --name=dev.datasaker.io master-ap-northeast-2b
kops edit instancegroup --name=dev.datasaker.io master-ap-northeast-2c
rootVolumeSize: 50
kops --name=dev.datasaker.io get ig
kops get clusters
kops edit cluster dev.datasaker.io --state s3://clusters.dev.datasaker.io
// rename the subnets
//ap-northeast-2a -> sbn-dev-a.datasaker
//ap-northeast-2b -> sbn-dev-b.datasaker
//ap-northeast-2c -> sbn-dev-c.datasaker
//utility-ap-northeast-2a -> sbn-dmz-a.datasaker
//utility-ap-northeast-2b -> sbn-dmz-b.datasaker
//utility-ap-northeast-2c -> sbn-dmz-c.datasaker
kops edit instancegroups --name=dev.datasaker.io master-ap-northeast-2a
```
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-09-06T05:44:09Z"
labels:
kops.k8s.io/cluster: dev.datasaker.io
name: master-ap-northeast-2a
spec:
image: ami-0ea5eb4b05645aa8a
instanceMetadata:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: t3.small
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: master-ap-northeast-2a
role: Master
rootVolumeSize: 50
subnets:
- ap-northeast-2a
cloudLabels:
autoscale-off: "True"
autoscale-on: "True"
```
//kops create instancegroup --name=dev.datasaker.io dev-master-a --role master --subnet "ap-northeast-2a"
//kops create instancegroup --name=dev.datasaker.io dev-master-b --role node --subnet "ap-northeast-2b"
//kops create instancegroup --name=dev.datasaker.io dev-master-c --role node --subnet "ap-northeast-2c"
// kops delete instancegroup --name=dev.datasaker.io dev-data-a
// kops delete instancegroup --name=dev.datasaker.io dev-data-b
// kops delete instancegroup --name=dev.datasaker.io dev-data-c
kops create instancegroup --name=dev.datasaker.io dev-data-a --role node --subnet "ap-northeast-2a"
kops edit instancegroup --name=dev.datasaker.io dev-data-a
```
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-09-05T05:53:59Z"
labels:
kops.k8s.io/cluster: dev.datasaker.io
name: dev-data-a
spec:
image: ami-0ea5eb4b05645aa8a
machineType: m5.4xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: dev-data-a
datasaker/group: data
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2a
```
```
cloudLabels:
autoscale-off: "True"
autoscale-on: "True"
```
kops create instancegroup --name=dev.datasaker.io dev-data-b --role node --subnet "ap-northeast-2b"
kops edit instancegroup --name=dev.datasaker.io dev-data-b
```
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-09-05T06:00:56Z"
generation: 1
labels:
kops.k8s.io/cluster: dev.datasaker.io
name: dev-data-b
spec:
image: ami-0ea5eb4b05645aa8a
machineType: m5.4xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data
kops.k8s.io/instancegroup: dev-data-b
role: Node
rootVolumeSize: 100
subnets:
- ap-northeast-2b
```
kops create instancegroup --name=dev.datasaker.io dev-data-c --role node --subnet "ap-northeast-2c"
kops edit instancegroup --name=dev.datasaker.io dev-data-c
```
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: null
name: dev-data-c
spec:
image: ami-0ea5eb4b05645aa8a
machineType: m5.4xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: dev-data-c
datasaker/group: data
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2c
```
// kops delete instancegroup --name=dev.datasaker.io dev-process-a
// kops delete instancegroup --name=dev.datasaker.io dev-process-b
// kops delete instancegroup --name=dev.datasaker.io dev-process-c
kops create instancegroup --name=dev.datasaker.io dev-process-a --role node --subnet "ap-northeast-2a"
kops edit instancegroup --name=dev.datasaker.io dev-process-a
```
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: null
name: dev-process-a
spec:
image: ami-0ea5eb4b05645aa8a
machineType: c5.xlarge
manager: CloudGroup
maxSize: 2
minSize: 2
nodeLabels:
kops.k8s.io/instancegroup: dev-process-a
datasaker/group: process
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2a
```
kops create instancegroup --name=dev.datasaker.io dev-process-b --role node --subnet "ap-northeast-2b"
kops edit instancegroup --name=dev.datasaker.io dev-process-b
```
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-09-05T06:10:03Z"
labels:
kops.k8s.io/cluster: dev.datasaker.io
name: dev-process-b
spec:
image: ami-0ea5eb4b05645aa8a
machineType: c5.xlarge
manager: CloudGroup
maxSize: 2
minSize: 2
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-b
role: Node
rootVolumeSize: 100
subnets:
- ap-northeast-2b
```
kops create instancegroup --name=dev.datasaker.io dev-process-c --role node --subnet "ap-northeast-2c"
kops edit instancegroup --name=dev.datasaker.io dev-process-c
```
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-09-05T06:10:59Z"
labels:
kops.k8s.io/cluster: dev.datasaker.io
name: dev-process-c
spec:
image: ami-0ea5eb4b05645aa8a
machineType: c5.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-c
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2c
```
kops create instancegroup --name=dev.datasaker.io dev-mgmt-a --role node --subnet "ap-northeast-2a"
kops edit instancegroup --name=dev.datasaker.io dev-mgmt-a
```
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: null
name: dev-mgmt-a
spec:
image: ami-0ea5eb4b05645aa8a
machineType: c5.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: dev-mgmt-a
datasaker/group: mgmt
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2a
```
kops create instancegroup --name=dev.datasaker.io dev-mgmt-b --role node --subnet "ap-northeast-2b"
kops edit instancegroup --name=dev.datasaker.io dev-mgmt-b
<!-- change the data instance groups
t3.small
maxSize: 2
minSize: 1
rootVolumeSize: 100
nodeLabels:
keep kops.k8s.io/instancegroup: XXXX
datasaker/group: data -->
// cloudLabels:
// autoscale-off: "True"
// autoscale-on: "True"
kops edit instancegroup --name=dev.datasaker.io dev-data-a
kops edit instancegroup --name=dev.datasaker.io dev-data-b
kops edit instancegroup --name=dev.datasaker.io dev-data-c
kops get --state s3://clusters.dev.datasaker.io --name dev.datasaker.io -o yaml > dev.datasaker.io.yaml
vi dev.datasaker.io.yaml
subnets:
- cidr: 172.21.1.0/24
name: ap-northeast-2a
type: Private
zone: ap-northeast-2a
- cidr: 172.21.2.0/24
name: ap-northeast-2b
type: Private
zone: ap-northeast-2b
- cidr: 172.21.3.0/24
name: ap-northeast-2c
type: Private
zone: ap-northeast-2c
- cidr: 172.21.0.48/28
name: utility-ap-northeast-2a
type: Utility
zone: ap-northeast-2a
- cidr: 172.21.0.64/28
name: utility-ap-northeast-2b
type: Utility
zone: ap-northeast-2b
- cidr: 172.21.0.80/28
name: utility-ap-northeast-2c
type: Utility
zone: ap-northeast-2c
export KOPS_STATE_STORE=s3://clusters.dev.datasaker.io
// kops delete cluster dev.datasaker.io --yes --state=s3://clusters.dev.datasaker.io
// kops delete -f=./dev.datasaker.io.yaml --yes
kops create -f=./dev.datasaker.io.yaml --state=s3://clusters.dev.datasaker.io
kops update cluster dev.datasaker.io --yes --admin --state=s3://clusters.dev.datasaker.io
kops export kubecfg --admin --state=s3://clusters.dev.datasaker.io
kops export kubecfg --admin --kubeconfig ~/workspace/kubeconfig --state=s3://clusters.dev.datasaker.io
kops get secrets sshpublickey admin
kops get secrets sshpublickey admin -oplaintext
MgUKqpCUHLaEcYEuHXTM7ljlTpsnNYSs
ssh admin@3.35.41.191
kops create instancegroup bastions --role Bastion --subnet utility-ap-northeast-2c
kops create secret sshpublickey admin -i ~/.ssh/id_rsa.pub --state=s3://clusters.dev.datasaker.io
kops create secret sshpublickey admin -i id_rsa_k8s.pub --state=s3://clusters.dev.datasaker.io
kops update cluster --yes // to reconfigure the auto-scaling groups
kops update cluster --yes --state=s3://clusters.dev.datasaker.io
kops rolling-update cluster --name dev.datasaker.io --state=s3://clusters.dev.datasaker.io --yes
kops rolling-update cluster --name <clustername> --yes // to immediately roll all the machines so they have the new key (optional)
// Update the Lambda configuration.
get_names = ['ag-dmz-bastion-datasaker','master-ap-northeast-2a.masters.dev.datasaker.io','master-ap-northeast-2b.masters.dev.datasaker.io','master-ap-northeast-2c.masters.dev.datasaker.io','dev-process-a.dev.datasaker.io','dev-process-b.dev.datasaker.io','dev-process-c.dev.datasaker.io','dev-data-a.dev.datasaker.io','dev-data-b.dev.datasaker.io','dev-data-c.dev.datasaker.io','dev-mgmt-a.dev.datasaker.io','dev-mgmt-b.dev.datasaker.io']
Suggestions:
* validate cluster: kops validate cluster --wait 10m
* list nodes: kubectl get nodes --show-labels
* ssh to the master: ssh -i ~/.ssh/id_rsa ubuntu@api.dev.datasaker.io
* the ubuntu user is specific to Ubuntu. If not using Ubuntu please use the appropriate user based on your OS.
* read about installing addons at: https://kops.sigs.k8s.io/addons.
// when kubecfg changed, due to master redeploy
// kops export kubecfg --admin --state s3://clusters.dev.datasaker.io
kops update cluster --name=dev.datasaker.io --state=s3://clusters.dev.datasaker.io --out=./tf-kops-dev-20200907-ip --target=terraform
115.178.73.2/32 exem router
115.178.73.91/32 proxy
3.35.247.45/32 bastion
There is an issue where only 115.178.73.2 is applied to api-elb.dev.datasaker.io.
kops edit cluster --name=dev.datasaker.io
from
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
sshAccess:
- 0.0.0.0/0
- ::/0
to
kubernetesApiAccess:
- 115.178.73.2/32
- 115.178.73.91/32
- 3.35.247.45/32
sshAccess:
- 115.178.73.2/32
- 115.178.73.91/32
- 3.35.247.45/32
kops update cluster --yes --state=s3://clusters.dev.datasaker.io
kops rolling-update cluster --yes --state=s3://clusters.dev.datasaker.io
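To confirm the tightened CIDRs actually reached the API load balancer after the rolling update, one option is to inspect the security group with the AWS CLI; the group name below follows kops' usual api-elb.<cluster-name> convention and is an assumption:
```
# list the ingress CIDRs currently attached to the API ELB security group
aws ec2 describe-security-groups \
  --filters "Name=group-name,Values=api-elb.dev.datasaker.io" \
  --query 'SecurityGroups[].IpPermissions[].IpRanges[].CidrIp'
```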

File diff suppressed because it is too large.


@@ -0,0 +1,3 @@
[defaults]
host_key_checking=False


@@ -0,0 +1,95 @@
---
- hosts: bastion
become: true
gather_facts: true
roles:
- role: bastion
vars:
- sshmainport: 2222
admin_users:
- name: "minchulahn"
ip: "10.20.142.22"
description: "안민철"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDKDxtkcfx2ITlT2Yh7ZCT79do/25YQ2vROz38m8veAuBhOw+75oZJ4nN//zOWaaMvpC3Z7NIzOR+3UeukhnLZ591q8AaHcKjV8JEJMo2pvpH1vdLcTL9baLqWrxzgRimnZUNf5n5HNr+AKoXuPp//aVSJSoeznb66r04/rJSetT0QGDC8Kj5Q+MNvdd0/3U/nu7JxW9LIEaLoeiX6mVb4PpV7kl3rI3Vut/GnWakOhbS4yNvIFdR6d8rv305/BXJOz/aWy+0j7qK+NBzbSsI/l0vVUHfeD3whYGePCpWmj73ZsMTMjIjrC8DpRQlOJlAZ0GVpQnd/ayIWi4+V8VjvFcd6vSqrhhsNoOyo0Y/6cyO6iyvKqohMK6+HF1w6aXoaGCFFSl/3gw63saNAsdZPArnwf5yZ6GfPa/9bRn2k9g5xfp97Itpo6Iqq+PuRcZOes0EiIQe2hOoYQEIHIRhf8CZ+Xf6W1+XZB+WxEzUe4GCCwgUdTB6RIr4ThDxwCBV0="
- name: "havelight"
ip: "10.20.142.21"
description: "정재희"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDUAppqxDLltrMsYMwIxGi0FA5STA/R+H6oy7myfiJP2Lt4woCogMi3ELVKEhFkeJx4i8y9G80lynEYCHRH1kAQ/7YaJEVFrPXTvBw+OVxYdVS/gLl0rL89ky+n0dv6A9mancrvUOMacI5aN7/W+EhoLohRjRbWlsPGNnvAmO0AZnt595aMUjFkdhusGyBVunDUFSitj9TFkjxDhr6cx8Bi0FLpvdsoAvfqiw/MVKW2pMgj56AT5UCT0wvtSHSNY/C731jP/RKrxP0fnVhIkVys/XmLV/6SVEqL1XwqMTvRfi5+Q8cPsXrnPuUFHiNN4e/MGJkYi0lg7XbX8jDXv3ybdxZ7lGiUDebxjTKBCCghFae3eAwpJADEDfrzb8DHJZFwJVVdKGXvStTWTibcs14ilRPcB4SWIBx/cFCzwOBK/iw8CfEfsbVe6WQbDc4T4LrgL8cUzHPOO8CQcC4DV/O3BuoqQExu6xTmU8rhLT9kgatIdX0K5jgGbuqz7c2lelU="
- name: "sa_8001"
ip: "10.20.142.50"
description: "변정훈"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCgvFtLP7A1bR2ANxHiyTalgaI2pvxnCAzsqTAAh/+egIOi2vUIC2jRWGQXyoiTlupdNWQli2D93tEJBvG3VO5LOVocOHsFnFcV8RsiR4QGhqMeXRfMBWbf7Prby0qWv/VQ00gNWEgEjZUhOfBQeJsozGTd3dS4AgRnQkQmnvCT6TWD7+GwMg1SDlu/23y5aKLmpLkVT9kEG3yxZ3rWQfepjAubt+/saZPtyhkmc9+qhe2K+6PCZU2MCh6TYoKrcRUhVaJLvWqS35/Cv/9oxLg7lZwsasHFO9ANXWV9gBelCXLpYosN5hylUvl4JmSN+/qiOH3hpEbOtTCY/ZU0o1/xXLr0pmbYpZoT6zMKZ5fkweW7xidrg/bI1s/4+DVf4c/NJehw4PL3sqRmVdJsriFUifywh05Up5j1NQANiFlFngwEWy81cWRyvSL5q/plJHSvpd6g+WbsyC/QqYNAhxjnEosOb52QGZmLL7GqaC1hdKDOlJYZK63EBQ8YpHqGHo0="
allow_users:
- name: "wkd1994"
ip: "10.20.142.28"
description: "김동우"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDtmuAhVVyVJ87+t2xVtiS3bvTxxn0dmD7t4D2iSvSErIjRsXRCPLdc/yGWiezj+oVZtRPLJ2mjKToGUerdkcW8oqiQeL0+x/CjdlS2rQXvQa2HXCjB+MejwZyJ2bl7VDtIMdLianJBn7+XVc48+bIf7yait8yVH1aVWcS/AXOSo9LwX/uNW5VCL5BeXSGwXdwkuhjeJurR4WIVSBXuh1ql5Vy6BdSxcmLMihNlIL/DyuzfPLuQZbuSeaJ7eJKiHu63/SwBA1cPzj9tgI7zNvguapIHKXvoK8n5gNUXVRDGnD4J6xbzUQB3DbU8kaz7pDClxzgpkf3MnvP9QvnTyqV+aftYlb02as0PrwIxlTlW/sBxyEGdFe+JwoTctHkrSfp0lYRpyCv3eXJcdDu2l3dTJXAHlpcJuQRH2j9herURxML0w6re1iKJ8MAjOqUvh+B3A1U3x116zEGdsCNCRcfwehEir7fmGKaPvrmOiDOTlNswdL/OJ1RHKFuEZJPlUr8="
- name: "djkim"
ip: "10.20.142.36"
description: "김득진"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9Go9pLADJUQtq+ptTAcSIpi+VYv5/Kik0lBuV8xEc++vNtix5kwi+XSsNShHM3MVeiE8J27rYfyNn79r5pVKMuasMRyP3mTDZtRKr7/piM8MXuGSu1jCsVrTBZX0Sf4wuOA1tSkG9QgjBMZfvE9jOSYozA1K85mVE28m2rTihPnL5zYsDKnx+xIcwUBTpkOCoHiAfAX9b5ADAfScJigSZDjFLvexJ1aapPV2Iajh8huIhWvCUhrqUv/ldUm+b1iiOT7GXdrM/cam3FnLZ0b5KI9CQb7084+4l0BlmtPkuFcIlTDm1K6YO7+Mewd+F9uQZwvxGuElBPg8NVgFLD7+nrf2VlJYYCAeChyDV5+ZD70pSTcvHpJbmLKMtRFGov73ZPJ3vld9XCGUCajaoZz5Kz+ANmSC9nl3FpxnYgvFWfS7iwyC+VkGRKUg96/crXz4D8fW/wIskt+3cVrW9Z66psH41ll979mC8xly0ITWwbQZv7rvbdWSDVKVRgbXQOSc="
- name: "sanghee1357"
ip: "10.20.142.40"
description: "김상희"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC45maYW137cwvdS8AE9UzNHq9AMYrkEJtoNDAOVkUXtpVQITqvBCc4B4FfR5JK2h+imnBDng5fu728YAB7q31BE3Wub8I+QWhnQgv+kH1yMWj2s329tkHvcyNWIHSBqw4z1N74Zba+7mojKioju27HdcRcN1L7tpXSCHrq5bU6++CMShpZ7a3wo20RfikFWd563Y15mE3uDqlbkcuzE0KGSNrdY6Gy9aiE3/poVQRLaCmXnUKNw9wM3UGN9DanJi6iosXrlZRkpwhV+tHh2x+BWCbyY8jj94RDJgMwoKw71tzlEp+B1k6a7g+lEo3KFP//3PQxc9fdKBdg1YzSAKGKjsqATEVclmQHVskk6wZQC/wcjFxrSOreSp6knswX9AhIvGhMtoVo9iMy9cm+F4AauzjjfszCMO484983hIYwsh321VB14Wg7NroCYMUh7krATeKmNWhK0YicYCXINVMphBAcXFhuJduPejz19ZN356t+F/LDqlCxW7kO9QfYUy0="
- name: "jinbekim"
ip: "10.10.142.48"
description: "김진범"
- name: "bypark"
ip: "10.20.142.26"
description: "박병욱"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCZig/9xMWR3QhwHPbkvY2b9nmHiWkJHgnztGfIyxVTmkcsr9QViIvNUINlRBlE2+I5j7R2+0qI5GkAYndJsQoZiZ3iPqxnM5KdB9bEbWS5Tv7pbGyHyzaYPMUS3g6ZRMKnbJlAmhOLuq4TNYaUSESvaiYbCbaZK2JdsfPtSC99Gez6+HNoapILeg6xkxLnMsgUG6QzGaZyRABlPRbctGfx2U7cYe/7b7T+/yNtMU2FKrAJqcy0S1IUzc/dK2m5SQ3Y2GMohuGkv8mfs16i0wi3LfgEIatsmj2KB7Y7lIYW/GEZA2I+K2uH9Pu+F/kmGvAu5jNd1ztSo9MgElyu2NMXYhM3f/eDD+PdHKjUvOtE5twNBHQooPjBpp/mja4hnxLKepTqgP1t6azncPB8m6jC6MTbkhOHpgSNXurhx0kCurLA+l9KaySidhc0mFNJZGRKAhQoMIDFgXlzkZ4GmmtbfOJ/J1k7QqHZya5x6M4mOfvlPECFKVF24vzJVEulY3E="
- name: "joonsoopark"
ip: "10.20.142.33"
description: "박준수"
key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICeOzKeL4ZUXw0lEHDZoBsp7M3oobrBI0sWBHdpk0X0T"
- name: "baekchan1024"
ip: "10.20.142.39"
description: "백승찬"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDaqqy9YVwxh37xOU0nytBPd6GEJe30e1L/K5UXDZToteNebyfQrtFogxB6MpMNaAzAk6qbyPuZA3rgP8Y+qXgRlx88cxje5P5yOCsMW2o3xD5PiJ7lluWQ9tlS5ti4B9EWurJOsGF27XKKuSHN+dx9ZIb4sDqLYzmycPNwFaEtH6GQ2vjqpPMfjmKAuYmKD4L7mdA8lXTiRS2uYDkUxwQ+6PU+axTauD9qsXuGDAnGkVHKNE0o9OCf1uoyOhy6EB2sDz5Pymr7fbRJauWNxuSJdYPKY33GdDKpioP/1nRLSLtr1nvLHVrG/5CSNO1x20WYXFEGoMTzW4T5nYSS61apHkQ/0Csv0LBeHPc9gsMPobNJpIYlvGwdODQ+fpgxyB4SAQJKtQR1YB4w5OVtXVZAMvZZKI9gQQHZ8wQ4Zk0erGxKeyLxnDrKKNHLRPyUrjkL7H2a0i8BGpdk8sxW9NVrJJGgmQQiPbJx0yvIi1n55mUq+ZVjiF5qPvxtc5D133k="
- name: "jungry"
ip: "10.20.142.44"
description: "서정우"
- name: "ose"
ip: "10.20.142.34"
description: "오승은"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDAlYSGJZpOOEPZqIa1/CXxiaUNj1wsgkp0kEyD2SX8r7ovwmSAWCS24v/IOSgsUTFRpL64vIeCtcZ8sj4Hwzd3F2h+carQP0v+leCkzPpQ7aP/BoPS27+fSCzaOZv/QJ+eIcXWHIbWkXf6MYQ35PykDeJIO61OMOlWhpNV425VSwfZoB72xZmEH+rIZjXHHs8vYtIG2sXZE22BLiVw6PEL/C4QB2khBT5ZAjX2xGEzUoSknzva/8Uu20adQBalFTIdyLV7V6CxkIPkSgfmZh/fqXfbfPsxHLPK2o2ueGbx3fcN3kAqFrqpJgjEIZmNj6qhVPtbN5TSUyIjtoPhC4JR0heqckz1qLah+8lSiUfHSblGW89QuUcedHdwHp/RiZW6HQO0cqS/QPNcgPLTiv68voBapS9rav+j0tt1RynNY+AdhCOoo4BbGW0pXqi0vaHzbbfbzxp78kx/7/KXmUHkzGSkmlXVbKqzDm5k/kRn0q4pimDun42b+MjNYu3gZz0="
- name: "gurwns1540"
ip: "10.20.142.35"
description: "윤혁준"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1+kC8LzDxwc4gfiGzUQH+CeKGf+elX3oKciMmLQJmpddlcWuRthq1pszufHjypT/FfC/XVLZkGvjMDJUWro/Pen3RcdTcENteVZO/nzQ89nmS/D3tbg6nVWxiast6bDdSEdPF8CKSUAlA+8hTgSCWou7TtOuWGCKj+6HSHctBA41WFLpYInYHWTnC+LY1nwOurjG4qjmgdEzBXMhLWvuZDVE21oIUMEXbjW1dXhHNMKfyn/mUqSSG9zoXZSK0KB8OHhBsbxzFqu5cXC1TTpJOyX05730LUdwF9MevreUS3ws5NY8h0C6EVAOMQqeH5gkwVTHsyXQHtXB9nGI1g7sMIjEzJHkOygK17nAfapWhGFahhaaq42qdo7N3Pj8IjrY3S9EDXnPtQODROj3JVzo3Sgd2FUKDcAIWwJHMAwkaFqciPGIrj4ib81NbOoWn7oCjbIyDxgoxSp1vpW7C25rL22LtrCHyMWPbhV19FJIZqtg7f94JptzLND1pHDnsnfeNAxz9d6oKdcJW5bXUDeDCQxBio1RBF6nNzSRoiD0+FD29of9wNWRd2cBkR8uJV7P9XfXMzMK5q7Wqte/DABs3wJ3v/cth6kPrRV7j2h+4DGbEj5Mpz8XAFnGkZFmd/UiSbNqRBLKmp0lPpyxZrRU00xuqJ51pYB2wMwkQgOIVuw=="
- name: "yyeun"
ip: "10.20.142.45"
description: "이예은"
- name: "sujung"
ip: "10.20.142.27"
description: "정성락"
key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKbI5DjRkABz65NnREzf5HKKIMPrIA4DrnDDXTrjnRH8"
- name: "antcho"
ip: "10.20.142.46"
description: "조혜수"
- name: "stdhsw"
ip: "10.20.142.32"
description: "한승우"
key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIANTMTgqbTtIKKRsZU9An9D3La9Fh1bUtiLE/Y0nL4CZ"
- name: "seungjinjeong"
ip: "10.20.142.41"
description: "정승진"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDi8funYVM0eRmfplW5EdnfJOFEVDEMMw08VRn6FD9x9VuNWCEkY3iErzekBY2SRat8g6q0VXRyu7b/bhm/kD+BtI79fmz9FKxslTZCeKKN1KWfoZoXSRnvjOX1Y6NDnY2X5M+3kN40ek9ku6abN6lOtInTXJ1QOJIISa8l6vrB/j1xVVZghTYY5MBMc89cRZESGdBZWld0CtmoM+mnjh5vWCCA3VJTcDbj5LKtWllA6t58KwtGBikr8iaOpi83dQ91eXWzxTttl/LCe9bfgSxYlmvZILn0UZMu1WiWBhlIBzC6RlxorkDVRXcSRjguEt+/ys2rv6UTSkm150O4PgjgxlZPmTJt1m5y/St57LELUVbV6XGSq6+eZNTZOYBxxRkKcV0uByCBjxjsVlMmoEZoxedhSVT1Z8/AiMnjPBjXx2ease04EvtZs6rpDRd0puzcx1TKoCkyak60ymxc91X9lQg3kUl0av/G5kMKJQqW6v31GA1Vnh4K9haCVF/Ki/M="

01-old/ansible/inventory

@@ -0,0 +1,6 @@
[datasaker-demo]
10.10.43.100
10.10.43.101
[bastion]
10.10.43.43 ansible_port=2222 ansible_user=havelight
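With this inventory, the bastion play above would typically be run as follows (the playbook filename is an assumption; the non-default SSH port comes from ansible_port in the inventory):
```
# apply the bastion role to the [bastion] group only
ansible-playbook -i inventory bastion.yaml --limit bastion
```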


@@ -0,0 +1,3 @@
[host]
10.10.43.[100:101] ansible_user=root
10.10.43.[110:153]

01-old/ansible/node.yaml

@@ -0,0 +1,6 @@
---
- name: check ls
hosts: all
become: true
roles:
- node


@@ -0,0 +1,48 @@
# Password aging settings
os_auth_pw_max_age: 90
os_auth_pw_min_age: 1
os_auth_pw_warn_age: 7
passhistory: 2
# Inactivity and Failed attempts lockout settings
fail_deny: 5
fail_unlock: 0
inactive_lock: 0
shell_timeout: 300
# tally settings
onerr: 'fail'
deny: 5
unlock_time: 300
# Password complexity settings
pwquality_minlen: 9
pwquality_maxrepeat: 3
pwquality_lcredit: -1
pwquality_ucredit: -1
pwquality_dcredit: -1
pwquality_ocredit: -1
# SSH settings
sshrootlogin: 'yes'
sshmainport: 22
ssh_service_name: sshd
# Crictl setup
crictl_app: crictl
crictl_version: 1.25.0
crictl_os: linux
crictl_arch: amd64
crictl_dl_url: https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{ crictl_version }}/{{ crictl_app }}-v{{ crictl_version }}-{{ crictl_os }}-{{ crictl_arch }}.tar.gz
crictl_bin_path: /usr/local/bin
crictl_file_owner: root
crictl_file_group: root
# temp
username: root
password: saasadmin1234
# common user flag
common_user: False
pause_time: 1
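With these defaults, crictl_dl_url expands to a concrete GitHub release URL; the unarchive task in the role is equivalent to this manual download:
```
# fetch crictl 1.25.0 for linux/amd64 and extract just the binary into /usr/local/bin
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.25.0/crictl-v1.25.0-linux-amd64.tar.gz \
  | sudo tar -xz -C /usr/local/bin crictl
```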


@@ -0,0 +1,20 @@
#!/bin/sh
printf '''
|-----------------------------------------------------------------|
| This system is for the use of authorized users only. |
| Individuals using this computer system without authority, or in |
| excess of their authority, are subject to having all of their |
| activities on this system monitored and recorded by system |
| personnel. |
| |
| In the course of monitoring individuals improperly using this |
| system, or in the course of system maintenance, the activities |
| of authorized users may also be monitored. |
| |
| Anyone using this system expressly consents to such monitoring |
| and is advised that if such monitoring reveals possible |
| evidence of criminal activity, system personnel may provide the |
| evidence of such monitoring to law enforcement officials. |
|-----------------------------------------------------------------|
'''


@@ -0,0 +1,6 @@
---
- name: restart sshd
service:
name: "{{ ssh_service_name }}"
state: restarted
enabled: true


@@ -0,0 +1,7 @@
---
- name: user change
user:
name: "{{ username }}"
password: "{{ password | password_hash('sha512') }}"
state: present


@@ -0,0 +1,29 @@
---
- name: Back up the existing update-motd.d files into a tar.gz archive
archive:
path: /etc/update-motd.d/*
dest: /etc/update-motd.d/motd.tar.gz
format: gz
force_archive: true
- name: Remove the stock motd.d files
file:
path: /etc/update-motd.d/{{ item }}
state: absent
with_items:
- 10-help-text
- 85-fwupd
- 90-updates-available
- 91-release-upgrade
- 95-hwe-eol
- 98-fsck-at-reboot
- 50-motd-news
- 88-esm-announce
- name: Create login banner
copy:
src: login_banner
dest: /etc/update-motd.d/00-header
owner: root
group: root
mode: 0755


@@ -0,0 +1,19 @@
---
- name: Downloading and extracting {{ crictl_app }} {{ crictl_version }}
unarchive:
src: "{{ crictl_dl_url }}"
dest: "{{ crictl_bin_path }}"
owner: "{{ crictl_file_owner }}"
group: "{{ crictl_file_group }}"
extra_opts:
- crictl
remote_src: yes
- name: Crictl command crontab setting
ansible.builtin.cron:
name: crontab command
minute: "0"
hour: "3"
user: root
job: "/usr/local/bin/crictl rmi --prune"


@@ -0,0 +1,48 @@
---
- name: Set pass max days
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_MAX_DAYS.*$'
line: "PASS_MAX_DAYS\t{{os_auth_pw_max_age}}"
backrefs: yes
- name: Set pass min days
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_MIN_DAYS.*$'
line: "PASS_MIN_DAYS\t{{os_auth_pw_min_age}}"
backrefs: yes
- name: Set pass min length
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_MIN_LEN.*$'
line: "PASS_MIN_LEN\t{{pwquality_minlen}}"
backrefs: yes
- name: Set pass warn days
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_WARN_AGE.*$'
line: "PASS_WARN_AGE\t{{os_auth_pw_warn_age}}"
backrefs: yes
- name: Set password encryption to SHA512
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^ENCRYPT_METHOD\s.*$'
line: "ENCRYPT_METHOD\tSHA512"
backrefs: yes
- name: Disable MD5 crypt explicitly
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^MD5_CRYPT_ENAB.*$'
line: "MD5_CRYPT_ENAB NO"
backrefs: yes
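A quick sanity check that the lineinfile edits above landed on a host (a verification sketch, not part of the role):
```
# all six settings should reflect the role variables
grep -E '^(PASS_(MAX_DAYS|MIN_DAYS|MIN_LEN|WARN_AGE)|ENCRYPT_METHOD|MD5_CRYPT_ENAB)' /etc/login.defs
```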


@@ -0,0 +1,12 @@
---
- pause:
seconds: "{{ pause_time }}"
- include: sshd_config.yml
tags: sshd_config
- include: sudoers.yml
tags: sudoers
- include: admin_set.yml
tags: admin_set


@@ -0,0 +1,50 @@
---
- name: Add pam_tally2.so
template:
src: common-auth.j2
dest: /etc/pam.d/common-auth
owner: root
group: root
mode: 0644
- name: Create pwquality.conf password complexity configuration
block:
- apt:
name: libpam-pwquality
state: present
install_recommends: false
- template:
src: pwquality.conf.j2
dest: /etc/security/pwquality.conf
owner: root
group: root
mode: 0644
- name: Add pam_tally2.so
block:
- lineinfile:
dest: /etc/pam.d/common-account
regexp: '^account\srequisite'
line: "account requisite pam_deny.so"
- lineinfile:
dest: /etc/pam.d/common-account
regexp: '^account\srequired'
line: "account required pam_tally2.so"
- name: password reuse is limited
lineinfile:
dest: /etc/pam.d/common-password
line: "password required pam_pwhistory.so remember=5"
- name: password hashing algorithm is SHA-512
lineinfile:
dest: /etc/pam.d/common-password
regexp: '^password\s+\[success'
line: "password [success=1 default=ignore] pam_unix.so sha512"
- name: Shadow Password Suite Parameters
lineinfile:
dest: /etc/pam.d/common-password
regexp: '^password\s+\[success'
line: "password [success=1 default=ignore] pam_unix.so sha512"


@@ -0,0 +1,24 @@
---
- name: Set session timeout
lineinfile:
dest: /etc/profile
regexp: '^TMOUT=.*'
insertbefore: '^readonly TMOUT'
line: 'TMOUT={{shell_timeout}}'
state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}"
- name: Set TMOUT readonly
lineinfile:
dest: /etc/profile
regexp: '^readonly TMOUT'
insertafter: 'TMOUT={{shell_timeout}}'
line: 'readonly TMOUT'
state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}"
- name: Set export TMOUT
lineinfile:
dest: /etc/profile
regexp: '^export TMOUT.*'
insertafter: 'readonly TMOUT'
line: 'export TMOUT'
state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}"


@@ -0,0 +1,30 @@
---
- name: Configure ssh root login to {{sshrootlogin}}
lineinfile:
dest: /etc/ssh/sshd_config
regexp: '^(#)?PermitRootLogin.*'
line: 'PermitRootLogin {{sshrootlogin}}'
insertbefore: '^Match.*'
state: present
owner: root
group: root
mode: 0640
notify: restart sshd
- name: SSH Listen on Main Port
lineinfile:
dest: /etc/ssh/sshd_config
insertbefore: '^#*AddressFamily'
line: 'Port {{sshmainport}}'
state: present
owner: root
group: root
mode: 0640
notify: restart sshd
- name: "Setting sshd allow users"
template:
src: allow_users.j2
dest: "/etc/ssh/sshd_config.d/allow_users.conf"
notify: restart sshd
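Because a broken sshd_config can cut off remote access, it is worth validating before the restart handler fires; a hedged sketch:
```
sudo sshd -t   # syntax-check the config; silent on success
sudo sshd -T | grep -iE '^(port|permitrootlogin|allowusers)'   # show the effective values
```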


@@ -0,0 +1,107 @@
---
- name: Get all ssh sessions
shell: ps -ef | grep sshd | grep -v root | grep -v "{{ ansible_user }}" | awk '{print $2}'
register: ssh_sessions
ignore_errors: true
- name: Terminate ssh sessions
shell: kill -9 {{ item }}
with_items: "{{ ssh_sessions.stdout_lines }}"
when: ssh_sessions is defined
ignore_errors: true
- name: "Create devops group"
ansible.builtin.group:
name: "devops"
state: present
- name: "get current users"
shell: "cat /etc/passwd | egrep -iv '(false|nologin|sync|root|dev2-iac)' | awk -F: '{print $1}'"
register: deleting_users
- name: "Delete users"
ansible.builtin.user:
name: "{{ item }}"
state: absent
remove: yes
with_items: "{{ deleting_users.stdout_lines }}"
when: item != ansible_user
ignore_errors: true
- name: "Create admin user"
ansible.builtin.user:
name: "{{ item.name }}"
group: "devops"
shell: "/bin/bash"
system: yes
state: present
with_items: "{{ admin_users }}"
when:
- item.name is defined
ignore_errors: true
- name: "admin user password change"
user:
name: "{{ item.name }}"
password: "{{ password | password_hash('sha512') }}"
state: present
with_items: "{{ admin_users }}"
when:
- item.name is defined
ignore_errors: true
- name: "Add admin user key"
authorized_key:
user: "{{ item.name }}"
state: present
key: "{{ item.key }}"
with_items: "{{ admin_users }}"
when:
- item.name is defined
- item.key is defined
- common_user == True
ignore_errors: true
- name: "Create common user"
ansible.builtin.user:
name: "{{ item.name }}"
group: "users"
shell: "/bin/bash"
system: yes
state: present
with_items: "{{ allow_users }}"
when:
- item.name is defined
- common_user == True
ignore_errors: true
- name: "Change common user password change"
user:
name: "{{ item.name }}"
password: "{{ password | password_hash('sha512') }}"
state: present
with_items: "{{ allow_users }}"
when:
- item.name is defined
- common_user == True
ignore_errors: true
- name: "Add common user key"
authorized_key:
user: "{{ item.name }}"
state: present
key: "{{ item.key }}"
with_items: "{{ allow_users }}"
when:
- item.name is defined
- item.key is defined
- common_user == True
ignore_errors: true
- name: "Setting sudoers allow users"
template:
src: sudoers_users.j2
dest: "/etc/sudoers.d/sudoers_users"
ignore_errors: true


@@ -0,0 +1,11 @@
AllowUsers dev2-iac@10.10.43.*
{% if admin_users is defined %}
{% for user in admin_users %}
AllowUsers {{ user.name }}@{{ user.ip }}
{% endfor %}
{% endif %}
{% if allow_users is defined %}
{% for user in allow_users %}
AllowUsers {{ user.name }}@{{ user.ip }}
{% endfor %}
{% endif %}
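Rendered against the admin_users and allow_users lists from the playbook, the template emits one AllowUsers line per entry, for example:
```
AllowUsers dev2-iac@10.10.43.*
AllowUsers minchulahn@10.20.142.22
AllowUsers havelight@10.20.142.21
```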


@@ -0,0 +1,27 @@
#
# /etc/pam.d/common-auth - authentication settings common to all services
#
# This file is included from other service-specific PAM config files,
# and should contain a list of the authentication modules that define
# the central authentication scheme for use on the system
# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
# traditional Unix authentication mechanisms.
#
# As of pam 1.0.1-6, this file is managed by pam-auth-update by default.
# To take advantage of this, it is recommended that you configure any
# local modules either before or after the default block, and use
# pam-auth-update to manage selection of other modules. See
# pam-auth-update(8) for details.
auth required pam_tally2.so onerr={{onerr}} even_deny_root deny={{deny}} unlock_time={{unlock_time}}
# here are the per-package modules (the "Primary" block)
auth [success=1 default=ignore] pam_unix.so nullok
# here's the fallback if no module succeeds
auth requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
auth required pam_permit.so
# since the modules above will each just jump around
# and here are more per-package modules (the "Additional" block)
auth optional pam_cap.so
# end of pam-auth-update config


@@ -0,0 +1,50 @@
# Configuration for systemwide password quality limits
# Defaults:
#
# Number of characters in the new password that must not be present in the
# old password.
# difok = 5
#
# Minimum acceptable size for the new password (plus one if
# credits are not disabled which is the default). (See pam_cracklib manual.)
# Cannot be set to lower value than 6.
minlen = {{pwquality_minlen}}
#
# The maximum credit for having digits in the new password. If less than 0
# it is the minimum number of digits in the new password.
dcredit = {{pwquality_dcredit}}
#
# The maximum credit for having uppercase characters in the new password.
# If less than 0 it is the minimum number of uppercase characters in the new
# password.
ucredit = {{pwquality_ucredit}}
#
# The maximum credit for having lowercase characters in the new password.
# If less than 0 it is the minimum number of lowercase characters in the new
# password.
lcredit = {{pwquality_lcredit}}
#
# The maximum credit for having other characters in the new password.
# If less than 0 it is the minimum number of other characters in the new
# password.
ocredit = {{pwquality_ocredit}}
#
# The minimum number of required classes of characters for the new
# password (digits, uppercase, lowercase, others).
# minclass = 0
#
# The maximum number of allowed consecutive same characters in the new password.
# The check is disabled if the value is 0.
maxrepeat = {{pwquality_maxrepeat}}
#
# The maximum number of allowed consecutive characters of the same class in the
# new password.
# The check is disabled if the value is 0.
# maxclassrepeat = 0
#
# Whether to check for the words from the passwd entry GECOS string of the user.
# The check is enabled if the value is not 0.
# gecoscheck = 0
#
# Path to the cracklib dictionaries. Default is to use the cracklib default.
# dictpath =


@@ -0,0 +1,6 @@
dev2-iac ALL=(ALL) NOPASSWD: ALL
{% if admin_users is defined %}
{% for user in admin_users %}
{{ user.name }} ALL=(ALL) NOPASSWD: ALL
{% endfor %}
{% endif %}


@@ -0,0 +1,12 @@
---
- name: echo hello
command: echo "Not Valid Ruby Version"
- name: Update apt repo and cache on all Debian/Ubuntu boxes
apt: update_cache=yes cache_valid_time=3600
- name: Install cifs-utils
apt: name=cifs-utils state=latest update_cache=yes
- name: Install nfs-common
apt: name=nfs-common state=latest update_cache=yes


@@ -0,0 +1,43 @@
# Password aging settings
os_auth_pw_max_age: 90
os_auth_pw_min_age: 10
os_auth_pw_warn_age: 7
passhistory: 2
# Inactivity and Failed attempts lockout settings
fail_deny: 5
fail_unlock: 0
inactive_lock: 0
shell_timeout: 300
# tally settings
onerr: 'fail'
deny: 5
unlock_time: 300
# Password complexity settings
pwquality_minlen: 9
pwquality_maxrepeat: 3
pwquality_lcredit: -1
pwquality_ucredit: -1
pwquality_dcredit: -1
pwquality_ocredit: -1
# SSH settings
sshrootlogin: 'forced-commands-only'
sshmainport: 22
ssh_service_name: sshd
# Crictl setup
crictl_app: crictl
crictl_version: 1.25.0
crictl_os: linux
crictl_arch: amd64
crictl_dl_url: https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{ crictl_version }}/{{ crictl_app }}-v{{ crictl_version }}-{{ crictl_os }}-{{ crictl_arch }}.tar.gz
crictl_bin_path: /usr/local/bin
crictl_file_owner: root
crictl_file_group: root
# temp
username: root
password: saasadmin1234!@#$


@@ -0,0 +1,2 @@
AllowUsers *@10.20.142.*
AllowUsers *@10.10.43.*


@@ -0,0 +1,39 @@
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0
[grpc]
max_recv_message_size = 16777216
max_send_message_size = 16777216
[debug]
level = "info"
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.k8s.io/pause:3.7"
max_container_log_line_size = -1
enable_unprivileged_ports = false
enable_unprivileged_icmp = false
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
runtime_engine = ""
runtime_root = ""
base_runtime_spec = "/etc/containerd/cri-base.json"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://registry-1.docker.io"]


@@ -0,0 +1,60 @@
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0
[grpc]
max_recv_message_size = 16777216
max_send_message_size = 16777216
[debug]
level = "info"
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.k8s.io/pause:3.7"
max_container_log_line_size = -1
enable_unprivileged_ports = false
enable_unprivileged_icmp = false
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
runtime_engine = ""
runtime_root = ""
base_runtime_spec = "/etc/containerd/cri-base.json"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://registry-1.docker.io"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."10.10.31.243:5000"]
endpoint = ["http://10.10.31.243:5000"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."10.10.43.240:30500"]
endpoint = ["http://10.10.43.240:30500"]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.31.243:5000".tls]
insecure_skip_verify = true
[plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.31.243:5000".auth]
username = "core"
password = "coreadmin1234"
[plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.43.240:30500".tls]
insecure_skip_verify = true
[plugins."io.containerd.grpc.v1.cri".registry.configs."10.10.43.240:30500".auth]
username = "dsk"
password = "dskadmin1234"
[plugins."io.containerd.grpc.v1.cri".registry.configs."docker.io".auth]
username = "datasaker"
password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc"


@@ -0,0 +1,20 @@
#!/bin/sh
printf '''
|-----------------------------------------------------------------|
| This system is for the use of authorized users only. |
| Individuals using this computer system without authority, or in |
| excess of their authority, are subject to having all of their |
| activities on this system monitored and recorded by system |
| personnel. |
| |
| In the course of monitoring individuals improperly using this |
| system, or in the course of system maintenance, the activities |
| of authorized users may also be monitored. |
| |
| Anyone using this system expressly consents to such monitoring |
| and is advised that if such monitoring reveals possible |
| evidence of criminal activity, system personnel may provide the |
| evidence of such monitoring to law enforcement officials. |
|-----------------------------------------------------------------|
'''


@@ -0,0 +1,3 @@
#[Manager]
#DefaultLimitNOFILE=65535:65535
#DefaultLimitNPROC=65536:65536


@@ -0,0 +1,6 @@
---
- name: restart sshd
service:
name: "{{ ssh_service_name }}"
state: restarted
enabled: true


@@ -0,0 +1,14 @@
---
- name: key add
authorized_key:
user: ubuntu
state: present
key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
manage_dir: False
- name: user change
user:
name: "{{ username }}"
password: "{{ password | password_hash('sha512') }}"
state: present


@@ -0,0 +1,29 @@
---
- name: Back up the existing update-motd.d files into a tar.gz archive
archive:
path: /etc/update-motd.d/*
dest: /etc/update-motd.d/motd.tar.gz
format: gz
force_archive: true
- name: Remove the stock motd.d files
file:
path: /etc/update-motd.d/{{ item }}
state: absent
with_items:
- 10-help-text
- 85-fwupd
- 90-updates-available
- 91-release-upgrade
- 95-hwe-eol
- 98-fsck-at-reboot
- 50-motd-news
- 88-esm-announce
- name: Create login banner
copy:
src: login_banner
dest: /etc/update-motd.d/00-header
owner: root
group: root
mode: 0755


@@ -0,0 +1,47 @@
---
#- name: Downloading and extracting {{ crictl_app }} {{ crictl_version }}
# unarchive:
# src: "{{ crictl_dl_url }}"
# dest: "{{ crictl_bin_path }}"
# owner: "{{ crictl_file_owner }}"
# group: "{{ crictl_file_group }}"
# extra_opts:
# - crictl
# remote_src: yes
- name: Change containerd config
copy:
src: containerd_dsk_config.toml
dest: /etc/containerd/config.toml
owner: root
group: root
mode: 0640
- name: Restart service containerd
ansible.builtin.systemd:
state: restarted
daemon_reload: yes
name: containerd
- name: remove all cronjobs for user root
command: crontab -r -u root
ignore_errors: true
- name: Crictl command crontab setting
ansible.builtin.cron:
name: "container container prune"
minute: "0"
hour: "3"
user: root
job: "for id in `crictl ps -a | grep -i exited | awk '{print $1}'`; do crictl rm $id ; done"
- name: Crictl command crontab setting
ansible.builtin.cron:
name: "container image prune"
minute: "10"
hour: "3"
user: root
job: "/usr/local/bin/crictl rmi --prune"


@@ -0,0 +1,48 @@
---
- name: Set pass max days
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_MAX_DAYS.*$'
line: "PASS_MAX_DAYS\t{{os_auth_pw_max_age}}"
backrefs: yes
- name: Set pass min days
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_MIN_DAYS.*$'
line: "PASS_MIN_DAYS\t{{os_auth_pw_min_age}}"
backrefs: yes
- name: Set pass min length
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_MIN_LEN.*$'
line: "PASS_MIN_LEN\t{{pwquality_minlen}}"
backrefs: yes
- name: Set pass warn days
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^PASS_WARN_AGE.*$'
line: "PASS_WARN_AGE\t{{os_auth_pw_warn_age}}"
backrefs: yes
- name: Set password encryption to SHA512
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^ENCRYPT_METHOD\s.*$'
line: "ENCRYPT_METHOD\tSHA512"
backrefs: yes
- name: Disable MD5 crypt explicitly
lineinfile:
dest: /etc/login.defs
state: present
regexp: '^MD5_CRYPT_ENAB.*$'
line: "MD5_CRYPT_ENAB NO"
backrefs: yes


@@ -0,0 +1,21 @@
---
- include: login_defs.yml
tags: login_defs
- include: pam.yml
tags: pam
- include: sshd_config.yml
tags: sshd_config
- include: profile.yml
tags: profile
- include: banner.yml
tags: banner
- include: crictl.yml
tags: crictl
#- include: admin_set.yml
# tags: admin_set


@@ -0,0 +1,82 @@
---
- name: Add pam_tally2.so
template:
src: common-auth.j2
dest: /etc/pam.d/common-auth
owner: root
group: root
mode: 0644
- name: Create pwquality.conf password complexity configuration
block:
- apt:
name: libpam-pwquality
state: present
install_recommends: false
- template:
src: pwquality.conf.j2
dest: /etc/security/pwquality.conf
owner: root
group: root
mode: 0644
- name: Add pam_tally2.so
block:
- lineinfile:
dest: /etc/pam.d/common-account
regexp: '^account\srequisite'
line: "account requisite pam_deny.so"
- lineinfile:
dest: /etc/pam.d/common-account
regexp: '^account\srequired'
line: "account required pam_tally2.so"
- name: password reuse is limited
lineinfile:
dest: /etc/pam.d/common-password
line: "password required pam_pwhistory.so remember=5"
- name: password hashing algorithm is SHA-512
lineinfile:
dest: /etc/pam.d/common-password
regexp: '^password\s+\[success'
line: "password [success=1 default=ignore] pam_unix.so sha512"
- name: Shadow Password Suite Parameters
lineinfile:
dest: /etc/pam.d/common-password
regexp: '^password\s+\[success'
line: "password [success=1 default=ignore] pam_unix.so sha512"
#- name: configure system settings, file descriptors and number of threads
# pam_limits:
# domain: '*'
# limit_type: "{{item.limit_type}}"
# limit_item: "{{item.limit_item}}"
# value: "{{item.value}}"
# with_items:
# - { limit_type: '-', limit_item: 'nofile', value: 65536 }
# - { limit_type: '-', limit_item: 'nproc', value: 65536 }
## - { limit_type: 'soft', limit_item: 'memlock', value: unlimited }
## - { limit_type: 'hard', limit_item: 'memlock', value: unlimited }
#- name: reload settings from all system configuration files
# shell: sysctl --system
#- name: Creates directory systemd config
# file:
# path: /etc/systemd/system.conf.d
# state: directory
# owner: root
# group: root
# mode: 0775
#- name: Create systemd limits
# copy:
# src: systemd_limit.conf
# dest: /etc/systemd/system.conf.d/limits.conf
# owner: root
# group: root
# mode: 644


@@ -0,0 +1,24 @@
---
- name: Set session timeout
lineinfile:
dest: /etc/profile
regexp: '^TMOUT=.*'
insertbefore: '^readonly TMOUT'
line: 'TMOUT={{shell_timeout}}'
state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}"
- name: Set TMOUT readonly
lineinfile:
dest: /etc/profile
regexp: '^readonly TMOUT'
insertafter: 'TMOUT={{shell_timeout}}'
line: 'readonly TMOUT'
state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}"
- name: Set export TMOUT
lineinfile:
dest: /etc/profile
regexp: '^export TMOUT.*'
insertafter: 'readonly TMOUT'
line: 'export TMOUT'
state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}"


@@ -0,0 +1,32 @@
---
- name: Configure ssh root login to {{sshrootlogin}}
lineinfile:
dest: /etc/ssh/sshd_config
regexp: '^(#)?PermitRootLogin.*'
line: 'PermitRootLogin {{sshrootlogin}}'
insertbefore: '^Match.*'
state: present
owner: root
group: root
mode: 0640
notify: restart sshd
- name: SSH Listen on Main Port
lineinfile:
dest: /etc/ssh/sshd_config
insertbefore: '^#*AddressFamily'
line: 'Port {{sshmainport}}'
state: present
owner: root
group: root
mode: 0640
notify: restart sshd
- name: SSH AllowUsers Setting
copy:
src: allow_users.conf
dest: /etc/ssh/sshd_config.d/allow_users.conf
owner: root
group: root
mode: 0644


@@ -0,0 +1,27 @@
#
# /etc/pam.d/common-auth - authentication settings common to all services
#
# This file is included from other service-specific PAM config files,
# and should contain a list of the authentication modules that define
# the central authentication scheme for use on the system
# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
# traditional Unix authentication mechanisms.
#
# As of pam 1.0.1-6, this file is managed by pam-auth-update by default.
# To take advantage of this, it is recommended that you configure any
# local modules either before or after the default block, and use
# pam-auth-update to manage selection of other modules. See
# pam-auth-update(8) for details.
auth required pam_tally2.so onerr={{onerr}} even_deny_root deny={{deny}} unlock_time={{unlock_time}}
# here are the per-package modules (the "Primary" block)
auth [success=1 default=ignore] pam_unix.so nullok
# here's the fallback if no module succeeds
auth requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
# this avoids us returning an error just because nothing sets a success code
auth required pam_permit.so
# since the modules above will each just jump around
# and here are more per-package modules (the "Additional" block)
auth optional pam_cap.so
# end of pam-auth-update config


@@ -0,0 +1,50 @@
# Configuration for systemwide password quality limits
# Defaults:
#
# Number of characters in the new password that must not be present in the
# old password.
# difok = 5
#
# Minimum acceptable size for the new password (plus one if
# credits are not disabled which is the default). (See pam_cracklib manual.)
# Cannot be set to lower value than 6.
minlen = {{pwquality_minlen}}
#
# The maximum credit for having digits in the new password. If less than 0
# it is the minimum number of digits in the new password.
dcredit = {{pwquality_dcredit}}
#
# The maximum credit for having uppercase characters in the new password.
# If less than 0 it is the minimum number of uppercase characters in the new
# password.
ucredit = {{pwquality_ucredit}}
#
# The maximum credit for having lowercase characters in the new password.
# If less than 0 it is the minimum number of lowercase characters in the new
# password.
lcredit = {{pwquality_lcredit}}
#
# The maximum credit for having other characters in the new password.
# If less than 0 it is the minimum number of other characters in the new
# password.
ocredit = {{pwquality_ocredit}}
#
# The minimum number of required classes of characters for the new
# password (digits, uppercase, lowercase, others).
# minclass = 0
#
# The maximum number of allowed consecutive same characters in the new password.
# The check is disabled if the value is 0.
maxrepeat = {{pwquality_maxrepeat}}
#
# The maximum number of allowed consecutive characters of the same class in the
# new password.
# The check is disabled if the value is 0.
# maxclassrepeat = 0
#
# Whether to check for the words from the passwd entry GECOS string of the user.
# The check is enabled if the value is not 0.
# gecoscheck = 0
#
# Path to the cracklib dictionaries. Default is to use the cracklib default.
# dictpath =


@@ -0,0 +1,4 @@
#!/bin/bash
# print each address in ip_list, one per line
while read line
do
echo "${line}"
done < ip_list


@@ -0,0 +1,37 @@
10.10.43.111
10.10.43.112
10.10.43.113
10.10.43.114
10.10.43.115
10.10.43.116
10.10.43.117
10.10.43.118
10.10.43.119
10.10.43.120
10.10.43.121
10.10.43.122
10.10.43.123
10.10.43.124
10.10.43.125
10.10.43.126
10.10.43.127
10.10.43.128
10.10.43.129
10.10.43.130
10.10.43.131
10.10.43.132
10.10.43.133
10.10.43.134
10.10.43.135
10.10.43.136
10.10.43.137
10.10.43.138
10.10.43.140
10.10.43.141
10.10.43.142
10.10.43.143
10.10.43.144
10.10.43.145
10.10.43.146
10.10.43.147
10.10.43.148

01-old/ansible/rsa_key/key.sh

@@ -0,0 +1,9 @@
#!/usr/bin/expect -f
set password [lindex $argv 0]
set host [lindex $argv 1]
spawn ssh-copy-id -o StrictHostKeyChecking=no root@$host
expect "password:"
send "$password\n"
expect eof

01-old/ansible/rsa_key/test.sh

@@ -0,0 +1,9 @@
#!/bin/bash
#if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi
passwd=$1
while read ip
do
./key.sh ${passwd} ${ip}
done < ip_list
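Together, key.sh and test.sh push the invoking user's SSH key to every host in ip_list; a usage sketch (the expect package must be installed for key.sh to run):
```
sudo apt-get install -y expect   # key.sh is an expect script
./test.sh 'root-password-here'   # placeholder password; hosts are read from ip_list
```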


@@ -0,0 +1,9 @@
---
- name: check ls
hosts: all
become: true
roles:
- security-settings
vars:
sshrootlogin: 'forced-commands-only'

01-old/ansible/test.yaml

@@ -0,0 +1,16 @@
---
- hosts: all
become: yes
tasks:
- name: Create a new user
user:
name: dev2-iac
password: "{{ 'saasadmin1234' | password_hash('sha512') }}"
group: sudo
shell: /bin/bash
- name: Set authorized key taken from file
authorized_key:
user: dev2-iac
state: present
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCsiN0I8B3UmB1mVBxVpvrSF5j0vrwUggngVrlplW8iJLllSBwarHzmSpMWv3eQtb9QQ/HKyOsS3j6UkbQK2aJ6jGeK2pQUkbb6KdMc9OrS/ILWysritcBJ3rUuITwOMvekQHtq+yKshap3uw/8ZEiM1Xn0MxVGhpAZsWbotf9n6ntmsMDXkRSQnYU5T2y4hkWlYImPkIasmbDFVkxi0Wz7I7pUX4hG3l6NJegXWO6n4OcpXxm26oZUtmpqrNRipUIUglM5xp4+YlQhu3FIa/aEZ+fuE9xnSZ8gCYnmPKwJ7AKKkEUruSTA3vhBnlh5rFYgSg5NkVte2RjdPg1SYZCTUXVwE9bbIzeGiXJ9vSe1/bhacpLeLgg48H6SSVInoCmen6W4Oo4/QlekXMBCuxfRwH2pO2K84gEKAAD0hUHBEf0Eh4rIi3K2oUdDCnMv5CD3lqiBn49hFB+bBdk+kxFNNx9iSDciFc91lIjz2IW8FO//+iLO7DEBZMrz/B8bJQ0="


@@ -0,0 +1,2 @@
[host]
10.10.43.111

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -0,0 +1,41 @@
#!/bin/bash
export KOPS_STATE_STORE=s3://clusters.prod.datasaker.io
export KOPS_CLUSTER_NAME=k8s-prod.datasaker.io
export VPC_ID="vpc-00ba2b0e9ad59f0ed"
export Network_CIDR="172.24.0.0/19"
export AMI_Image="ami-0abb33b73a78cae31"
export Private_Subnet_ID_1="subnet-024f0deda82039fa4"
export Private_Subnet_ID_2="subnet-050d942fa1c46540a"
export Private_Subnet_ID_3="subnet-0946eb806af7377be"
export Public_Subnet_ID_1="subnet-00c363356f133411d"
export Public_Subnet_ID_2="subnet-07aa5e879a262014d"
export Public_Subnet_ID_3="subnet-0073a61bc56a68a3e"
kops_cmd="""
kops create cluster \
--vpc "$VPC_ID" \
--cloud aws \
--ssh-public-key "$HOME/.ssh/id_rsa.pub" \
--topology private --kubernetes-version "1.25.2" \
--network-cidr "$Network_CIDR" \
--networking calico \
--container-runtime containerd \
--image $AMI_Image \
--zones ap-northeast-2a,ap-northeast-2b,ap-northeast-2c \
--master-count 3 \
--master-size t3.small \
--node-count 3 \
--node-size t3.small \
--node-volume-size 100 \
--subnets "$Private_Subnet_ID_1,$Private_Subnet_ID_2,$Private_Subnet_ID_3" \
--utility-subnets "$Public_Subnet_ID_1,$Public_Subnet_ID_2,$Public_Subnet_ID_3" \
-v 10
"""
kubeconfig="kops export kubecfg --admin --kubeconfig $HOME/.kube/config --name=${KOPS_CLUSTER_NAME} --state=${KOPS_STATE_STORE}"
echo ${kubeconfig}
echo
echo
echo ${kops_cmd}
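As written, the script only prints the kops command for review; actually creating the cluster means evaluating the built string, e.g. (a sketch; the script filename is an assumption):
```
source ./create-prod-cluster.sh   # load the exports and kops_cmd
eval "$kops_cmd"                  # run the printed kops create command
kops update cluster --name="$KOPS_CLUSTER_NAME" --state="$KOPS_STATE_STORE" --yes
```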


@@ -0,0 +1,74 @@
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-druid-a
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m5a.2xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: k8s-prod-data-druid-a
datasaker/group: data-druid
role: Node
subnets:
- ap-northeast-2a
taints:
- prod/data-druid:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-druid-b
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: k8s-prod-data-druid-b
datasaker/group: data-druid
role: Node
subnets:
- ap-northeast-2b
taints:
- prod/data-druid:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-druid-c
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m5a.4xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: k8s-prod-data-druid-c
datasaker/group: data-druid
role: Node
subnets:
- ap-northeast-2c
taints:
- prod/data-druid:NoSchedule


@@ -0,0 +1,74 @@
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-es-a
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m5a.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: k8s-prod-data-es-a
datasaker/group: data-es
role: Node
subnets:
- ap-northeast-2a
taints:
- prod/data-es:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-es-b
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: k8s-prod-data-es-b
datasaker/group: data-es
role: Node
subnets:
- ap-northeast-2b
taints:
- prod/data-es:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-es-c
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m5a.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: k8s-prod-data-es-c
datasaker/group: data-es
role: Node
subnets:
- ap-northeast-2c
taints:
- prod/data-es:NoSchedule


@@ -0,0 +1,74 @@
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-kafka-a
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m5a.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: k8s-prod-data-kafka-a
datasaker/group: data-kafka
role: Node
subnets:
- ap-northeast-2a
taints:
- prod/data-kafka:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-kafka-b
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: k8s-prod-data-kafka-b
datasaker/group: data-kafka
role: Node
subnets:
- ap-northeast-2b
taints:
- prod/data-kafka:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-kafka-c
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m5a.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: k8s-prod-data-kafka-c
datasaker/group: data-kafka
role: Node
subnets:
- ap-northeast-2c
taints:
- prod/data-kafka:NoSchedule


@@ -0,0 +1,79 @@
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-a
spec:
image: ami-0409b7ddbc59e3222
machineType: m5a.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-a
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-a2
spec:
image: ami-0409b7ddbc59e3222
machineType: m5a.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-a2
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-b
spec:
image: ami-0409b7ddbc59e3222
machineType: m6i.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-b
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2b
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-c
spec:
image: ami-0409b7ddbc59e3222
machineType: m5a.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-c
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2c

View File

@@ -0,0 +1,411 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2022-11-01T05:36:36Z"
generation: 2
name: k8s-prod.datasaker.io
spec:
api:
loadBalancer:
class: Network
type: Public
authorization:
rbac: {}
channel: stable
cloudProvider: aws
configBase: s3://clusters.prod.datasaker.io/k8s-prod.datasaker.io
containerRuntime: containerd
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-ap-northeast-2a
name: a
- encryptedVolume: true
instanceGroup: master-ap-northeast-2b
name: b
- encryptedVolume: true
instanceGroup: master-ap-northeast-2c
name: c
memoryRequest: 100Mi
name: main
- cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-ap-northeast-2a
name: a
- encryptedVolume: true
instanceGroup: master-ap-northeast-2b
name: b
- encryptedVolume: true
instanceGroup: master-ap-northeast-2c
name: c
memoryRequest: 100Mi
name: events
iam:
allowContainerRegistry: true
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: 1.25.2
masterInternalName: api.internal.k8s-prod.datasaker.io
masterPublicName: api.k8s-prod.datasaker.io
networkCIDR: 172.24.0.0/19
networkID: vpc-00ba2b0e9ad59f0ed
networking:
calico: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.24.8.0/23
id: subnet-024f0deda82039fa4
name: ap-northeast-2a
type: Private
zone: ap-northeast-2a
- cidr: 172.24.10.0/23
id: subnet-050d942fa1c46540a
name: ap-northeast-2b
type: Private
zone: ap-northeast-2b
- cidr: 172.24.12.0/23
id: subnet-0946eb806af7377be
name: ap-northeast-2c
type: Private
zone: ap-northeast-2c
- cidr: 172.24.0.0/24
id: subnet-00c363356f133411d
name: utility-ap-northeast-2a
type: Utility
zone: ap-northeast-2a
- cidr: 172.24.1.0/24
id: subnet-07aa5e879a262014d
name: utility-ap-northeast-2b
type: Utility
zone: ap-northeast-2b
- cidr: 172.24.2.0/24
id: subnet-0073a61bc56a68a3e
name: utility-ap-northeast-2c
type: Utility
zone: ap-northeast-2c
topology:
dns:
type: Public
masters: private
nodes: private
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:50:52Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-druid-a
spec:
image: ami-0abb33b73a78cae31
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-druid
kops.k8s.io/instancegroup: k8s-prod-data-druid-a
role: Node
subnets:
- ap-northeast-2a
taints:
- prod/data-druid:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:50:52Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-druid-b
spec:
image: ami-0abb33b73a78cae31
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-druid
kops.k8s.io/instancegroup: k8s-prod-data-druid-b
role: Node
subnets:
- ap-northeast-2b
taints:
- prod/data-druid:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:50:52Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-druid-c
spec:
image: ami-0abb33b73a78cae31
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-druid
kops.k8s.io/instancegroup: k8s-prod-data-druid-c
role: Node
subnets:
- ap-northeast-2c
taints:
- prod/data-druid:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:35Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-kafka-a
spec:
image: ami-0abb33b73a78cae31
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-kafka
kops.k8s.io/instancegroup: k8s-prod-data-kafka-a
role: Node
subnets:
- ap-northeast-2a
taints:
- prod/data-kafka:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:35Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-kafka-b
spec:
image: ami-0abb33b73a78cae31
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-kafka
kops.k8s.io/instancegroup: k8s-prod-data-kafka-b
role: Node
subnets:
- ap-northeast-2b
taints:
- prod/data-kafka:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:35Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-kafka-c
spec:
image: ami-0abb33b73a78cae31
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-kafka
kops.k8s.io/instancegroup: k8s-prod-data-kafka-c
role: Node
subnets:
- ap-northeast-2c
taints:
- prod/data-kafka:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:49Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-a
spec:
image: ami-0abb33b73a78cae31
machineType: c5.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-a
role: Node
rootVolumeSize: 100
subnets:
- ap-northeast-2a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:49Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-b
spec:
image: ami-0abb33b73a78cae31
machineType: c5.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-b
role: Node
rootVolumeSize: 100
subnets:
- ap-northeast-2b
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:49Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-c
spec:
image: ami-0abb33b73a78cae31
machineType: c5.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-c
role: Node
rootVolumeSize: 100
subnets:
- ap-northeast-2c
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-01T05:36:36Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: master-ap-northeast-2a
spec:
image: ami-0abb33b73a78cae31
instanceMetadata:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: c5a.large
maxSize: 1
minSize: 1
role: Master
subnets:
- ap-northeast-2a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-01T05:36:36Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: master-ap-northeast-2b
spec:
image: ami-0abb33b73a78cae31
instanceMetadata:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: c5a.large
maxSize: 1
minSize: 1
role: Master
subnets:
- ap-northeast-2b
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-01T05:36:36Z"
generation: 1
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: master-ap-northeast-2c
spec:
image: ami-0abb33b73a78cae31
instanceMetadata:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: c5a.large
maxSize: 1
minSize: 1
role: Master
subnets:
- ap-northeast-2c
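
A spec like this is applied by round-tripping it through the kops state store. A minimal apply sequence, assuming the state store shown in `configBase` and an illustrative local file name:

```
export KOPS_STATE_STORE=s3://clusters.prod.datasaker.io
kops replace -f k8s-prod.datasaker.io.yaml          # push the edited spec (file name illustrative)
kops update cluster k8s-prod.datasaker.io --yes     # apply the changes to AWS
kops rolling-update cluster k8s-prod.datasaker.io --yes   # roll nodes onto the new config
```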

View File

@@ -0,0 +1,411 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2022-11-01T05:36:36Z"
generation: 5
name: k8s-prod.datasaker.io
spec:
api:
loadBalancer:
class: Network
type: Public
authorization:
rbac: {}
channel: stable
cloudProvider: aws
configBase: s3://clusters.prod.datasaker.io/k8s-prod.datasaker.io
containerRuntime: containerd
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-ap-northeast-2a
name: a
- encryptedVolume: true
instanceGroup: master-ap-northeast-2b
name: b
- encryptedVolume: true
instanceGroup: master-ap-northeast-2c
name: c
memoryRequest: 100Mi
name: main
- cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-ap-northeast-2a
name: a
- encryptedVolume: true
instanceGroup: master-ap-northeast-2b
name: b
- encryptedVolume: true
instanceGroup: master-ap-northeast-2c
name: c
memoryRequest: 100Mi
name: events
iam:
allowContainerRegistry: true
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: 1.25.2
masterInternalName: api.internal.k8s-prod.datasaker.io
masterPublicName: api.k8s-prod.datasaker.io
networkCIDR: 172.24.0.0/19
networkID: vpc-00ba2b0e9ad59f0ed
networking:
calico: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.24.8.0/23
id: subnet-024f0deda82039fa4
name: ap-northeast-2a
type: Private
zone: ap-northeast-2a
- cidr: 172.24.10.0/23
id: subnet-050d942fa1c46540a
name: ap-northeast-2b
type: Private
zone: ap-northeast-2b
- cidr: 172.24.12.0/23
id: subnet-0946eb806af7377be
name: ap-northeast-2c
type: Private
zone: ap-northeast-2c
- cidr: 172.24.0.0/24
id: subnet-00c363356f133411d
name: utility-ap-northeast-2a
type: Utility
zone: ap-northeast-2a
- cidr: 172.24.1.0/24
id: subnet-07aa5e879a262014d
name: utility-ap-northeast-2b
type: Utility
zone: ap-northeast-2b
- cidr: 172.24.2.0/24
id: subnet-0073a61bc56a68a3e
name: utility-ap-northeast-2c
type: Utility
zone: ap-northeast-2c
topology:
dns:
type: Public
masters: private
nodes: private
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:50:52Z"
generation: 2
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-druid-a
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-druid
kops.k8s.io/instancegroup: k8s-prod-data-druid-a
role: Node
subnets:
- ap-northeast-2a
taints:
- prod/data-druid:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:50:52Z"
generation: 2
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-druid-b
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-druid
kops.k8s.io/instancegroup: k8s-prod-data-druid-b
role: Node
subnets:
- ap-northeast-2b
taints:
- prod/data-druid:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:50:52Z"
generation: 2
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-druid-c
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-druid
kops.k8s.io/instancegroup: k8s-prod-data-druid-c
role: Node
subnets:
- ap-northeast-2c
taints:
- prod/data-druid:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:35Z"
generation: 3
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-kafka-a
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-kafka
kops.k8s.io/instancegroup: k8s-prod-data-kafka-a
role: Node
subnets:
- ap-northeast-2a
taints:
- prod/data-kafka:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:35Z"
generation: 2
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-kafka-b
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-kafka
kops.k8s.io/instancegroup: k8s-prod-data-kafka-b
role: Node
subnets:
- ap-northeast-2b
taints:
- prod/data-kafka:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:35Z"
generation: 3
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-data-kafka-c
spec:
image: ami-0409b7ddbc59e3222
kubelet:
anonymousAuth: false
nodeLabels:
node-role.kubernetes.io/node: ""
machineType: m6i.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: data-kafka
kops.k8s.io/instancegroup: k8s-prod-data-kafka-c
role: Node
subnets:
- ap-northeast-2c
taints:
- prod/data-kafka:NoSchedule
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:49Z"
generation: 3
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-a
spec:
image: ami-0409b7ddbc59e3222
machineType: c5.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-a
role: Node
rootVolumeSize: 100
subnets:
- ap-northeast-2a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:49Z"
generation: 3
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-b
spec:
image: ami-0409b7ddbc59e3222
machineType: c5.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-b
role: Node
rootVolumeSize: 100
subnets:
- ap-northeast-2b
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-02T01:51:49Z"
generation: 3
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-c
spec:
image: ami-0409b7ddbc59e3222
machineType: c5.large
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-c
role: Node
rootVolumeSize: 100
subnets:
- ap-northeast-2c
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-01T05:36:36Z"
generation: 2
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: master-ap-northeast-2a
spec:
image: ami-0409b7ddbc59e3222
instanceMetadata:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: c5a.large
maxSize: 1
minSize: 1
role: Master
subnets:
- ap-northeast-2a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-01T05:36:36Z"
generation: 2
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: master-ap-northeast-2b
spec:
image: ami-0409b7ddbc59e3222
instanceMetadata:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: c5a.large
maxSize: 1
minSize: 1
role: Master
subnets:
- ap-northeast-2b
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2022-11-01T05:36:36Z"
generation: 2
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: master-ap-northeast-2c
spec:
image: ami-0409b7ddbc59e3222
instanceMetadata:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: c5a.large
maxSize: 1
minSize: 1
role: Master
subnets:
- ap-northeast-2c

View File

@@ -0,0 +1,95 @@
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-rel-process-a
spec:
image: ami-0409b7ddbc59e3222
machineType: m5a.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
mixedInstancesPolicy:
onDemandAboveBase: 0
onDemandBase: 0
spotAllocationStrategy: capacity-optimized
nodeLabels:
datasaker/group: rel-process
kops.k8s.io/instancegroup: k8s-rel-process-a
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-rel-process-a2
spec:
image: ami-0409b7ddbc59e3222
machineType: m5a.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
mixedInstancesPolicy:
onDemandAboveBase: 0
onDemandBase: 0
spotAllocationStrategy: capacity-optimized
nodeLabels:
datasaker/group: rel-process
kops.k8s.io/instancegroup: k8s-rel-process-a2
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-rel-process-b
spec:
image: ami-0409b7ddbc59e3222
machineType: m6i.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
mixedInstancesPolicy:
onDemandAboveBase: 0
onDemandBase: 0
spotAllocationStrategy: capacity-optimized
nodeLabels:
datasaker/group: rel-process
kops.k8s.io/instancegroup: k8s-rel-process-b
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2b
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-rel-process-c
spec:
image: ami-0409b7ddbc59e3222
machineType: m5a.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
mixedInstancesPolicy:
onDemandAboveBase: 0
onDemandBase: 0
spotAllocationStrategy: capacity-optimized
nodeLabels:
datasaker/group: rel-process
kops.k8s.io/instancegroup: k8s-rel-process-c
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2c
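
With `onDemandBase: 0` and `onDemandAboveBase: 0`, these rel-process groups run entirely on Spot capacity under the `capacity-optimized` allocation strategy. A quick check of what actually launched, assuming a working kubeconfig for this cluster:

```
kops get ig --name k8s-prod.datasaker.io --state s3://clusters.prod.datasaker.io
kubectl get nodes -l datasaker/group=rel-process -o wide
```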

View File

@@ -0,0 +1,20 @@
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
labels:
kops.k8s.io/cluster: k8s-prod.datasaker.io
name: k8s-prod-process-a
spec:
image: ami-0409b7ddbc59e3222
machineType: c5.xlarge
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: k8s-prod-process-a
rootVolumeSize: 100
role: Node
subnets:
- ap-northeast-2a

View File

@@ -0,0 +1,19 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
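
This binds the `admin-user` service account to the `cluster-admin` role; logging in to the dashboard then requires a bearer token for that account. Since this cluster runs Kubernetes 1.25, a short-lived token can be minted directly (older clusters read it from the service-account secret instead):

```
kubectl -n kubernetes-dashboard create token admin-user
```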

View File

@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
ingress.kubernetes.io/ssl-redirect: "true"
kubernetes.io/tls-acme: "true"
name: kubedash.kr.datasaker.io
namespace: kubernetes-dashboard
spec:
ingressClassName: nginx
rules:
- host: kubedash.kr.datasaker.io
http:
paths:
- backend:
service:
name: kubernetes-dashboard
port:
number: 443
path: /
pathType: Prefix
tls:
- hosts:
- kubedash.kr.datasaker.io
secretName: kubedash.kr.datasaker.io-tls
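
Given the `cert-manager.io/cluster-issuer: letsencrypt-prod` annotation, cert-manager's ingress-shim should create a Certificate backing `secretName`; the Certificate is normally named after the TLS secret, so issuance can be checked with:

```
kubectl -n kubernetes-dashboard get certificate
kubectl -n kubernetes-dashboard describe certificate kubedash.kr.datasaker.io-tls
```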

View File

@@ -0,0 +1,308 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 9090
targetPort: 9090
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.5.1
imagePullPolicy: Always
ports:
- containerPort: 9090
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
- --insecure-bind-address=0.0.0.0
- --enable-insecure-login
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.7
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}

View File

@@ -0,0 +1,24 @@
annotations:
artifacthub.io/prerelease: "false"
artifacthub.io/signKey: |
fingerprint: 1020CF3C033D4F35BAE1C19E1226061C665DF13E
url: https://cert-manager.io/public-keys/cert-manager-keyring-2021-09-20-1020CF3C033D4F35BAE1C19E1226061C665DF13E.gpg
apiVersion: v1
appVersion: v1.10.0
description: A Helm chart for cert-manager
home: https://github.com/cert-manager/cert-manager
icon: https://raw.githubusercontent.com/cert-manager/cert-manager/d53c0b9270f8cd90d908460d69502694e1838f5f/logo/logo-small.png
keywords:
- cert-manager
- kube-lego
- letsencrypt
- tls
kubeVersion: '>= 1.20.0-0'
maintainers:
- email: cert-manager-maintainers@googlegroups.com
name: cert-manager-maintainers
url: https://cert-manager.io
name: cert-manager
sources:
- https://github.com/cert-manager/cert-manager
version: v1.10.0

View File

@@ -0,0 +1,248 @@
# cert-manager

cert-manager is a Kubernetes addon to automate the management and issuance of
TLS certificates from various issuing sources.

It will ensure certificates are valid and up to date periodically, and attempt
to renew certificates at an appropriate time before expiry.

## Prerequisites

- Kubernetes 1.20+

## Installing the Chart

Full installation instructions, including details on how to configure extra
functionality in cert-manager, can be found in the [installation docs](https://cert-manager.io/docs/installation/kubernetes/).

Before installing the chart, you must first install the cert-manager CustomResourceDefinition resources.
This is performed in a separate step to allow you to easily uninstall and reinstall cert-manager without deleting your installed custom resources.

```bash
$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.10.0/cert-manager.crds.yaml
```

To install the chart with the release name `my-release`:

```console
## Add the Jetstack Helm repository
$ helm repo add jetstack https://charts.jetstack.io

## Install the cert-manager helm chart
$ helm install my-release --namespace cert-manager --version v1.10.0 jetstack/cert-manager
```

In order to begin issuing certificates, you will need to set up a ClusterIssuer
or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer).
More information on the different types of issuers and how to configure them
can be found in [our documentation](https://cert-manager.io/docs/configuration/).
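
For reference, a minimal `letsencrypt-staging` ClusterIssuer using the HTTP-01 solver might look like the sketch below (the email address is a placeholder, not part of this chart):

```yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # Let's Encrypt staging endpoint: untrusted certificates, relaxed rate limits.
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    email: ops@example.com              # placeholder contact address
    privateKeySecretRef:
      name: letsencrypt-staging         # secret that stores the ACME account key
    solvers:
    - http01:
        ingress:
          class: nginx
```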
For information on how to configure cert-manager to automatically provision
Certificates for Ingress resources, take a look at the
[Securing Ingresses documentation](https://cert-manager.io/docs/usage/ingress/).

> **Tip**: List all releases using `helm list`

## Upgrading the Chart

Special considerations may be required when upgrading the Helm chart, and these
are documented in our full [upgrading guide](https://cert-manager.io/docs/installation/upgrading/).

**Please check here before performing upgrades!**

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```console
$ helm delete my-release
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

If you want to completely uninstall cert-manager from your cluster, you will also need to
delete the previously installed CustomResourceDefinition resources:

```console
$ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.10.0/cert-manager.crds.yaml
```

## Configuration

The following table lists the configurable parameters of the cert-manager chart and their default values.
| Parameter | Description | Default |
| --------- | ----------- | ------- |
| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | `[]` |
| `global.commonLabels` | Labels to apply to all resources | `{}` |
| `global.rbac.create` | If `true`, create and use RBAC resources (includes sub-charts) | `true` |
| `global.priorityClassName`| Priority class name for cert-manager and webhook pods | `""` |
| `global.podSecurityPolicy.enabled` | If `true`, create and use PodSecurityPolicy (includes sub-charts) | `false` |
| `global.podSecurityPolicy.useAppArmor` | If `true`, use Apparmor seccomp profile in PSP | `true` |
| `global.leaderElection.namespace` | Override the namespace used to store the ConfigMap for leader election | `kube-system` |
| `global.leaderElection.leaseDuration` | The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate | |
| `global.leaderElection.renewDeadline` | The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration | |
| `global.leaderElection.retryPeriod` | The duration the clients should wait between attempting acquisition and renewal of a leadership | |
| `installCRDs` | If true, CRD resources will be installed as part of the Helm chart. If enabled, when uninstalling CRD resources will be deleted causing all installed custom resources to be DELETED | `false` |
| `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` |
| `image.tag` | Image tag | `v1.10.0` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `replicaCount` | Number of cert-manager replicas | `1` |
| `clusterResourceNamespace` | Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources | Same namespace as cert-manager pod |
| `featureGates` | Set of comma-separated key=value pairs that describe feature gates on the controller. Some feature gates may also have to be enabled on other components, and can be set supplying the `feature-gate` flag to `<component>.extraArgs` | `` |
| `extraArgs` | Optional flags for cert-manager | `[]` |
| `extraEnv` | Optional environment variables for cert-manager | `[]` |
| `serviceAccount.create` | If `true`, create a new service account | `true` |
| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | |
| `serviceAccount.annotations` | Annotations to add to the service account | |
| `serviceAccount.automountServiceAccountToken` | Automount API credentials for the Service Account | `true` |
| `volumes` | Optional volumes for cert-manager | `[]` |
| `volumeMounts` | Optional volume mounts for cert-manager | `[]` |
| `resources` | CPU/memory resource requests/limits | `{}` |
| `securityContext` | Security context for the controller pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
| `containerSecurityContext` | Security context to be set on the controller component container | refer to [Default Security Contexts](#default-security-contexts) |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `affinity` | Node affinity for pod assignment | `{}` |
| `tolerations` | Node tolerations for pod assignment | `[]` |
| `topologySpreadConstraints` | Topology spread constraints for pod assignment | `[]` |
| `ingressShim.defaultIssuerName` | Optional default issuer to use for ingress resources | |
| `ingressShim.defaultIssuerKind` | Optional default issuer kind to use for ingress resources | |
| `ingressShim.defaultIssuerGroup` | Optional default issuer group to use for ingress resources | |
| `prometheus.enabled` | Enable Prometheus monitoring | `true` |
| `prometheus.servicemonitor.enabled` | Enable Prometheus Operator ServiceMonitor monitoring | `false` |
| `prometheus.servicemonitor.namespace` | Define namespace where to deploy the ServiceMonitor resource | (namespace where you are deploying) |
| `prometheus.servicemonitor.prometheusInstance` | Prometheus Instance definition | `default` |
| `prometheus.servicemonitor.targetPort` | Prometheus scrape port | `9402` |
| `prometheus.servicemonitor.path` | Prometheus scrape path | `/metrics` |
| `prometheus.servicemonitor.interval` | Prometheus scrape interval | `60s` |
| `prometheus.servicemonitor.labels` | Add custom labels to ServiceMonitor | |
| `prometheus.servicemonitor.scrapeTimeout` | Prometheus scrape timeout | `30s` |
| `prometheus.servicemonitor.honorLabels` | Enable label honoring for metrics scraped by Prometheus (see [Prometheus scrape config docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) for details). By setting `honorLabels` to `true`, Prometheus will prefer label contents given by cert-manager on conflicts. Can be used to remove the "exported_namespace" label for example. | `false` |
| `podAnnotations` | Annotations to add to the cert-manager pod | `{}` |
| `deploymentAnnotations` | Annotations to add to the cert-manager deployment | `{}` |
| `podDnsPolicy` | Optional cert-manager pod [DNS policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-policy) | |
| `podDnsConfig` | Optional cert-manager pod [DNS configurations](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-config) | |
| `podLabels` | Labels to add to the cert-manager pod | `{}` |
| `serviceLabels` | Labels to add to the cert-manager controller service | `{}` |
| `serviceAnnotations` | Annotations to add to the cert-manager service | `{}` |
| `http_proxy` | Value of the `HTTP_PROXY` environment variable in the cert-manager pod | |
| `https_proxy` | Value of the `HTTPS_PROXY` environment variable in the cert-manager pod | |
| `no_proxy` | Value of the `NO_PROXY` environment variable in the cert-manager pod | |
| `webhook.replicaCount` | Number of cert-manager webhook replicas | `1` |
| `webhook.timeoutSeconds` | Seconds the API server should wait the webhook to respond before treating the call as a failure. | `10` |
| `webhook.podAnnotations` | Annotations to add to the webhook pods | `{}` |
| `webhook.podLabels` | Labels to add to the cert-manager webhook pod | `{}` |
| `webhook.serviceLabels` | Labels to add to the cert-manager webhook service | `{}` |
| `webhook.deploymentAnnotations` | Annotations to add to the webhook deployment | `{}` |
| `webhook.mutatingWebhookConfigurationAnnotations` | Annotations to add to the mutating webhook configuration | `{}` |
| `webhook.validatingWebhookConfigurationAnnotations` | Annotations to add to the validating webhook configuration | `{}` |
| `webhook.serviceAnnotations` | Annotations to add to the webhook service | `{}` |
| `webhook.config` | WebhookConfiguration YAML used to configure flags for the webhook. Generates a ConfigMap containing contents of the field. See `values.yaml` for example. | `{}` |
| `webhook.extraArgs` | Optional flags for cert-manager webhook component | `[]` |
| `webhook.serviceAccount.create` | If `true`, create a new service account for the webhook component | `true` |
| `webhook.serviceAccount.name` | Service account for the webhook component to be used. If not set and `webhook.serviceAccount.create` is `true`, a name is generated using the fullname template | |
| `webhook.serviceAccount.annotations` | Annotations to add to the service account for the webhook component | |
| `webhook.serviceAccount.automountServiceAccountToken` | Automount API credentials for the webhook Service Account | |
| `webhook.resources` | CPU/memory resource requests/limits for the webhook pods | `{}` |
| `webhook.nodeSelector` | Node labels for webhook pod assignment | `{}` |
| `webhook.networkPolicy.enabled` | Enable default network policies for webhooks egress and ingress traffic | `false` |
| `webhook.networkPolicy.ingress` | Sets ingress policy block. See NetworkPolicy documentation. See `values.yaml` for example. | `{}` |
| `webhook.networkPolicy.egress` | Sets egress policy block. See NetworkPolicy documentation. See `values.yaml` for example. | `{}` |
| `webhook.affinity` | Node affinity for webhook pod assignment | `{}` |
| `webhook.tolerations` | Node tolerations for webhook pod assignment | `[]` |
| `webhook.topologySpreadConstraints` | Topology spread constraints for webhook pod assignment | `[]` |
| `webhook.image.repository` | Webhook image repository | `quay.io/jetstack/cert-manager-webhook` |
| `webhook.image.tag` | Webhook image tag | `v1.10.0` |
| `webhook.image.pullPolicy` | Webhook image pull policy | `IfNotPresent` |
| `webhook.securePort` | The port that the webhook should listen on for requests. | `10250` |
| `webhook.securityContext` | Security context for webhook pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
| `webhook.containerSecurityContext` | Security context to be set on the webhook component container | refer to [Default Security Contexts](#default-security-contexts) |
| `webhook.hostNetwork` | If `true`, run the Webhook on the host network. | `false` |
| `webhook.serviceType` | The type of the `Service`. | `ClusterIP` |
| `webhook.loadBalancerIP` | The specific load balancer IP to use (when `serviceType` is `LoadBalancer`). | |
| `webhook.url.host` | The host to use to reach the webhook, instead of using internal cluster DNS for the service. | |
| `webhook.livenessProbe.failureThreshold` | The liveness probe failure threshold | `3` |
| `webhook.livenessProbe.initialDelaySeconds` | The liveness probe initial delay (in seconds) | `60` |
| `webhook.livenessProbe.periodSeconds` | The liveness probe period (in seconds) | `10` |
| `webhook.livenessProbe.successThreshold` | The liveness probe success threshold | `1` |
| `webhook.livenessProbe.timeoutSeconds` | The liveness probe timeout (in seconds) | `1` |
| `webhook.readinessProbe.failureThreshold` | The readiness probe failure threshold | `3` |
| `webhook.readinessProbe.initialDelaySeconds` | The readiness probe initial delay (in seconds) | `5` |
| `webhook.readinessProbe.periodSeconds` | The readiness probe period (in seconds) | `5` |
| `webhook.readinessProbe.successThreshold` | The readiness probe success threshold | `1` |
| `webhook.readinessProbe.timeoutSeconds` | The readiness probe timeout (in seconds) | `1` |
| `cainjector.enabled` | Toggles whether the cainjector component should be installed (required for the webhook component to work) | `true` |
| `cainjector.replicaCount` | Number of cert-manager cainjector replicas | `1` |
| `cainjector.podAnnotations` | Annotations to add to the cainjector pods | `{}` |
| `cainjector.podLabels` | Labels to add to the cert-manager cainjector pod | `{}` |
| `cainjector.deploymentAnnotations` | Annotations to add to the cainjector deployment | `{}` |
| `cainjector.extraArgs` | Optional flags for cert-manager cainjector component | `[]` |
| `cainjector.serviceAccount.create` | If `true`, create a new service account for the cainjector component | `true` |
| `cainjector.serviceAccount.name` | Service account for the cainjector component to be used. If not set and `cainjector.serviceAccount.create` is `true`, a name is generated using the fullname template | |
| `cainjector.serviceAccount.annotations` | Annotations to add to the service account for the cainjector component | |
| `cainjector.serviceAccount.automountServiceAccountToken` | Automount API credentials for the cainjector Service Account | `true` |
| `cainjector.resources` | CPU/memory resource requests/limits for the cainjector pods | `{}` |
| `cainjector.nodeSelector` | Node labels for cainjector pod assignment | `{}` |
| `cainjector.affinity` | Node affinity for cainjector pod assignment | `{}` |
| `cainjector.tolerations` | Node tolerations for cainjector pod assignment | `[]` |
| `cainjector.topologySpreadConstraints` | Topology spread constraints for cainjector pod assignment | `[]` |
| `cainjector.image.repository` | cainjector image repository | `quay.io/jetstack/cert-manager-cainjector` |
| `cainjector.image.tag` | cainjector image tag | `v1.10.0` |
| `cainjector.image.pullPolicy` | cainjector image pull policy | `IfNotPresent` |
| `cainjector.securityContext` | Security context for cainjector pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
| `cainjector.containerSecurityContext` | Security context to be set on cainjector component container | refer to [Default Security Contexts](#default-security-contexts) |
| `startupapicheck.enabled` | Toggles whether the startupapicheck Job should be installed | `true` |
| `startupapicheck.securityContext` | Security context for startupapicheck pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
| `startupapicheck.containerSecurityContext` | Security context to be set on startupapicheck component container | refer to [Default Security Contexts](#default-security-contexts) |
| `startupapicheck.timeout` | Timeout for 'kubectl check api' command | `1m` |
| `startupapicheck.backoffLimit` | Job backoffLimit | `4` |
| `startupapicheck.jobAnnotations` | Optional additional annotations to add to the startupapicheck Job | `{}` |
| `startupapicheck.podAnnotations` | Optional additional annotations to add to the startupapicheck Pods | `{}` |
| `startupapicheck.extraArgs` | Optional additional arguments for startupapicheck | `[]` |
| `startupapicheck.resources` | CPU/memory resource requests/limits for the startupapicheck pod | `{}` |
| `startupapicheck.nodeSelector` | Node labels for startupapicheck pod assignment | `{}` |
| `startupapicheck.affinity` | Node affinity for startupapicheck pod assignment | `{}` |
| `startupapicheck.tolerations` | Node tolerations for startupapicheck pod assignment | `[]` |
| `startupapicheck.podLabels` | Optional additional labels to add to the startupapicheck Pods | `{}` |
| `startupapicheck.image.repository` | startupapicheck image repository | `quay.io/jetstack/cert-manager-ctl` |
| `startupapicheck.image.tag` | startupapicheck image tag | `v1.10.0` |
| `startupapicheck.image.pullPolicy` | startupapicheck image pull policy | `IfNotPresent` |
| `startupapicheck.serviceAccount.create` | If `true`, create a new service account for the startupapicheck component | `true` |
| `startupapicheck.serviceAccount.name` | Service account for the startupapicheck component to be used. If not set and `startupapicheck.serviceAccount.create` is `true`, a name is generated using the fullname template | |
| `startupapicheck.serviceAccount.annotations` | Annotations to add to the service account for the startupapicheck component | |
| `startupapicheck.serviceAccount.automountServiceAccountToken` | Automount API credentials for the startupapicheck Service Account | `true` |
### Default Security Contexts

The default pod-level and container-level security contexts, below, adhere to the [restricted](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted) Pod Security Standards policies.

Default pod-level securityContext:

```yaml
runAsNonRoot: true
seccompProfile:
  type: RuntimeDefault
```

Default containerSecurityContext:

```yaml
allowPrivilegeEscalation: false
capabilities:
  drop:
  - ALL
```

### Assigning Values

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
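
For example, to enable CRD installation via the `installCRDs` parameter listed above:

```console
$ helm install my-release jetstack/cert-manager --namespace cert-manager --set installCRDs=true
```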

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
$ helm install my-release -f values.yaml .
```

> **Tip**: You can use the default [values.yaml](https://github.com/cert-manager/cert-manager/blob/master/deploy/charts/cert-manager/values.yaml)

## Contributing

This chart is maintained at [github.com/cert-manager/cert-manager](https://github.com/cert-manager/cert-manager/tree/master/deploy/charts/cert-manager).

View File

@@ -0,0 +1,2 @@
installCRDs: true

View File

@@ -0,0 +1,15 @@
cert-manager {{ .Chart.AppVersion }} has been deployed successfully!

In order to begin issuing certificates, you will need to set up a ClusterIssuer
or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer).

More information on the different types of issuers and how to configure them
can be found in our documentation:

https://cert-manager.io/docs/configuration/

For information on how to configure cert-manager to automatically provision
Certificates for Ingress resources, take a look at the `ingress-shim`
documentation:

https://cert-manager.io/docs/usage/ingress/

View File

@@ -0,0 +1,174 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "cert-manager.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "cert-manager.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "cert-manager.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "cert-manager.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Webhook templates
*/}}
{{/*
Expand the name of the chart.
Manually fix the 'app' and 'name' labels to 'webhook' to maintain
compatibility with the v0.9 deployment selector.
*/}}
{{- define "webhook.name" -}}
{{- printf "webhook" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "webhook.fullname" -}}
{{- $trimmedName := printf "%s" (include "cert-manager.fullname" .) | trunc 55 | trimSuffix "-" -}}
{{- printf "%s-webhook" $trimmedName | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "webhook.caRef" -}}
{{- template "cert-manager.namespace" }}/{{ template "webhook.fullname" . }}-ca
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "webhook.serviceAccountName" -}}
{{- if .Values.webhook.serviceAccount.create -}}
{{ default (include "webhook.fullname" .) .Values.webhook.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.webhook.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
cainjector templates
*/}}
{{/*
Expand the name of the chart.
Manually fix the 'app' and 'name' labels to 'cainjector' to maintain
compatibility with the v0.9 deployment selector.
*/}}
{{- define "cainjector.name" -}}
{{- printf "cainjector" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "cainjector.fullname" -}}
{{- $trimmedName := printf "%s" (include "cert-manager.fullname" .) | trunc 52 | trimSuffix "-" -}}
{{- printf "%s-cainjector" $trimmedName | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "cainjector.serviceAccountName" -}}
{{- if .Values.cainjector.serviceAccount.create -}}
{{ default (include "cainjector.fullname" .) .Values.cainjector.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.cainjector.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
startupapicheck templates
*/}}
{{/*
Expand the name of the chart.
Manually fix the 'app' and 'name' labels to 'startupapicheck' to maintain
compatibility with the v0.9 deployment selector.
*/}}
{{- define "startupapicheck.name" -}}
{{- printf "startupapicheck" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "startupapicheck.fullname" -}}
{{- $trimmedName := printf "%s" (include "cert-manager.fullname" .) | trunc 52 | trimSuffix "-" -}}
{{- printf "%s-startupapicheck" $trimmedName | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "startupapicheck.serviceAccountName" -}}
{{- if .Values.startupapicheck.serviceAccount.create -}}
{{ default (include "startupapicheck.fullname" .) .Values.startupapicheck.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.startupapicheck.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "chartName" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Labels that should be added on each resource
*/}}
{{- define "labels" -}}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- if eq (default "helm" .Values.creator) "helm" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "chartName" . }}
{{- end -}}
{{- if .Values.global.commonLabels}}
{{ toYaml .Values.global.commonLabels }}
{{- end }}
{{- end -}}
{{/*
Namespace for all resources to be installed into
If not defined in values file then the helm release namespace is used
By default this is not set so the helm release namespace will be used
This gets around a problem within helm discussed here
https://github.com/helm/helm/issues/5358
*/}}
{{- define "cert-manager.namespace" -}}
{{ .Values.namespace | default .Release.Namespace }}
{{- end -}}

View File

@@ -0,0 +1,109 @@
{{- if .Values.cainjector.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "cainjector.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
{{- with .Values.cainjector.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.cainjector.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- with .Values.cainjector.strategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
template:
metadata:
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 8 }}
{{- with .Values.cainjector.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.cainjector.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "cainjector.serviceAccountName" . }}
{{- if hasKey .Values.cainjector "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.cainjector.automountServiceAccountToken }}
{{- end }}
{{- with .Values.global.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- with .Values.cainjector.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-cainjector
{{- with .Values.cainjector.image }}
image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
{{- end }}
imagePullPolicy: {{ .Values.cainjector.image.pullPolicy }}
args:
{{- if .Values.global.logLevel }}
- --v={{ .Values.global.logLevel }}
{{- end }}
{{- with .Values.global.leaderElection }}
- --leader-election-namespace={{ .namespace }}
{{- if .leaseDuration }}
- --leader-election-lease-duration={{ .leaseDuration }}
{{- end }}
{{- if .renewDeadline }}
- --leader-election-renew-deadline={{ .renewDeadline }}
{{- end }}
{{- if .retryPeriod }}
- --leader-election-retry-period={{ .retryPeriod }}
{{- end }}
{{- end }}
{{- with .Values.cainjector.extraArgs }}
{{- toYaml . | nindent 10 }}
{{- end }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.cainjector.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.cainjector.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.cainjector.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.cainjector.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.cainjector.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.cainjector.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,20 @@
{{- if .Values.cainjector.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "cainjector.fullname" . }}-psp
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "cainjector.fullname" . }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,22 @@
{{- if .Values.cainjector.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cainjector.fullname" . }}-psp
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cainjector.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "cainjector.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,51 @@
{{- if .Values.cainjector.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "cainjector.fullname" . }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
{{- if .Values.global.podSecurityPolicy.useAppArmor }}
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
  allowedCapabilities: [] # the default set of capabilities is implicitly allowed
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
{{- end }}
{{- end }}
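
All of the PodSecurityPolicy templates in this diff are gated on the same global flag; below is a sketch of the values that enable them. Note that policy/v1beta1 PodSecurityPolicy was removed in Kubernetes 1.25, so this path only applies to older clusters:

```
# Illustrative values.yaml fragment; only renders on clusters that still
# serve policy/v1beta1 (removed in Kubernetes 1.25).
global:
  podSecurityPolicy:
    enabled: true
    useAppArmor: true   # adds the AppArmor profile annotations shown above
```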

View File

@@ -0,0 +1,103 @@
{{- if .Values.cainjector.enabled }}
{{- if .Values.global.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cainjector.fullname" . }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "create", "update", "patch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["apiregistration.k8s.io"]
resources: ["apiservices"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "watch", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cainjector.fullname" . }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cainjector.fullname" . }}
subjects:
- name: {{ template "cainjector.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
# leader election rules
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "cainjector.fullname" . }}:leaderelection
namespace: {{ .Values.global.leaderElection.namespace }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
rules:
# Used for leader election by the controller
# cert-manager-cainjector-leader-election is used by the CertificateBased injector controller
# see cmd/cainjector/start.go#L113
# cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller
# see cmd/cainjector/start.go#L137
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"]
verbs: ["get", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
---
# grant cert-manager permission to manage the leader election leases in the
# leader election namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "cainjector.fullname" . }}:leaderelection
namespace: {{ .Values.global.leaderElection.namespace }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "cainjector.fullname" . }}:leaderelection
subjects:
- kind: ServiceAccount
name: {{ template "cainjector.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,27 @@
{{- if .Values.cainjector.enabled }}
{{- if .Values.cainjector.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: {{ .Values.cainjector.serviceAccount.automountServiceAccountToken }}
metadata:
name: {{ template "cainjector.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.cainjector.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
{{- with .Values.cainjector.serviceAccount.labels }}
    {{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,168 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "cert-manager.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ template "cert-manager.name" . }}
app.kubernetes.io/name: {{ template "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
{{- with .Values.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- with .Values.strategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
template:
metadata:
labels:
app: {{ template "cert-manager.name" . }}
app.kubernetes.io/name: {{ template "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if and .Values.prometheus.enabled (not .Values.prometheus.servicemonitor.enabled) }}
{{- if not .Values.podAnnotations }}
annotations:
{{- end }}
prometheus.io/path: "/metrics"
prometheus.io/scrape: 'true'
prometheus.io/port: '9402'
{{- end }}
spec:
serviceAccountName: {{ template "cert-manager.serviceAccountName" . }}
{{- if hasKey .Values "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}
{{- with .Values.global.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-controller
{{- with .Values.image }}
image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
{{- if .Values.global.logLevel }}
- --v={{ .Values.global.logLevel }}
{{- end }}
{{- if .Values.clusterResourceNamespace }}
- --cluster-resource-namespace={{ .Values.clusterResourceNamespace }}
{{- else }}
- --cluster-resource-namespace=$(POD_NAMESPACE)
{{- end }}
{{- with .Values.global.leaderElection }}
- --leader-election-namespace={{ .namespace }}
{{- if .leaseDuration }}
- --leader-election-lease-duration={{ .leaseDuration }}
{{- end }}
{{- if .renewDeadline }}
- --leader-election-renew-deadline={{ .renewDeadline }}
{{- end }}
{{- if .retryPeriod }}
- --leader-election-retry-period={{ .retryPeriod }}
{{- end }}
{{- end }}
{{- with .Values.extraArgs }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.ingressShim }}
{{- if .defaultIssuerName }}
- --default-issuer-name={{ .defaultIssuerName }}
{{- end }}
{{- if .defaultIssuerKind }}
- --default-issuer-kind={{ .defaultIssuerKind }}
{{- end }}
{{- if .defaultIssuerGroup }}
- --default-issuer-group={{ .defaultIssuerGroup }}
{{- end }}
{{- end }}
{{- if .Values.featureGates }}
- --feature-gates={{ .Values.featureGates }}
{{- end }}
ports:
- containerPort: 9402
name: http-metrics
protocol: TCP
{{- with .Values.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.extraEnv }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.http_proxy }}
- name: HTTP_PROXY
value: {{ . }}
{{- end }}
{{- with .Values.https_proxy }}
- name: HTTPS_PROXY
value: {{ . }}
{{- end }}
{{- with .Values.no_proxy }}
- name: NO_PROXY
value: {{ . }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.podDnsPolicy }}
dnsPolicy: {{ . }}
{{- end }}
{{- with .Values.podDnsConfig }}
dnsConfig:
{{- toYaml . | nindent 8 }}
{{- end }}
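
A values sketch for the controller Deployment above; the issuer and proxy entries are placeholders, and the keys are the ones the template reads:

```
# Illustrative values.yaml fragment for the controller Deployment above.
replicaCount: 1
clusterResourceNamespace: ""       # empty falls back to $(POD_NAMESPACE)
featureGates: ""                   # comma-separated, rendered as --feature-gates
ingressShim:
  defaultIssuerName: letsencrypt   # placeholder issuer name
  defaultIssuerKind: ClusterIssuer
  defaultIssuerGroup: cert-manager.io
http_proxy: http://proxy.internal:3128   # placeholder; exported as HTTP_PROXY
no_proxy: 127.0.0.1,localhost            # exported as NO_PROXY
```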

View File

@@ -0,0 +1,23 @@
{{- if .Values.webhook.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "webhook.fullname" . }}-allow-egress
namespace: {{ include "cert-manager.namespace" . }}
spec:
egress:
{{- with .Values.webhook.networkPolicy.egress }}
{{- toYaml . | nindent 2 }}
{{- end }}
podSelector:
matchLabels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- with .Values.webhook.podLabels }}
{{- toYaml . | nindent 6 }}
{{- end }}
policyTypes:
- Egress
{{- end }}

View File

@@ -0,0 +1,25 @@
{{- if .Values.webhook.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "webhook.fullname" . }}-allow-ingress
namespace: {{ include "cert-manager.namespace" . }}
spec:
ingress:
{{- with .Values.webhook.networkPolicy.ingress }}
{{- toYaml . | nindent 2 }}
{{- end }}
podSelector:
matchLabels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- with .Values.webhook.podLabels }}
{{- toYaml . | nindent 6 }}
{{- end }}
policyTypes:
- Ingress
{{- end }}
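
A values sketch covering both webhook NetworkPolicy templates above; the open CIDRs are placeholders and should be narrowed in practice:

```
# Illustrative values.yaml fragment for the webhook NetworkPolicies above.
webhook:
  networkPolicy:
    enabled: true
    ingress:
      - from:
          - ipBlock:
              cidr: 0.0.0.0/0   # the API server must be able to reach the webhook
    egress:
      - ports:
          - port: 6443
            protocol: TCP
        to:
          - ipBlock:
              cidr: 0.0.0.0/0
```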

View File

@@ -0,0 +1,18 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "cert-manager.fullname" . }}-psp
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "cert-manager.fullname" . }}
{{- end }}

View File

@@ -0,0 +1,20 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-psp
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}

View File

@@ -0,0 +1,49 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "cert-manager.fullname" . }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
{{- if .Values.global.podSecurityPolicy.useAppArmor }}
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
  allowedCapabilities: [] # the default set of capabilities is implicitly allowed
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
{{- end }}

View File

@@ -0,0 +1,545 @@
{{- if .Values.global.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "cert-manager.fullname" . }}:leaderelection
namespace: {{ .Values.global.leaderElection.namespace }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["cert-manager-controller"]
verbs: ["get", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
---
# grant cert-manager permission to manage the leader election leases in the
# leader election namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "cert-manager.fullname" . }}:leaderelection
namespace: {{ .Values.global.leaderElection.namespace }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "cert-manager.fullname" . }}:leaderelection
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
---
# Issuer controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-issuers
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["issuers", "issuers/status"]
verbs: ["update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["issuers"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# ClusterIssuer controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["clusterissuers", "clusterissuers/status"]
verbs: ["update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["clusterissuers"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Certificates controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-certificates
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"]
verbs: ["update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"]
verbs: ["get", "list", "watch"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["cert-manager.io"]
resources: ["certificates/finalizers", "certificaterequests/finalizers"]
verbs: ["update"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders"]
verbs: ["create", "delete", "get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Orders controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-orders
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders", "orders/status"]
verbs: ["update", "patch"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders", "challenges"]
verbs: ["get", "list", "watch"]
- apiGroups: ["cert-manager.io"]
resources: ["clusterissuers", "issuers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges"]
verbs: ["create", "delete"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders/finalizers"]
verbs: ["update"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Challenges controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-challenges
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
  # Used to update challenge resource status
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges", "challenges/status"]
verbs: ["update", "patch"]
# Used to watch challenge resources
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges"]
verbs: ["get", "list", "watch"]
# Used to watch challenges, issuer and clusterissuer resources
- apiGroups: ["cert-manager.io"]
resources: ["issuers", "clusterissuers"]
verbs: ["get", "list", "watch"]
# Need to be able to retrieve ACME account private key to complete challenges
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
# Used to create events
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
# HTTP01 rules
- apiGroups: [""]
resources: ["pods", "services"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get", "list", "watch", "create", "delete", "update"]
- apiGroups: [ "gateway.networking.k8s.io" ]
resources: [ "httproutes" ]
verbs: ["get", "list", "watch", "create", "delete", "update"]
# We require the ability to specify a custom hostname when we are creating
# new ingress resources.
# See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148
- apiGroups: ["route.openshift.io"]
resources: ["routes/custom-host"]
verbs: ["create"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges/finalizers"]
verbs: ["update"]
# DNS01 rules (duplicated above)
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
---
# ingress-shim controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests"]
verbs: ["create", "update", "delete"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get", "list", "watch"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses/finalizers"]
verbs: ["update"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["gateways", "httproutes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["gateways/finalizers", "httproutes/finalizers"]
verbs: ["update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-issuers
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-issuers
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-certificates
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-certificates
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-orders
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-orders
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-challenges
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-challenges
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-view
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
{{- if .Values.global.rbac.aggregateClusterRoles }}
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
{{- end }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "issuers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges", "orders"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-edit
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
{{- if .Values.global.rbac.aggregateClusterRoles }}
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
{{- end }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "issuers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates/status"]
verbs: ["update"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges", "orders"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
---
# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-approve:cert-manager-io
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cert-manager"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["signers"]
verbs: ["approve"]
resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-approve:cert-manager-io
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cert-manager"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-approve:cert-manager-io
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
# Permission to:
# - Update and sign CertificateSigningRequests referencing cert-manager.io Issuers and ClusterIssuers
# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-certificatesigningrequests
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cert-manager"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["certificates.k8s.io"]
resources: ["certificatesigningrequests"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["certificates.k8s.io"]
resources: ["certificatesigningrequests/status"]
verbs: ["update", "patch"]
- apiGroups: ["certificates.k8s.io"]
resources: ["signers"]
resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"]
verbs: ["sign"]
- apiGroups: ["authorization.k8s.io"]
resources: ["subjectaccessreviews"]
verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-certificatesigningrequests
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cert-manager"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-certificatesigningrequests
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
{{- end }}
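
The -view and -edit ClusterRoles above carry optional aggregation labels; a sketch of the toggle:

```
# Illustrative values.yaml fragment: with aggregation enabled, subjects bound
# to the built-in view/edit/admin ClusterRoles also pick up the cert-manager
# resource permissions defined above.
global:
  rbac:
    create: true
    aggregateClusterRoles: true
```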

View File

@@ -0,0 +1,31 @@
{{- if .Values.prometheus.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "cert-manager.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.serviceAnnotations }}
annotations:
    {{- toYaml . | nindent 4 }}
{{- end }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
{{- with .Values.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
ports:
- protocol: TCP
port: 9402
name: tcp-prometheus-servicemonitor
targetPort: {{ .Values.prometheus.servicemonitor.targetPort }}
selector:
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- end }}

View File

@@ -0,0 +1,25 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
metadata:
name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
{{- with .Values.serviceAccount.labels }}
    {{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,45 @@
{{- if and .Values.prometheus.enabled .Values.prometheus.servicemonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "cert-manager.fullname" . }}
{{- if .Values.prometheus.servicemonitor.namespace }}
namespace: {{ .Values.prometheus.servicemonitor.namespace }}
{{- else }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
prometheus: {{ .Values.prometheus.servicemonitor.prometheusInstance }}
{{- with .Values.prometheus.servicemonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
  {{- with .Values.prometheus.servicemonitor.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
jobLabel: {{ template "cert-manager.fullname" . }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- if .Values.prometheus.servicemonitor.namespace }}
namespaceSelector:
matchNames:
- {{ include "cert-manager.namespace" . }}
{{- end }}
endpoints:
- targetPort: {{ .Values.prometheus.servicemonitor.targetPort }}
path: {{ .Values.prometheus.servicemonitor.path }}
interval: {{ .Values.prometheus.servicemonitor.interval }}
scrapeTimeout: {{ .Values.prometheus.servicemonitor.scrapeTimeout }}
honorLabels: {{ .Values.prometheus.servicemonitor.honorLabels }}
{{- end }}
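
A values sketch for the metrics Service and the ServiceMonitor above; the ServiceMonitor additionally assumes the monitoring.coreos.com CRDs from the Prometheus Operator are installed:

```
# Illustrative values.yaml fragment for the monitoring templates above.
prometheus:
  enabled: true
  servicemonitor:
    enabled: true
    prometheusInstance: default
    targetPort: 9402
    path: /metrics
    interval: 60s
    scrapeTimeout: 30s
    honorLabels: false
```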

View File

@@ -0,0 +1,77 @@
{{- if .Values.startupapicheck.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "startupapicheck.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.jobAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
backoffLimit: {{ .Values.startupapicheck.backoffLimit }}
template:
metadata:
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 8 }}
{{- with .Values.startupapicheck.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.startupapicheck.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
restartPolicy: OnFailure
serviceAccountName: {{ template "startupapicheck.serviceAccountName" . }}
{{- with .Values.global.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- with .Values.startupapicheck.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-startupapicheck
{{- with .Values.startupapicheck.image }}
image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
{{- end }}
imagePullPolicy: {{ .Values.startupapicheck.image.pullPolicy }}
args:
- check
- api
- --wait={{ .Values.startupapicheck.timeout }}
{{- with .Values.startupapicheck.extraArgs }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.startupapicheck.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.startupapicheck.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.startupapicheck.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.startupapicheck.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.startupapicheck.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
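
A values sketch for the startupapicheck Job above; the timeout is illustrative:

```
# Illustrative values.yaml fragment for the startupapicheck Job above.
startupapicheck:
  enabled: true
  backoffLimit: 4   # Job retries before it is marked failed
  timeout: 1m       # rendered into the container args as --wait=1m
```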

View File

@@ -0,0 +1,24 @@
{{- if .Values.startupapicheck.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "startupapicheck.fullname" . }}-psp
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "startupapicheck.fullname" . }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- if .Values.startupapicheck.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "startupapicheck.fullname" . }}-psp
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "startupapicheck.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "startupapicheck.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,51 @@
{{- if .Values.startupapicheck.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "startupapicheck.fullname" . }}
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
{{- if .Values.global.podSecurityPolicy.useAppArmor }}
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
{{- end }}
{{- with .Values.startupapicheck.rbac.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
  allowedCapabilities: [] # the default set of capabilities is implicitly allowed
volumes:
- 'projected'
- 'secret'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
{{- end }}
{{- end }}

View File

@@ -0,0 +1,48 @@
{{- if .Values.startupapicheck.enabled }}
{{- if .Values.global.rbac.create }}
# create certificate role
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "startupapicheck.fullname" . }}:create-cert
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates"]
verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "startupapicheck.fullname" . }}:create-cert
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "startupapicheck.fullname" . }}:create-cert
subjects:
- kind: ServiceAccount
name: {{ template "startupapicheck.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,27 @@
{{- if .Values.startupapicheck.enabled }}
{{- if .Values.startupapicheck.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: {{ .Values.startupapicheck.serviceAccount.automountServiceAccountToken }}
metadata:
name: {{ template "startupapicheck.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.startupapicheck.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.serviceAccount.labels }}
    {{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,24 @@
{{- if .Values.webhook.config -}}
{{- if not .Values.webhook.config.apiVersion -}}
{{- fail "webhook.config.apiVersion must be set" -}}
{{- end -}}
{{- if not .Values.webhook.config.kind -}}
{{- fail "webhook.config.kind must be set" -}}
{{- end -}}
{{- end -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "webhook.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
data:
{{- if .Values.webhook.config }}
config.yaml: |
{{ .Values.webhook.config | toYaml | nindent 4 }}
{{- end }}
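
Since the template fails the render unless apiVersion and kind are set, here is a sketch of a plausible webhook.config; the apiVersion and kind shown are assumptions based on the webhook's own configuration API:

```
# Illustrative values.yaml fragment: when set, this block is written into the
# ConfigMap above, mounted at /var/cert-manager/config, and passed to the
# webhook via --config (see the Deployment template below).
webhook:
  config:
    apiVersion: webhook.config.cert-manager.io/v1alpha1
    kind: WebhookConfiguration
    securePort: 10250   # takes precedence over webhook.securePort in the args
```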

View File

@@ -0,0 +1,172 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "webhook.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
{{- with .Values.webhook.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.webhook.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- with .Values.webhook.strategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
template:
metadata:
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 8 }}
{{- with .Values.webhook.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webhook.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "webhook.serviceAccountName" . }}
{{- if hasKey .Values.webhook "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.webhook.automountServiceAccountToken }}
{{- end }}
{{- with .Values.global.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- with .Values.webhook.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.webhook.hostNetwork }}
hostNetwork: true
{{- end }}
containers:
- name: {{ .Chart.Name }}-webhook
{{- with .Values.webhook.image }}
image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
{{- end }}
imagePullPolicy: {{ .Values.webhook.image.pullPolicy }}
args:
{{- if .Values.global.logLevel }}
- --v={{ .Values.global.logLevel }}
{{- end }}
{{- if .Values.webhook.config }}
- --config=/var/cert-manager/config/config.yaml
{{- end }}
{{- $config := default .Values.webhook.config "" }}
{{ if not $config.securePort -}}
- --secure-port={{ .Values.webhook.securePort }}
{{- end }}
{{- $tlsConfig := default $config.tlsConfig "" }}
{{ if or (not $config.tlsConfig) (and (not $tlsConfig.dynamic) (not $tlsConfig.filesystem) ) -}}
- --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE)
- --dynamic-serving-ca-secret-name={{ template "webhook.fullname" . }}-ca
- --dynamic-serving-dns-names={{ template "webhook.fullname" . }}
- --dynamic-serving-dns-names={{ template "webhook.fullname" . }}.$(POD_NAMESPACE)
- --dynamic-serving-dns-names={{ template "webhook.fullname" . }}.$(POD_NAMESPACE).svc
{{ if .Values.webhook.url.host }}
- --dynamic-serving-dns-names={{ .Values.webhook.url.host }}
{{- end }}
{{- end }}
{{- with .Values.webhook.extraArgs }}
{{- toYaml . | nindent 10 }}
{{- end }}
ports:
- name: https
protocol: TCP
{{- if $config.securePort }}
containerPort: {{ $config.securePort }}
{{- else if .Values.webhook.securePort }}
containerPort: {{ .Values.webhook.securePort }}
{{- else }}
containerPort: 6443
{{- end }}
- name: healthcheck
protocol: TCP
{{- if $config.healthzPort }}
containerPort: {{ $config.healthzPort }}
{{- else }}
containerPort: 6080
{{- end }}
livenessProbe:
httpGet:
path: /livez
{{- if $config.healthzPort }}
port: {{ $config.healthzPort }}
{{- else }}
port: 6080
{{- end }}
scheme: HTTP
initialDelaySeconds: {{ .Values.webhook.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.webhook.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.webhook.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.webhook.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.webhook.livenessProbe.failureThreshold }}
readinessProbe:
httpGet:
path: /healthz
{{- if $config.healthzPort }}
port: {{ $config.healthzPort }}
{{- else }}
port: 6080
{{- end }}
scheme: HTTP
initialDelaySeconds: {{ .Values.webhook.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.webhook.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.webhook.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.webhook.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.webhook.readinessProbe.failureThreshold }}
{{- with .Values.webhook.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.webhook.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.webhook.config }}
volumeMounts:
- name: config
mountPath: /var/cert-manager/config
{{- end }}
{{- with .Values.webhook.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webhook.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webhook.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webhook.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.webhook.config }}
volumes:
- name: config
configMap:
name: {{ include "webhook.fullname" . }}
{{- end }}
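
One interaction worth noting in the Deployment above: with hostNetwork enabled the webhook binds on the node itself, so it is usually paired with a high, unprivileged securePort (the port below is illustrative):

```
# Illustrative values.yaml fragment: host networking for clusters where the
# control plane cannot reach the pod network; pick an unused unprivileged port.
webhook:
  hostNetwork: true
  securePort: 10260
```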

View File

@@ -0,0 +1,46 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: {{ include "webhook.fullname" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
annotations:
cert-manager.io/inject-ca-from-secret: {{ printf "%s/%s-ca" (include "cert-manager.namespace" .) (include "webhook.fullname" .) | quote }}
{{- with .Values.webhook.mutatingWebhookConfigurationAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
webhooks:
- name: webhook.cert-manager.io
rules:
- apiGroups:
- "cert-manager.io"
- "acme.cert-manager.io"
apiVersions:
- "v1"
operations:
- CREATE
- UPDATE
resources:
- "*/*"
admissionReviewVersions: ["v1"]
# This webhook only accepts v1 cert-manager resources.
# Equivalent matchPolicy ensures that non-v1 resource requests are sent to
# this webhook (after the resources have been converted to v1).
matchPolicy: Equivalent
timeoutSeconds: {{ .Values.webhook.timeoutSeconds }}
failurePolicy: Fail
# Only include 'sideEffects' field in Kubernetes 1.12+
sideEffects: None
clientConfig:
{{- if .Values.webhook.url.host }}
url: https://{{ .Values.webhook.url.host }}/mutate
{{- else }}
service:
name: {{ template "webhook.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
path: /mutate
{{- end }}
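
A values sketch for the clientConfig branch above; with url.host set the webhook is addressed by a fixed URL instead of the in-cluster Service (the hostname is a placeholder):

```
# Illustrative values.yaml fragment: the host below is also added to
# --dynamic-serving-dns-names in the webhook Deployment.
webhook:
  url:
    host: webhook.example.com   # placeholder hostname
```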

View File

@@ -0,0 +1,18 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "webhook.fullname" . }}-psp
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "webhook.fullname" . }}
{{- end }}

View File

@@ -0,0 +1,20 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "webhook.fullname" . }}-psp
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "webhook.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "webhook.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}

View File

@@ -0,0 +1,54 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "webhook.fullname" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
{{- if .Values.global.podSecurityPolicy.useAppArmor }}
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
  allowedCapabilities: [] # the default set of capabilities is implicitly allowed
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
hostNetwork: {{ .Values.webhook.hostNetwork }}
{{- if .Values.webhook.hostNetwork }}
hostPorts:
- max: {{ .Values.webhook.securePort }}
min: {{ .Values.webhook.securePort }}
{{- end }}
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
{{- end }}

Some files were not shown because too many files have changed in this diff.