Ansible Script 추가

This commit is contained in:
ByeonJungHun
2023-12-19 13:36:16 +09:00
parent 0273450ff6
commit 05cb8d9269
2610 changed files with 281893 additions and 0 deletions

View File

@@ -0,0 +1,9 @@
---
# Apply OS-level settings (role: agent_os_setting) to every host in the
# "cluster" group. Runs as root and gathers facts for the role's templates.
- hosts: cluster
  become: true
  gather_facts: true
  environment:
    # k8s modules/commands inside the role read this kubeconfig.
    KUBECONFIG: /root/.kube/ansible_config
  roles:
    - role: agent_os_setting

View File

@@ -0,0 +1,23 @@
---
- hosts: agent
become: true
roles:
- role: dsk_bot.datasaker
vars:
datasaker_api_key: "XQOt9G3oAtsOQyd3U25JwOu3/sE+zj/m3kRKL/d0OUAQn30jVlrBKN/gJp9cJ4C9CHU1D1vSEPRxaCk8NuwZh6+v48TiaingDy6F74YGySRvnH0gqdmfxLSGisD/g8/JqBlIwhhyMSVCVfAkcNysLnwLi4xLnZMlvVi2Lzo3MiekSfJS5adR3hAv6pCaCFe2rNW24pYS5PBYkP/kxp/cfYAN/UhVEs5J+h4/iQ5vozQgvWuskBpOjlUeEYZnMZ6Al91gAUmSRoxtzLc+QOdfp7+uDXpwXPm80bQz9bR20Elcr4+rNqLcc2ONwJwrSveDSvJn4xjB6n95hEYbaDHUpA=="
datasaker_agents: ["dsk-node-agent","dsk-log-agent"]
datagate_trace_url: 10.10.43.111
datagate_manifest_url: 10.10.43.111
datagate_metric_url: 10.10.43.111
datagate_plan_url: 10.10.43.111
datagate_loggate_url: 10.10.43.111
datasaker_api_url: 10.10.43.111:31501
logs:
- collect:
type: file
file:
paths: ["/var/log/*.log","/datasaker/log/*.log"]
#uninstall: True
#datasaker_clean: True

View File

@@ -0,0 +1,9 @@
---
- hosts: cluster
become: true
gather_facts: true
environment:
KUBECONFIG: /root/.kube/ansible_config
roles:
- role: api_os_setting

View File

@@ -0,0 +1,11 @@
---
- hosts: cluster
remote_user: root
tasks:
- name: key add
authorized_key:
user: root
state: present
key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
manage_dir: False

View File

@@ -0,0 +1,95 @@
---
- hosts: bastion
become: true
gather_facts: true
roles:
- role: bastion
vars:
- sshmainport: 2222
admin_users:
- name: "minchulahn"
ip: "10.20.142.22"
description: "안민철"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDKDxtkcfx2ITlT2Yh7ZCT79do/25YQ2vROz38m8veAuBhOw+75oZJ4nN//zOWaaMvpC3Z7NIzOR+3UeukhnLZ591q8AaHcKjV8JEJMo2pvpH1vdLcTL9baLqWrxzgRimnZUNf5n5HNr+AKoXuPp//aVSJSoeznb66r04/rJSetT0QGDC8Kj5Q+MNvdd0/3U/nu7JxW9LIEaLoeiX6mVb4PpV7kl3rI3Vut/GnWakOhbS4yNvIFdR6d8rv305/BXJOz/aWy+0j7qK+NBzbSsI/l0vVUHfeD3whYGePCpWmj73ZsMTMjIjrC8DpRQlOJlAZ0GVpQnd/ayIWi4+V8VjvFcd6vSqrhhsNoOyo0Y/6cyO6iyvKqohMK6+HF1w6aXoaGCFFSl/3gw63saNAsdZPArnwf5yZ6GfPa/9bRn2k9g5xfp97Itpo6Iqq+PuRcZOes0EiIQe2hOoYQEIHIRhf8CZ+Xf6W1+XZB+WxEzUe4GCCwgUdTB6RIr4ThDxwCBV0="
- name: "havelight"
ip: "10.20.142.21"
description: "정재희"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDUAppqxDLltrMsYMwIxGi0FA5STA/R+H6oy7myfiJP2Lt4woCogMi3ELVKEhFkeJx4i8y9G80lynEYCHRH1kAQ/7YaJEVFrPXTvBw+OVxYdVS/gLl0rL89ky+n0dv6A9mancrvUOMacI5aN7/W+EhoLohRjRbWlsPGNnvAmO0AZnt595aMUjFkdhusGyBVunDUFSitj9TFkjxDhr6cx8Bi0FLpvdsoAvfqiw/MVKW2pMgj56AT5UCT0wvtSHSNY/C731jP/RKrxP0fnVhIkVys/XmLV/6SVEqL1XwqMTvRfi5+Q8cPsXrnPuUFHiNN4e/MGJkYi0lg7XbX8jDXv3ybdxZ7lGiUDebxjTKBCCghFae3eAwpJADEDfrzb8DHJZFwJVVdKGXvStTWTibcs14ilRPcB4SWIBx/cFCzwOBK/iw8CfEfsbVe6WQbDc4T4LrgL8cUzHPOO8CQcC4DV/O3BuoqQExu6xTmU8rhLT9kgatIdX0K5jgGbuqz7c2lelU="
- name: "sa_8001"
ip: "10.20.142.50"
description: "변정훈"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCgvFtLP7A1bR2ANxHiyTalgaI2pvxnCAzsqTAAh/+egIOi2vUIC2jRWGQXyoiTlupdNWQli2D93tEJBvG3VO5LOVocOHsFnFcV8RsiR4QGhqMeXRfMBWbf7Prby0qWv/VQ00gNWEgEjZUhOfBQeJsozGTd3dS4AgRnQkQmnvCT6TWD7+GwMg1SDlu/23y5aKLmpLkVT9kEG3yxZ3rWQfepjAubt+/saZPtyhkmc9+qhe2K+6PCZU2MCh6TYoKrcRUhVaJLvWqS35/Cv/9oxLg7lZwsasHFO9ANXWV9gBelCXLpYosN5hylUvl4JmSN+/qiOH3hpEbOtTCY/ZU0o1/xXLr0pmbYpZoT6zMKZ5fkweW7xidrg/bI1s/4+DVf4c/NJehw4PL3sqRmVdJsriFUifywh05Up5j1NQANiFlFngwEWy81cWRyvSL5q/plJHSvpd6g+WbsyC/QqYNAhxjnEosOb52QGZmLL7GqaC1hdKDOlJYZK63EBQ8YpHqGHo0="
allow_users:
- name: "wkd1994"
ip: "10.20.142.28"
description: "김동우"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDtmuAhVVyVJ87+t2xVtiS3bvTxxn0dmD7t4D2iSvSErIjRsXRCPLdc/yGWiezj+oVZtRPLJ2mjKToGUerdkcW8oqiQeL0+x/CjdlS2rQXvQa2HXCjB+MejwZyJ2bl7VDtIMdLianJBn7+XVc48+bIf7yait8yVH1aVWcS/AXOSo9LwX/uNW5VCL5BeXSGwXdwkuhjeJurR4WIVSBXuh1ql5Vy6BdSxcmLMihNlIL/DyuzfPLuQZbuSeaJ7eJKiHu63/SwBA1cPzj9tgI7zNvguapIHKXvoK8n5gNUXVRDGnD4J6xbzUQB3DbU8kaz7pDClxzgpkf3MnvP9QvnTyqV+aftYlb02as0PrwIxlTlW/sBxyEGdFe+JwoTctHkrSfp0lYRpyCv3eXJcdDu2l3dTJXAHlpcJuQRH2j9herURxML0w6re1iKJ8MAjOqUvh+B3A1U3x116zEGdsCNCRcfwehEir7fmGKaPvrmOiDOTlNswdL/OJ1RHKFuEZJPlUr8="
- name: "djkim"
ip: "10.20.142.36"
description: "김득진"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9Go9pLADJUQtq+ptTAcSIpi+VYv5/Kik0lBuV8xEc++vNtix5kwi+XSsNShHM3MVeiE8J27rYfyNn79r5pVKMuasMRyP3mTDZtRKr7/piM8MXuGSu1jCsVrTBZX0Sf4wuOA1tSkG9QgjBMZfvE9jOSYozA1K85mVE28m2rTihPnL5zYsDKnx+xIcwUBTpkOCoHiAfAX9b5ADAfScJigSZDjFLvexJ1aapPV2Iajh8huIhWvCUhrqUv/ldUm+b1iiOT7GXdrM/cam3FnLZ0b5KI9CQb7084+4l0BlmtPkuFcIlTDm1K6YO7+Mewd+F9uQZwvxGuElBPg8NVgFLD7+nrf2VlJYYCAeChyDV5+ZD70pSTcvHpJbmLKMtRFGov73ZPJ3vld9XCGUCajaoZz5Kz+ANmSC9nl3FpxnYgvFWfS7iwyC+VkGRKUg96/crXz4D8fW/wIskt+3cVrW9Z66psH41ll979mC8xly0ITWwbQZv7rvbdWSDVKVRgbXQOSc="
- name: "sanghee1357"
ip: "10.20.142.40"
description: "김상희"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC45maYW137cwvdS8AE9UzNHq9AMYrkEJtoNDAOVkUXtpVQITqvBCc4B4FfR5JK2h+imnBDng5fu728YAB7q31BE3Wub8I+QWhnQgv+kH1yMWj2s329tkHvcyNWIHSBqw4z1N74Zba+7mojKioju27HdcRcN1L7tpXSCHrq5bU6++CMShpZ7a3wo20RfikFWd563Y15mE3uDqlbkcuzE0KGSNrdY6Gy9aiE3/poVQRLaCmXnUKNw9wM3UGN9DanJi6iosXrlZRkpwhV+tHh2x+BWCbyY8jj94RDJgMwoKw71tzlEp+B1k6a7g+lEo3KFP//3PQxc9fdKBdg1YzSAKGKjsqATEVclmQHVskk6wZQC/wcjFxrSOreSp6knswX9AhIvGhMtoVo9iMy9cm+F4AauzjjfszCMO484983hIYwsh321VB14Wg7NroCYMUh7krATeKmNWhK0YicYCXINVMphBAcXFhuJduPejz19ZN356t+F/LDqlCxW7kO9QfYUy0="
- name: "jinbekim"
ip: "10.10.142.48"
description: "김진범"
- name: "bypark"
ip: "10.20.142.26"
description: "박병욱"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCZig/9xMWR3QhwHPbkvY2b9nmHiWkJHgnztGfIyxVTmkcsr9QViIvNUINlRBlE2+I5j7R2+0qI5GkAYndJsQoZiZ3iPqxnM5KdB9bEbWS5Tv7pbGyHyzaYPMUS3g6ZRMKnbJlAmhOLuq4TNYaUSESvaiYbCbaZK2JdsfPtSC99Gez6+HNoapILeg6xkxLnMsgUG6QzGaZyRABlPRbctGfx2U7cYe/7b7T+/yNtMU2FKrAJqcy0S1IUzc/dK2m5SQ3Y2GMohuGkv8mfs16i0wi3LfgEIatsmj2KB7Y7lIYW/GEZA2I+K2uH9Pu+F/kmGvAu5jNd1ztSo9MgElyu2NMXYhM3f/eDD+PdHKjUvOtE5twNBHQooPjBpp/mja4hnxLKepTqgP1t6azncPB8m6jC6MTbkhOHpgSNXurhx0kCurLA+l9KaySidhc0mFNJZGRKAhQoMIDFgXlzkZ4GmmtbfOJ/J1k7QqHZya5x6M4mOfvlPECFKVF24vzJVEulY3E="
- name: "joonsoopark"
ip: "10.20.142.33"
description: "박준수"
key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICeOzKeL4ZUXw0lEHDZoBsp7M3oobrBI0sWBHdpk0X0T"
- name: "baekchan1024"
ip: "10.20.142.39"
description: "백승찬"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDaqqy9YVwxh37xOU0nytBPd6GEJe30e1L/K5UXDZToteNebyfQrtFogxB6MpMNaAzAk6qbyPuZA3rgP8Y+qXgRlx88cxje5P5yOCsMW2o3xD5PiJ7lluWQ9tlS5ti4B9EWurJOsGF27XKKuSHN+dx9ZIb4sDqLYzmycPNwFaEtH6GQ2vjqpPMfjmKAuYmKD4L7mdA8lXTiRS2uYDkUxwQ+6PU+axTauD9qsXuGDAnGkVHKNE0o9OCf1uoyOhy6EB2sDz5Pymr7fbRJauWNxuSJdYPKY33GdDKpioP/1nRLSLtr1nvLHVrG/5CSNO1x20WYXFEGoMTzW4T5nYSS61apHkQ/0Csv0LBeHPc9gsMPobNJpIYlvGwdODQ+fpgxyB4SAQJKtQR1YB4w5OVtXVZAMvZZKI9gQQHZ8wQ4Zk0erGxKeyLxnDrKKNHLRPyUrjkL7H2a0i8BGpdk8sxW9NVrJJGgmQQiPbJx0yvIi1n55mUq+ZVjiF5qPvxtc5D133k="
- name: "jungry"
ip: "10.20.142.44"
description: "서정우"
- name: "ose"
ip: "10.20.142.34"
description: "오승은"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDAlYSGJZpOOEPZqIa1/CXxiaUNj1wsgkp0kEyD2SX8r7ovwmSAWCS24v/IOSgsUTFRpL64vIeCtcZ8sj4Hwzd3F2h+carQP0v+leCkzPpQ7aP/BoPS27+fSCzaOZv/QJ+eIcXWHIbWkXf6MYQ35PykDeJIO61OMOlWhpNV425VSwfZoB72xZmEH+rIZjXHHs8vYtIG2sXZE22BLiVw6PEL/C4QB2khBT5ZAjX2xGEzUoSknzva/8Uu20adQBalFTIdyLV7V6CxkIPkSgfmZh/fqXfbfPsxHLPK2o2ueGbx3fcN3kAqFrqpJgjEIZmNj6qhVPtbN5TSUyIjtoPhC4JR0heqckz1qLah+8lSiUfHSblGW89QuUcedHdwHp/RiZW6HQO0cqS/QPNcgPLTiv68voBapS9rav+j0tt1RynNY+AdhCOoo4BbGW0pXqi0vaHzbbfbzxp78kx/7/KXmUHkzGSkmlXVbKqzDm5k/kRn0q4pimDun42b+MjNYu3gZz0="
- name: "gurwns1540"
ip: "10.20.142.35"
description: "윤혁준"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1+kC8LzDxwc4gfiGzUQH+CeKGf+elX3oKciMmLQJmpddlcWuRthq1pszufHjypT/FfC/XVLZkGvjMDJUWro/Pen3RcdTcENteVZO/nzQ89nmS/D3tbg6nVWxiast6bDdSEdPF8CKSUAlA+8hTgSCWou7TtOuWGCKj+6HSHctBA41WFLpYInYHWTnC+LY1nwOurjG4qjmgdEzBXMhLWvuZDVE21oIUMEXbjW1dXhHNMKfyn/mUqSSG9zoXZSK0KB8OHhBsbxzFqu5cXC1TTpJOyX05730LUdwF9MevreUS3ws5NY8h0C6EVAOMQqeH5gkwVTHsyXQHtXB9nGI1g7sMIjEzJHkOygK17nAfapWhGFahhaaq42qdo7N3Pj8IjrY3S9EDXnPtQODROj3JVzo3Sgd2FUKDcAIWwJHMAwkaFqciPGIrj4ib81NbOoWn7oCjbIyDxgoxSp1vpW7C25rL22LtrCHyMWPbhV19FJIZqtg7f94JptzLND1pHDnsnfeNAxz9d6oKdcJW5bXUDeDCQxBio1RBF6nNzSRoiD0+FD29of9wNWRd2cBkR8uJV7P9XfXMzMK5q7Wqte/DABs3wJ3v/cth6kPrRV7j2h+4DGbEj5Mpz8XAFnGkZFmd/UiSbNqRBLKmp0lPpyxZrRU00xuqJ51pYB2wMwkQgOIVuw=="
- name: "yyeun"
ip: "10.20.142.45"
description: "이예은"
- name: "sujung"
ip: "10.20.142.27"
description: "정성락"
key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKbI5DjRkABz65NnREzf5HKKIMPrIA4DrnDDXTrjnRH8"
- name: "antcho"
ip: "10.20.142.46"
description: "조혜수"
- name: "stdhsw"
ip: "10.20.142.32"
description: "한승우"
key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIANTMTgqbTtIKKRsZU9An9D3La9Fh1bUtiLE/Y0nL4CZ"
- name: "seungjinjeong"
ip: "10.20.142.41"
description: "정승진"
key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDi8funYVM0eRmfplW5EdnfJOFEVDEMMw08VRn6FD9x9VuNWCEkY3iErzekBY2SRat8g6q0VXRyu7b/bhm/kD+BtI79fmz9FKxslTZCeKKN1KWfoZoXSRnvjOX1Y6NDnY2X5M+3kN40ek9ku6abN6lOtInTXJ1QOJIISa8l6vrB/j1xVVZghTYY5MBMc89cRZESGdBZWld0CtmoM+mnjh5vWCCA3VJTcDbj5LKtWllA6t58KwtGBikr8iaOpi83dQ91eXWzxTttl/LCe9bfgSxYlmvZILn0UZMu1WiWBhlIBzC6RlxorkDVRXcSRjguEt+/ys2rv6UTSkm150O4PgjgxlZPmTJt1m5y/St57LELUVbV6XGSq6+eZNTZOYBxxRkKcV0uByCBjxjsVlMmoEZoxedhSVT1Z8/AiMnjPBjXx2ease04EvtZs6rpDRd0puzcx1TKoCkyak60ymxc91X9lQg3kUl0av/G5kMKJQqW6v31GA1Vnh4K9haCVF/Ki/M="

View File

@@ -0,0 +1,11 @@
---
- hosts: cluster
become: true
gather_facts: true
environment:
KUBECONFIG: /root/.kube/ansible_config
roles:
- role: cmoa_os_setting
- role: cmoa_install
delegate_to: 127.0.0.1

View File

@@ -0,0 +1,33 @@
- hosts: servers
become: true
roles:
- role: dsk_bot.datasaker
#- role: agent-ansible
vars:
datasaker_api_key: "1VL7/mhddWkQaS/vf/VjjwjnwaUhtZnLL++ih9LxYSB7HVkPpZw1Duy/4gxLN/73Vga00bD79mVd6N4dP0BVxmGqLnR6xItnSLlO3M6LmOMuM8bLaBuxxOvxST3lxpvtI0B2ilyjqTLh5y+NJWFV7Awq4zpqnPnTZ5dryp3yc4zc3C7Vxu0f2CL7/oGT0LRj/1l7gchuUxw2TVDLFFRylb+cFt6/NNylBxIb1wKGILd7N6NGgnsdRcrv4ZvTEPusrDqxO3IRYF6z9ZNbkQ1BPeDINtVFTgwhqFZjxg6erd8oqscB9n1DHOi6+tJ8VSHi2w5hYxHq93EV4cxBfzXAug=="
datasaker_docker_agents:
- "dsk-docker-node-agent"
- "dsk-docker-trace-agent"
- "dsk-docker-log-agent"
- "dsk-docker-postgres-agent"
postgres_user_name: sample
postgres_user_password: 1q2w3e4r
postgres_database_address: 0.0.0.0
postgres_database_port: 5432
plan_postgres_user_name: sample
plan_postgres_user_password: 1q2w3e4r
plan_postgres_database_address: 0.0.0.0
plan_postgres_database_name: sample
plan_postgres_database_port: 5432
logs:
- collect:
type: file
file:
paths:
- /var/log/*.log
- /var/lib/docker/containers/*/*.log
custom_log_volume:
- /var/log/
- /var/lib/docker/containers
#uninstall: True
#datasaker_clean: True

View File

@@ -0,0 +1,50 @@
---
- hosts: agent
become: true
roles:
- role: dsk_bot.datasaker
vars:
datasaker_api_key: "XQOt9G3oAtsOQyd3U25JwOu3/sE+zj/m3kRKL/d0OUAQn30jVlrBKN/gJp9cJ4C9CHU1D1vSEPRxaCk8NuwZh6+v48TiaingDy6F74YGySRvnH0gqdmfxLSGisD/g8/JqBlIwhhyMSVCVfAkcNysLnwLi4xLnZMlvVi2Lzo3MiekSfJS5adR3hAv6pCaCFe2rNW24pYS5PBYkP/kxp/cfYAN/UhVEs5J+h4/iQ5vozQgvWuskBpOjlUeEYZnMZ6Al91gAUmSRoxtzLc+QOdfp7+uDXpwXPm80bQz9bR20Elcr4+rNqLcc2ONwJwrSveDSvJn4xjB6n95hEYbaDHUpA=="
datasaker_agents: ["dsk-node-agent","dsk-log-agent"]
#datasaker_api_key: "eO58wEYK/2HThAV+5jgv7Or/qW3zJknBQF0FJt5Xo4kSZ9YH2/CJgfNUwKbGwlbzmihG9dVsSmmS40szOuvRVZJO0vPga98sJNI32AJdWaYX8oCNFouI0lYG+r9Y4vahrS7+FVwntyfkjETotqBDvoQ5HjGjvW0wviPagW/alNbI5pvpWwBHtgz9D83Y8DSvCvO64G4xhyIYZPSML11EqWUO8prYT8LfdD4n2oBp0QJ3cXKdvJAUc4w5LKbTASb8x8UTpVU3JH3Wnwe79PKftJ8YdxOtb5jjzXeOEEM2GD8xz4pbB7scCx5oJCWQLF1js6a2uFLENBgW+ztHRf1j2Q=="
#datasaker_api_key: "1VL7/mhddWkQaS/vf/VjjwjnwaUhtZnLL++ih9LxYSB7HVkPpZw1Duy/4gxLN/73Vga00bD79mVd6N4dP0BVxmGqLnR6xItnSLlO3M6LmOMuM8bLaBuxxOvxST3lxpvtI0B2ilyjqTLh5y+NJWFV7Awq4zpqnPnTZ5dryp3yc4zc3C7Vxu0f2CL7/oGT0LRj/1l7gchuUxw2TVDLFFRylb+cFt6/NNylBxIb1wKGILd7N6NGgnsdRcrv4ZvTEPusrDqxO3IRYF6z9ZNbkQ1BPeDINtVFTgwhqFZjxg6erd8oqscB9n1DHOi6+tJ8VSHi2w5hYxHq93EV4cxBfzXAug=="
datasaker_agents: ["dsk-node-agent","dsk-log-agent"]
#datasaker_docker_agents: ["dsk-docker-log-agent"]
#postgres_user_name: jhjung
#postgres_user_password: 1q2w3e4r
#postgres_database_address: 0.0.0.0
#postgres_database_port: 5432
#plan_postgres_user_name: jhjung
#plan_postgres_user_password: 1q2w3e4r
#plan_postgres_database_address: 0.0.0.0
#plan_postgres_database_port: 5432
#plan_postgres_database_name: test
datagate_trace_url: 10.10.43.111
datagate_trace_port: 31300
datagate_trace_timeout: 5s
datagate_manifest_url: 10.10.43.111
datagate_manifest_port: 31301
datagate_manifest_timeout: 5s
datagate_metric_url: 10.10.43.111
datagate_metric_port: 31302
datagate_metric_timeout: 5s
datagate_plan_url: 10.10.43.111
datagate_plan_port: 31303
datagate_plan_timeout: 5s
datagate_loggate_url: 10.10.43.111
datagate_loggate_port: 31304
datagate_loggate_timeout: 5s
datasaker_api_url: 10.10.43.111:31501
datasaker_api_send_interval: 1m
#uninstall: True
#datasaker_clean: True
logs:
- collect:
type: file
file:
paths: ["/var/log/*.log","/datasaker/log/*.log","/var/log/secure"]

View File

@@ -0,0 +1,645 @@
#!/bin/sh
set -e
# Docker CE for Linux installation script
#
# See https://docs.docker.com/engine/install/ for the installation steps.
#
# This script is meant for quick & easy install via:
# $ curl -fsSL https://get.docker.com -o get-docker.sh
# $ sh get-docker.sh
#
# For test builds (ie. release candidates):
# $ curl -fsSL https://test.docker.com -o test-docker.sh
# $ sh test-docker.sh
#
# NOTE: Make sure to verify the contents of the script
# you downloaded matches the contents of install.sh
# located at https://github.com/docker/docker-install
# before executing.
#
# Git commit from https://github.com/docker/docker-install when
# the script was uploaded (Should only be modified by upload job):
SCRIPT_COMMIT_SHA="66474034547a96caa0a25be56051ff8b726a1b28"
# strip "v" prefix if present
VERSION="${VERSION#v}"
# The channel to install from:
# * nightly
# * test
# * stable
# * edge (deprecated)
DEFAULT_CHANNEL_VALUE="stable"
if [ -z "$CHANNEL" ]; then
CHANNEL=$DEFAULT_CHANNEL_VALUE
fi
DEFAULT_DOWNLOAD_URL="https://download.docker.com"
if [ -z "$DOWNLOAD_URL" ]; then
DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL
fi
DEFAULT_REPO_FILE="docker-ce.repo"
if [ -z "$REPO_FILE" ]; then
REPO_FILE="$DEFAULT_REPO_FILE"
fi
mirror=''
DRY_RUN=${DRY_RUN:-}
while [ $# -gt 0 ]; do
case "$1" in
--mirror)
mirror="$2"
shift
;;
--dry-run)
DRY_RUN=1
;;
--*)
echo "Illegal option $1"
;;
esac
shift $(( $# > 0 ? 1 : 0 ))
done
case "$mirror" in
Aliyun)
DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
;;
AzureChinaCloud)
DOWNLOAD_URL="https://mirror.azure.cn/docker-ce"
;;
esac
# command_exists CMD... — succeed iff every named command resolves in PATH.
# (Vendored from docker/docker-install; do not modify logic locally.)
command_exists() {
	command -v "$@" > /dev/null 2>&1
}
# version_gte checks if the version specified in $VERSION is at least
# the given CalVer (YY.MM) version. returns 0 (success) if $VERSION is either
# unset (=latest) or newer or equal than the specified version. Returns 1 (fail)
# otherwise.
#
# examples:
#
# VERSION=20.10
# version_gte 20.10 // 0 (success)
# version_gte 19.03 // 0 (success)
# version_gte 21.10 // 1 (fail)
version_gte() {
	if [ -z "$VERSION" ]; then
		# Empty $VERSION means "latest", which satisfies any minimum.
		return 0
	fi
	# NOTE(review): the eval looks unnecessary (a direct call would work);
	# kept as-is because this file is vendored from docker/docker-install.
	eval calver_compare "$VERSION" "$1"
}
# calver_compare compares two CalVer (YY.MM) version strings. returns 0 (success)
# if version A is newer or equal than version B, or 1 (fail) otherwise. Patch
# releases and pre-release (-alpha/-beta) are not taken into account
#
# examples:
#
# calver_compare 20.10 19.03 // 0 (success)
# calver_compare 20.10 20.10 // 0 (success)
# calver_compare 19.03 20.10 // 1 (fail)
# Body runs in a subshell "()" so `set +x` cannot leak to the caller.
calver_compare() (
	set +x
	# Compare year components first (field 1 of YY.MM).
	yy_a="$(echo "$1" | cut -d'.' -f1)"
	yy_b="$(echo "$2" | cut -d'.' -f1)"
	if [ "$yy_a" -lt "$yy_b" ]; then
		return 1
	fi
	if [ "$yy_a" -gt "$yy_b" ]; then
		return 0
	fi
	# Years equal: compare months; ${mm#0} strips a leading zero so the
	# numeric test does not misread e.g. "03".
	mm_a="$(echo "$1" | cut -d'.' -f2)"
	mm_b="$(echo "$2" | cut -d'.' -f2)"
	if [ "${mm_a#0}" -lt "${mm_b#0}" ]; then
		return 1
	fi
	return 0
)
# is_dry_run — succeed when DRY_RUN is set (script prints instead of runs).
is_dry_run() {
	if [ -z "$DRY_RUN" ]; then
		return 1
	else
		return 0
	fi
}
# is_wsl — succeed when the kernel release string marks Windows WSL.
is_wsl() {
	case "$(uname -r)" in
	*microsoft* ) true ;; # WSL 2
	*Microsoft* ) true ;; # WSL 1
	* ) false;;
	esac
}
# is_darwin — succeed when `uname -s` reports macOS/Darwin.
is_darwin() {
	case "$(uname -s)" in
	*darwin* ) true ;;
	*Darwin* ) true ;;
	* ) false;;
	esac
}
deprecation_notice() {
distro=$1
distro_version=$2
echo
printf "\033[91;1mDEPRECATION WARNING\033[0m\n"
printf " This Linux distribution (\033[1m%s %s\033[0m) reached end-of-life and is no longer supported by this script.\n" "$distro" "$distro_version"
echo " No updates or security fixes will be released for this distribution, and users are recommended"
echo " to upgrade to a currently maintained version of $distro."
echo
printf "Press \033[1mCtrl+C\033[0m now to abort this script, or wait for the installation to continue."
echo
sleep 10
}
# get_distribution — echo the distro ID from /etc/os-release ("" if absent).
get_distribution() {
	lsb_dist=""
	# Every system that we officially support has /etc/os-release
	if [ -r /etc/os-release ]; then
		lsb_dist="$(. /etc/os-release && echo "$ID")"
	fi
	# Returning an empty string here should be alright since the
	# case statements don't act unless you provide an actual value
	echo "$lsb_dist"
}
echo_docker_as_nonroot() {
if is_dry_run; then
return
fi
if command_exists docker && [ -e /var/run/docker.sock ]; then
(
set -x
$sh_c 'docker version'
) || true
fi
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
echo
echo "================================================================================"
echo
if version_gte "20.10"; then
echo "To run Docker as a non-privileged user, consider setting up the"
echo "Docker daemon in rootless mode for your user:"
echo
echo " dockerd-rootless-setuptool.sh install"
echo
echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode."
echo
fi
echo
echo "To run the Docker daemon as a fully privileged service, but granting non-root"
echo "users access, refer to https://docs.docker.com/go/daemon-access/"
echo
echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent"
echo " to root access on the host. Refer to the 'Docker daemon attack surface'"
echo " documentation for details: https://docs.docker.com/go/attack-surface/"
echo
echo "================================================================================"
echo
}
# Check if this is a forked Linux distro
check_forked() {
# Check for lsb_release command existence, it usually exists in forked distros
if command_exists lsb_release; then
# Check if the `-u` option is supported
set +e
lsb_release -a -u > /dev/null 2>&1
lsb_release_exit_code=$?
set -e
# Check if the command has exited successfully, it means we're in a forked distro
if [ "$lsb_release_exit_code" = "0" ]; then
# Print info about current distro
cat <<-EOF
You're using '$lsb_dist' version '$dist_version'.
EOF
# Get the upstream release info
lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]')
dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]')
# Print info about upstream distro
cat <<-EOF
Upstream release is '$lsb_dist' version '$dist_version'.
EOF
else
if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
if [ "$lsb_dist" = "osmc" ]; then
# OSMC runs Raspbian
lsb_dist=raspbian
else
# We're Debian and don't even know it!
lsb_dist=debian
fi
dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
case "$dist_version" in
11)
dist_version="bullseye"
;;
10)
dist_version="buster"
;;
9)
dist_version="stretch"
;;
8)
dist_version="jessie"
;;
esac
fi
fi
fi
}
do_install() {
echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA"
if command_exists docker; then
cat >&2 <<-'EOF'
Warning: the "docker" command appears to already exist on this system.
If you already have Docker installed, this script can cause trouble, which is
why we're displaying this warning and provide the opportunity to cancel the
installation.
If you installed the current Docker package using this script and are using it
again to update Docker, you can safely ignore this message.
You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 20 )
fi
user="$(id -un 2>/dev/null || true)"
sh_c='sh -c'
if [ "$user" != 'root' ]; then
if command_exists sudo; then
sh_c='sudo -E sh -c'
elif command_exists su; then
sh_c='su -c'
else
cat >&2 <<-'EOF'
Error: this installer needs the ability to run commands as root.
We are unable to find either "sudo" or "su" available to make this happen.
EOF
exit 1
fi
fi
if is_dry_run; then
sh_c="echo"
fi
# perform some very rudimentary platform detection
lsb_dist=$( get_distribution )
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
if is_wsl; then
echo
echo "WSL DETECTED: We recommend using Docker Desktop for Windows."
echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
echo
cat >&2 <<-'EOF'
You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 20 )
fi
case "$lsb_dist" in
ubuntu)
if command_exists lsb_release; then
dist_version="$(lsb_release --codename | cut -f2)"
fi
if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
fi
;;
debian|raspbian)
dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
case "$dist_version" in
11)
dist_version="bullseye"
;;
10)
dist_version="buster"
;;
9)
dist_version="stretch"
;;
8)
dist_version="jessie"
;;
esac
;;
centos|rhel|sles)
if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
fi
;;
*)
if command_exists lsb_release; then
dist_version="$(lsb_release --release | cut -f2)"
fi
if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
fi
;;
esac
# Check if this is a forked Linux distro
check_forked
# Print deprecation warnings for distro versions that recently reached EOL,
# but may still be commonly used (especially LTS versions).
case "$lsb_dist.$dist_version" in
debian.stretch|debian.jessie)
deprecation_notice "$lsb_dist" "$dist_version"
;;
raspbian.stretch|raspbian.jessie)
deprecation_notice "$lsb_dist" "$dist_version"
;;
ubuntu.xenial|ubuntu.trusty)
deprecation_notice "$lsb_dist" "$dist_version"
;;
fedora.*)
if [ "$dist_version" -lt 33 ]; then
deprecation_notice "$lsb_dist" "$dist_version"
fi
;;
esac
# Run setup for each distro accordingly
case "$lsb_dist" in
ubuntu|debian|raspbian)
pre_reqs="apt-transport-https ca-certificates curl"
if ! command -v gpg > /dev/null; then
pre_reqs="$pre_reqs gnupg"
fi
apt_repo="deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL"
(
if ! is_dry_run; then
set -x
fi
$sh_c 'apt-get update -qq >/dev/null'
$sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pre_reqs >/dev/null"
$sh_c 'mkdir -p /etc/apt/keyrings && chmod -R 0755 /etc/apt/keyrings'
$sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg"
$sh_c "chmod a+r /etc/apt/keyrings/docker.gpg"
$sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list"
$sh_c 'apt-get update -qq >/dev/null'
)
pkg_version=""
if [ -n "$VERSION" ]; then
if is_dry_run; then
echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
else
# Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel
pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g")"
search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
pkg_version="$($sh_c "$search_command")"
echo "INFO: Searching repository for VERSION '$VERSION'"
echo "INFO: $search_command"
if [ -z "$pkg_version" ]; then
echo
echo "ERROR: '$VERSION' not found amongst apt-cache madison results"
echo
exit 1
fi
if version_gte "18.09"; then
search_command="apt-cache madison 'docker-ce-cli' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
echo "INFO: $search_command"
cli_pkg_version="=$($sh_c "$search_command")"
fi
pkg_version="=$pkg_version"
fi
fi
(
pkgs="docker-ce${pkg_version%=}"
if version_gte "18.09"; then
# older versions didn't ship the cli and containerd as separate packages
pkgs="$pkgs docker-ce-cli${cli_pkg_version%=} containerd.io"
fi
if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then
# also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently)
pkgs="$pkgs docker-scan-plugin"
fi
if version_gte "20.10"; then
pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
fi
if version_gte "23.0"; then
pkgs="$pkgs docker-buildx-plugin"
fi
if ! is_dry_run; then
set -x
fi
$sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pkgs >/dev/null"
)
echo_docker_as_nonroot
exit 0
;;
centos|fedora|rhel)
if [ "$(uname -m)" != "s390x" ] && [ "$lsb_dist" = "rhel" ]; then
echo "Packages for RHEL are currently only available for s390x."
exit 1
fi
if [ "$lsb_dist" = "fedora" ]; then
pkg_manager="dnf"
config_manager="dnf config-manager"
enable_channel_flag="--set-enabled"
disable_channel_flag="--set-disabled"
pre_reqs="dnf-plugins-core"
pkg_suffix="fc$dist_version"
else
pkg_manager="yum"
config_manager="yum-config-manager"
enable_channel_flag="--enable"
disable_channel_flag="--disable"
pre_reqs="yum-utils"
pkg_suffix="el"
fi
repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
(
if ! is_dry_run; then
set -x
fi
$sh_c "$pkg_manager install -y -q $pre_reqs"
$sh_c "$config_manager --add-repo $repo_file_url"
if [ "$CHANNEL" != "stable" ]; then
$sh_c "$config_manager $disable_channel_flag docker-ce-*"
$sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL"
fi
$sh_c "$pkg_manager makecache"
)
pkg_version=""
if [ -n "$VERSION" ]; then
if is_dry_run; then
echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
else
pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix"
search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
pkg_version="$($sh_c "$search_command")"
echo "INFO: Searching repository for VERSION '$VERSION'"
echo "INFO: $search_command"
if [ -z "$pkg_version" ]; then
echo
echo "ERROR: '$VERSION' not found amongst $pkg_manager list results"
echo
exit 1
fi
if version_gte "18.09"; then
# older versions don't support a cli package
search_command="$pkg_manager list --showduplicates 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)"
fi
# Cut out the epoch and prefix with a '-'
pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)"
fi
fi
(
pkgs="docker-ce$pkg_version"
if version_gte "18.09"; then
# older versions didn't ship the cli and containerd as separate packages
if [ -n "$cli_pkg_version" ]; then
pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io"
else
pkgs="$pkgs docker-ce-cli containerd.io"
fi
fi
if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then
# also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently)
pkgs="$pkgs docker-scan-plugin"
fi
if version_gte "20.10"; then
pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
fi
if version_gte "23.0"; then
pkgs="$pkgs docker-buildx-plugin"
fi
if ! is_dry_run; then
set -x
fi
$sh_c "$pkg_manager install -y -q $pkgs"
)
echo_docker_as_nonroot
exit 0
;;
sles)
if [ "$(uname -m)" != "s390x" ]; then
echo "Packages for SLES are currently only available for s390x"
exit 1
fi
if [ "$dist_version" = "15.3" ]; then
sles_version="SLE_15_SP3"
else
sles_minor_version="${dist_version##*.}"
sles_version="15.$sles_minor_version"
fi
opensuse_repo="https://download.opensuse.org/repositories/security:SELinux/$sles_version/security:SELinux.repo"
repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
pre_reqs="ca-certificates curl libseccomp2 awk"
(
if ! is_dry_run; then
set -x
fi
$sh_c "zypper install -y $pre_reqs"
$sh_c "zypper addrepo $repo_file_url"
if ! is_dry_run; then
cat >&2 <<-'EOF'
WARNING!!
openSUSE repository (https://download.opensuse.org/repositories/security:SELinux) will be enabled now.
Do you wish to continue?
You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 30 )
fi
$sh_c "zypper addrepo $opensuse_repo"
$sh_c "zypper --gpg-auto-import-keys refresh"
$sh_c "zypper lr -d"
)
pkg_version=""
if [ -n "$VERSION" ]; then
if is_dry_run; then
echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
else
pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g")"
search_command="zypper search -s --match-exact 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'"
pkg_version="$($sh_c "$search_command")"
echo "INFO: Searching repository for VERSION '$VERSION'"
echo "INFO: $search_command"
if [ -z "$pkg_version" ]; then
echo
echo "ERROR: '$VERSION' not found amongst zypper list results"
echo
exit 1
fi
search_command="zypper search -s --match-exact 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'"
# It's okay for cli_pkg_version to be blank, since older versions don't support a cli package
cli_pkg_version="$($sh_c "$search_command")"
pkg_version="-$pkg_version"
fi
fi
(
pkgs="docker-ce$pkg_version"
if version_gte "18.09"; then
if [ -n "$cli_pkg_version" ]; then
# older versions didn't ship the cli and containerd as separate packages
pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io"
else
pkgs="$pkgs docker-ce-cli containerd.io"
fi
fi
if version_gte "20.10"; then
pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
fi
if version_gte "23.0"; then
pkgs="$pkgs docker-buildx-plugin"
fi
if ! is_dry_run; then
set -x
fi
$sh_c "zypper -q install -y $pkgs"
)
echo_docker_as_nonroot
exit 0
;;
*)
if [ -z "$lsb_dist" ]; then
if is_darwin; then
echo
echo "ERROR: Unsupported operating system 'macOS'"
echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
echo
exit 1
fi
fi
echo
echo "ERROR: Unsupported distribution '$lsb_dist'"
echo
exit 1
;;
esac
exit 1
}
# wrapped up in a function so that we have some protection against only getting
# half the file during "curl | sh"
do_install

View File

@@ -0,0 +1,18 @@
---
- name: Check the health of all servers
hosts: all
tasks:
- name: Check if server is reachable
ping:
register: result
ignore_errors: true
- name: Print result
debug:
var: result
when: result is defined
- name: Print error message
debug:
msg: "Server {{ inventory_hostname }} could not be reached."
when: result.ping is undefined

View File

@@ -0,0 +1,995 @@
#!/bin/bash
# Teleport node installer: downloads/installs Teleport, writes a config for
# node/app/db service mode and starts it under systemd/launchd/foreground.
# Fail fast: abort on any error, unset variable, or pipeline failure.
set -euo pipefail
SCRIPT_NAME="teleport-installer"
# default values
ALIVE_CHECK_DELAY=3
CONNECTIVITY_TEST_METHOD=""
COPY_COMMAND="cp"
DISTRO_TYPE=""
IGNORE_CONNECTIVITY_CHECK="${TELEPORT_IGNORE_CONNECTIVITY_CHECK:-false}"
LAUNCHD_CONFIG_PATH="/Library/LaunchDaemons"
LOG_FILENAME="$(mktemp -t ${SCRIPT_NAME}.log.XXXXXXXXXX)"
MACOS_STDERR_LOG="/var/log/teleport-stderr.log"
MACOS_STDOUT_LOG="/var/log/teleport-stdout.log"
SYSTEMD_UNIT_PATH="/lib/systemd/system/teleport.service"
TARGET_PORT_DEFAULT=443
TELEPORT_ARCHIVE_PATH='teleport'
TELEPORT_BINARY_DIR="/usr/local/bin"
TELEPORT_BINARY_LIST="teleport tctl tsh"
TELEPORT_CONFIG_PATH="/etc/teleport.yaml"
TELEPORT_DATA_DIR="/var/lib/teleport"
TELEPORT_DOCS_URL="https://goteleport.com/docs/"
TELEPORT_FORMAT=""
# initialise variables (because set -u disallows unbound variables)
f=""
l=""
DISABLE_TLS_VERIFICATION=false
NODENAME=$(hostname)
IGNORE_CHECKS=false
OVERRIDE_FORMAT=""
QUIET=false
APP_INSTALL_DECISION=""
INTERACTIVE=false
# the default value of each variable is a templatable Go value so that it can
# optionally be replaced by the server before the script is served up
TELEPORT_VERSION='13.3.4'
TELEPORT_PACKAGE_NAME='teleport'
REPO_CHANNEL=''
TARGET_HOSTNAME='teleport.access.datasaker.io'
TARGET_PORT='443'
JOIN_TOKEN='d5bef76d6d1d2038dbf96a221e429d5e'
JOIN_METHOD=''
JOIN_METHOD_FLAG=""
[ -n "$JOIN_METHOD" ] && JOIN_METHOD_FLAG="--join-method ${JOIN_METHOD}"
# inject labels into the configuration
LABELS='teleport.internal/resource-id=9182aa69-a343-4111-a587-1efe5b1daa1c'
LABELS_FLAG=()
[ -n "$LABELS" ] && LABELS_FLAG=(--labels "${LABELS}")
# When all stanza generators have been updated to use the new
# `teleport <service> configure` commands CA_PIN_HASHES can be removed along
# with the script passing it in in `join_tokens.go`.
CA_PIN_HASHES='sha256:941164dbcfedbe05de067f5ce14d4c4e6a0523c070f0bdb3959b01ba452b15f8'
CA_PINS='sha256:941164dbcfedbe05de067f5ce14d4c4e6a0523c070f0bdb3959b01ba452b15f8'
ARG_CA_PIN_HASHES=""
APP_INSTALL_MODE='false'
APP_NAME=''
APP_URI=''
DB_INSTALL_MODE='false'
# usage message
# shellcheck disable=SC2086
usage() { echo "Usage: $(basename $0) [-v teleport_version] [-h target_hostname] [-p target_port] [-j join_token] [-c ca_pin_hash]... [-q] [-l log_filename] [-a app_name] [-u app_uri] " 1>&2; exit 1; }
# command-line flags override the templated defaults above
while getopts ":v:h:p:j:c:f:ql:ika:u:" o; do
case "${o}" in
v) TELEPORT_VERSION=${OPTARG};;
h) TARGET_HOSTNAME=${OPTARG};;
p) TARGET_PORT=${OPTARG};;
j) JOIN_TOKEN=${OPTARG};;
c) ARG_CA_PIN_HASHES="${ARG_CA_PIN_HASHES} ${OPTARG}";;
f) f=${OPTARG}; if [[ ${f} != "tarball" && ${f} != "deb" && ${f} != "rpm" ]]; then usage; fi;;
q) QUIET=true;;
l) l=${OPTARG};;
i) IGNORE_CHECKS=true; COPY_COMMAND="cp -f";;
k) DISABLE_TLS_VERIFICATION=true;;
a) APP_INSTALL_MODE=true && APP_NAME=${OPTARG};;
u) APP_INSTALL_MODE=true && APP_URI=${OPTARG};;
*) usage;;
esac
done
shift $((OPTIND-1))
# any -c flags accumulate and replace the templated CA pin hashes wholesale
if [[ "${ARG_CA_PIN_HASHES}" != "" ]]; then
CA_PIN_HASHES="${ARG_CA_PIN_HASHES}"
fi
# Build the literal Go template reference "{{.NAME}}" for the given NAME.
# The string is assembled piecewise so the script's own source never contains
# an intact "{{" sequence, which would otherwise be expanded when the server
# runs this script through Go's text/template engine before serving it.
construct_go_template() {
  OUTPUT='{'
  OUTPUT="${OUTPUT}{"
  OUTPUT="${OUTPUT}.${1}"
  OUTPUT="${OUTPUT}}"
  OUTPUT="${OUTPUT}}"
  echo "${OUTPUT}"
}
# Abort with an error on stderr unless the effective UID is 0 (root);
# installation writes to system paths and manages services.
assert_running_as_root() {
  if [ "$(id -u)" != 0 ]; then
    echo "This script must be run as root." 1>&2
    exit 1
  fi
}
# Decide whether the variable named by $1 still needs to be set interactively.
# Returns 1 when the variable is empty or still equals the unexpanded Go
# template placeholder for field $2 (i.e. the server never substituted it);
# returns 0 when it holds a real value.
check_variable() {
  VARIABLE_VALUE="${!1}"
  GO_TEMPLATE_NAME=$(construct_go_template "${2}")
  if [[ -z "${VARIABLE_VALUE}" || "${VARIABLE_VALUE}" == "${GO_TEMPLATE_NAME}" ]]; then
    return 1
  else
    return 0
  fi
}
# Return 0 when $1 looks like an affirmative answer ("y", "yes", "yep", ...,
# in lower or upper case), 1 for anything else.
is_truthy() {
  case "${1}" in
    y|Y|yes|YES|ye|YE|yep|YEP|ya|YA)
      return 0
      ;;
    *)
      return 1
      ;;
  esac
}
# Prompt (with $2...) on stdout and read from stdin until a non-empty line is
# entered, then assign that line to the variable whose NAME is given in $1.
read_nonblank_input() {
  VARIABLE_TO_ASSIGN="$1"
  shift
  PROMPT="$*"
  INPUT=""
  while [[ -z "${INPUT}" ]]; do
    echo -n "${PROMPT}"
    read -r INPUT
  done
  # printf -v performs the indirect assignment without eval
  printf -v "${VARIABLE_TO_ASSIGN}" '%s' "${INPUT}"
}
# error if we're not root
assert_running_as_root
# set/read values interactively if not provided
# users will be prompted to enter their own value if all the following are true:
# - the current value is blank, or equal to the default Go template value
# - the value has not been provided by command line argument
! check_variable TELEPORT_VERSION version && INTERACTIVE=true && read_nonblank_input TELEPORT_VERSION "Enter Teleport version to install (without v): "
! check_variable TARGET_HOSTNAME hostname && INTERACTIVE=true && read_nonblank_input TARGET_HOSTNAME "Enter target hostname to connect to: "
! check_variable TARGET_PORT port && INTERACTIVE=true && { echo -n "Enter target port to connect to [${TARGET_PORT_DEFAULT}]: "; read -r TARGET_PORT; }
! check_variable JOIN_TOKEN token && INTERACTIVE=true && read_nonblank_input JOIN_TOKEN "Enter Teleport join token as provided: "
! check_variable CA_PIN_HASHES caPins && INTERACTIVE=true && read_nonblank_input CA_PIN_HASHES "Enter CA pin hash (separate multiple hashes with spaces): "
# -f / -l command-line flags override format and log filename
[ -n "${f}" ] && OVERRIDE_FORMAT=${f}
[ -n "${l}" ] && LOG_FILENAME=${l}
# if app service mode is not set (or is the default value) and we are running interactively (i.e. the user has provided some input already),
# prompt the user to choose whether to enable app_service
if [[ "${INTERACTIVE}" == "true" ]]; then
if ! check_variable APP_INSTALL_MODE appInstallMode; then
APP_INSTALL_MODE="false"
echo -n "Would you like to enable and configure Teleport's app_service, to use Teleport as a reverse proxy for a web application? [y/n, default: n] "
read -r APP_INSTALL_DECISION
if is_truthy "${APP_INSTALL_DECISION}"; then
APP_INSTALL_MODE="true"
fi
fi
fi
# prompt for extra needed values if we're running in app service mode
if [[ "${APP_INSTALL_MODE}" == "true" ]]; then
! check_variable APP_NAME appName && read_nonblank_input APP_NAME "Enter app name to install (must be DNS-compatible; less than 63 characters, no spaces, only - or _ as punctuation): "
! check_variable APP_URI appURI && read_nonblank_input APP_URI "Enter app URI (the host running the Teleport app service must be able to connect to this): "
# generate app public addr by concatenating values
APP_PUBLIC_ADDR="${APP_NAME}.${TARGET_HOSTNAME}"
fi
# set default target port if value not provided
if [[ "${TARGET_PORT}" == "" ]]; then
TARGET_PORT=${TARGET_PORT_DEFAULT}
fi
# clear log file if provided
if [[ "${LOG_FILENAME}" != "" ]]; then
if [ -f "${LOG_FILENAME}" ]; then
echo -n "" > "${LOG_FILENAME}"
fi
fi
# log functions
# log_date: timestamp prefix shared by log/log_important
log_date() { echo -n "$(date '+%Y-%m-%d %H:%M:%S %Z')"; }
# log: timestamped line; suppressed on stdout by -q, always appended to the log file
log() {
LOG_LINE="$(log_date) [${SCRIPT_NAME}] $*"
if [[ ${QUIET} != "true" ]]; then
echo "${LOG_LINE}"
fi
if [[ "${LOG_FILENAME}" != "" ]]; then
echo "${LOG_LINE}" >> "${LOG_FILENAME}"
fi
}
# writes a line with no timestamp or starting data, always prints
log_only() {
LOG_LINE="$*"
echo "${LOG_LINE}"
if [[ "${LOG_FILENAME}" != "" ]]; then
echo "${LOG_LINE}" >> "${LOG_FILENAME}"
fi
}
# writes a line by itself as a header
log_header() {
LOG_LINE="$*"
echo ""
echo "${LOG_LINE}"
echo ""
if [[ "${LOG_FILENAME}" != "" ]]; then
echo "${LOG_LINE}" >> "${LOG_FILENAME}"
fi
}
# important log lines, print even when -q (quiet) is passed
log_important() {
LOG_LINE="$(log_date) [${SCRIPT_NAME}] ---> $*"
echo "${LOG_LINE}"
if [[ "${LOG_FILENAME}" != "" ]]; then
echo "${LOG_LINE}" >> "${LOG_FILENAME}"
fi
}
# log_cleanup_message: printed when a previous install is detected; lists
# manual steps to remove processes, data, config and binaries
log_cleanup_message() {
log_only "This script does not overwrite any existing settings or Teleport installations."
log_only "Please clean up by running any of the following steps as necessary:"
log_only "- stop any running Teleport processes"
log_only " - pkill -f teleport"
log_only "- remove any data under ${TELEPORT_DATA_DIR}, along with the directory itself"
log_only " - rm -rf ${TELEPORT_DATA_DIR}"
log_only "- remove any configuration at ${TELEPORT_CONFIG_PATH}"
log_only " - rm -f ${TELEPORT_CONFIG_PATH}"
log_only "- remove any Teleport binaries (${TELEPORT_BINARY_LIST}) installed under ${TELEPORT_BINARY_DIR}"
for BINARY in ${TELEPORT_BINARY_LIST}; do EXAMPLE_DELETE_COMMAND+="${TELEPORT_BINARY_DIR}/${BINARY} "; done
log_only " - rm -f ${EXAMPLE_DELETE_COMMAND}"
log_only "Run this installer again when done."
log_only
}
# other functions
# Return 0 when the named command/builtin/function is resolvable, 1 otherwise.
check_exists() {
  type "${1}" >/dev/null 2>&1
}
# Verify every named tool exists; log an important error and abort the whole
# script on the first one that is missing.
check_exists_fatal() {
  for TOOL in "$@"; do
    check_exists "${TOOL}" && continue
    log_important "Error: cannot find ${TOOL} - it needs to be installed"
    exit 1
  done
}
# check connectivity to the given host/port and make a request to see if Teleport is listening
# uses the global variable CONNECTIVITY_TEST_METHOD to return the name of the checker, as return
# values aren't really a thing that exists in bash
# Returns 0 when a TCP connection succeeds, non-zero when it fails.
check_connectivity() {
  HOST=$1
  PORT=$2
  # check with nc
  if check_exists nc; then
    CONNECTIVITY_TEST_METHOD="nc"
    if nc -z -w3 "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi
  # if there's no nc, check with telnet
  elif check_exists telnet; then
    CONNECTIVITY_TEST_METHOD="telnet"
    if echo -e '\x1dclose\x0d' | telnet "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi
  # if there's no nc or telnet, fall back to bash's /dev/tcp pseudo-device.
  # BUGFIX: the previous guard `[ -f /dev/tcp ]` was always false, because
  # /dev/tcp is a bash redirection feature and never exists as a file on disk,
  # so this fallback never ran and the function returned 255 instead.
  # This script runs under #!/bin/bash, which always supports /dev/tcp.
  else
    CONNECTIVITY_TEST_METHOD="/dev/tcp"
    if (head -1 < "/dev/tcp/${HOST}/${PORT}") >/dev/null 2>&1; then return 0; else return 1; fi
  fi
}
# check whether a teleport DEB is already installed and exit with error if so
check_deb_not_already_installed() {
check_exists_fatal dpkg awk
# second column of `dpkg -l` is the package name; `|| true` keeps set -e
# from aborting when grep matches nothing
DEB_INSTALLED=$(dpkg -l | awk '{print $2}' | grep -E ^teleport || true)
if [[ ${DEB_INSTALLED} != "" ]]; then
log_important "It looks like there is already a Teleport DEB package installed (name: ${DEB_INSTALLED})."
log_important "You will need to remove that package before using this script."
exit 1
fi
}
# check whether a teleport RPM is already installed and exit with error if so
check_rpm_not_already_installed() {
check_exists_fatal rpm
# `|| true` keeps set -e from aborting when grep matches nothing
RPM_INSTALLED=$(rpm -qa | grep -E ^teleport || true)
if [[ ${RPM_INSTALLED} != "" ]]; then
log_important "It looks like there is already a Teleport RPM package installed (name: ${RPM_INSTALLED})."
log_important "You will need to remove that package before using this script."
exit 1
fi
}
# Log the value of the variable named by $1, or abort the script when that
# variable is empty/unset (required-configuration guard).
check_set() {
  CHECK_KEY=${1} || true
  CHECK_VALUE=${!1} || true
  if [[ -n "${CHECK_VALUE}" ]]; then
    log "${CHECK_KEY}: ${CHECK_VALUE}"
    return 0
  fi
  log "Required variable ${CHECK_KEY} is not set"
  exit 1
}
# checks that teleport binary can be found in path and runs 'teleport version'
check_teleport_binary() {
FOUND_TELEPORT_VERSION=$(${TELEPORT_BINARY_DIR}/teleport version)
if [[ "${FOUND_TELEPORT_VERSION}" == "" ]]; then
log "Cannot find Teleport binary"
return 1
else
log "Found: ${FOUND_TELEPORT_VERSION}";
return 0
fi
}
# wrapper to download with curl
# Downloads URL ($1) to OUTPUT_PATH ($2), validates the file is non-empty and,
# when shasum/sha256sum is available, verifies the companion .sha256 checksum.
# Exits the script on any failure.
download() {
URL=$1
OUTPUT_PATH=$2
CURL_COMMAND="curl -fsSL --retry 5 --retry-delay 5"
# optionally allow disabling of TLS verification (can be useful on older distros
# which often have an out-of-date set of CA certificate bundle which won't validate)
if [[ ${DISABLE_TLS_VERIFICATION} == "true" ]]; then
CURL_COMMAND+=" -k"
fi
log "Running ${CURL_COMMAND} ${URL}"
log "Downloading to ${OUTPUT_PATH}"
# handle errors with curl
if ! ${CURL_COMMAND} -o "${OUTPUT_PATH}" "${URL}"; then
log_important "curl error downloading ${URL}"
log "On an older OS, this may be related to the CA certificate bundle being too old."
log "You can pass the hidden -k flag to this script to disable TLS verification - this is not recommended!"
exit 1
fi
# check that the file has a non-zero size as an extra validation
check_exists_fatal wc xargs
FILE_SIZE="$(wc -c <"${OUTPUT_PATH}" | xargs)"
if [ "${FILE_SIZE}" -eq 0 ]; then
log_important "The downloaded file has a size of 0 bytes, which means an error occurred. Cannot continue."
exit 1
else
log "Downloaded file size: ${FILE_SIZE} bytes"
fi
# if we have a hashing utility installed, also download and validate the checksum
SHA_COMMAND=""
# shasum is installed by default on MacOS and some distros
if check_exists shasum; then
SHA_COMMAND="shasum -a 256"
# sha256sum is installed by default in some other distros
elif check_exists sha256sum; then
SHA_COMMAND="sha256sum"
fi
if [[ "${SHA_COMMAND}" != "" ]]; then
log "Will use ${SHA_COMMAND} to validate the checksum of the downloaded file"
SHA_URL="${URL}.sha256"
SHA_PATH="${OUTPUT_PATH}.sha256"
${CURL_COMMAND} -o "${SHA_PATH}" "${SHA_URL}"
# -c reads the expected hash from the .sha256 file; --status suppresses output
if ${SHA_COMMAND} --status -c "${SHA_PATH}"; then
log "The downloaded file's checksum validated correctly"
else
SHA_EXPECTED=$(cat "${SHA_PATH}")
SHA_ACTUAL=$(${SHA_COMMAND} "${OUTPUT_PATH}")
# strip the trailing filename so only the bare hashes are logged
if check_exists awk; then
SHA_EXPECTED=$(echo "${SHA_EXPECTED}" | awk '{print $1}')
SHA_ACTUAL=$(echo "${SHA_ACTUAL}" | awk '{print $1}')
fi
log_important "Checksum of the downloaded file did not validate correctly"
log_important "Expected: ${SHA_EXPECTED}"
log_important "Got: ${SHA_ACTUAL}"
log_important "Try rerunning this script from the start. If the issue persists, contact Teleport support."
exit 1
fi
else
log "shasum/sha256sum utilities not found, will skip checksum validation"
fi
}
# Strip the directory portion from a URL or path, leaving only the final
# component (https://target.site/path/to/file.tar.gz -> file.tar.gz).
get_download_filename() {
  DOWNLOAD_BASENAME="${1##*/}"
  echo "${DOWNLOAD_BASENAME}"
}
# Print the PID(s) of any running teleport process on a single
# space-separated line (empty output when none are running).
get_teleport_pid() {
  check_exists_fatal pgrep xargs
  pgrep teleport | xargs echo
}
# Print the command line that starts teleport against the installed config.
get_teleport_start_command() {
  printf '%s start --config=%s\n' "${TELEPORT_BINARY_DIR}/teleport" "${TELEPORT_CONFIG_PATH}"
}
# installs the teleport-provided launchd config
# copies the plist shipped inside the extracted tarball into /Library/LaunchDaemons
install_launchd_config() {
log "Installing Teleport launchd config to ${LAUNCHD_CONFIG_PATH}"
${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/launchd/com.goteleport.teleport.plist ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist
}
# installs the teleport-provided systemd unit
# copies the unit shipped inside the extracted tarball and reloads systemd
install_systemd_unit() {
log "Installing Teleport systemd unit to ${SYSTEMD_UNIT_PATH}"
${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/systemd/teleport.service ${SYSTEMD_UNIT_PATH}
log "Reloading unit files (systemctl daemon-reload)"
systemctl daemon-reload
}
# Render a YAML block sequence: "<indent><name>:" on the first line, then one
# "<indent>- <item>" line per whitespace-separated entry in the list ($2),
# all prefixed with the indentation string ($3).
get_yaml_list() {
  name="${1}"
  list="${2}"
  indentation="${3}"
  printf '%s%s:\n' "${indentation}" "${name}"
  # word-splitting on ${list} is intentional here
  for item in ${list}; do
    printf '%s- %s\n' "${indentation}" "${item}"
  done
}
# installs the provided teleport config (for app service)
# NOTE: heredoc bodies below are written verbatim to ${TELEPORT_CONFIG_PATH};
# do not add comment lines inside them or they become part of the config.
install_teleport_app_config() {
log "Writing Teleport app service config to ${TELEPORT_CONFIG_PATH}"
CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ")
cat << EOF > ${TELEPORT_CONFIG_PATH}
version: v3
teleport:
nodename: ${NODENAME}
auth_token: ${JOIN_TOKEN}
${CA_PINS_CONFIG}
proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT}
log:
output: stderr
severity: INFO
auth_service:
enabled: no
ssh_service:
enabled: no
proxy_service:
enabled: no
app_service:
enabled: yes
apps:
- name: "${APP_NAME}"
uri: "${APP_URI}"
public_addr: ${APP_PUBLIC_ADDR}
EOF
}
# installs the provided teleport config (for database service)
install_teleport_database_config() {
log "Writing Teleport database service config to ${TELEPORT_CONFIG_PATH}"
CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ")
# This file is processed by `shellschek` as part of the lint step
# It detects an issue because of un-set variables - $index and $line. This check is called SC2154.
# However, that's not an issue, because those variables are replaced when we run go's text/template engine over it.
# When executing the script, those are no long variables but actual values.
# shellcheck disable=SC2154
cat << EOF > ${TELEPORT_CONFIG_PATH}
version: v3
teleport:
nodename: ${NODENAME}
auth_token: ${JOIN_TOKEN}
${CA_PINS_CONFIG}
proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT}
log:
output: stderr
severity: INFO
auth_service:
enabled: no
ssh_service:
enabled: no
proxy_service:
enabled: no
db_service:
enabled: "yes"
resources:
- labels:
EOF
}
# installs the provided teleport config (for node service)
# delegates config generation to `teleport node configure` instead of a heredoc
install_teleport_node_config() {
log "Writing Teleport node service config to ${TELEPORT_CONFIG_PATH}"
${TELEPORT_BINARY_DIR}/teleport node configure \
--token ${JOIN_TOKEN} \
${JOIN_METHOD_FLAG} \
--ca-pin ${CA_PINS} \
--proxy ${TARGET_HOSTNAME}:${TARGET_PORT} \
"${LABELS_FLAG[@]}" \
--output ${TELEPORT_CONFIG_PATH}
}
# checks whether the given host is running MacOS
is_macos_host() { if [[ ${OSTYPE} == "darwin"* ]]; then return 0; else return 1; fi }
# checks whether teleport is already running on the host
is_running_teleport() {
check_exists_fatal pgrep
TELEPORT_PID=$(get_teleport_pid)
if [[ "${TELEPORT_PID}" != "" ]]; then return 0; else return 1; fi
}
# checks whether the given host is running systemd as its init system
is_using_systemd() { if [ -d /run/systemd/system ]; then return 0; else return 1; fi }
# prints a warning if the host isn't running systemd
no_systemd_warning() {
log_important "This host is not running systemd, so Teleport cannot be started automatically when it exits."
log_important "Please investigate an alternative way to keep Teleport running."
log_important "You can find information in our documentation: ${TELEPORT_DOCS_URL}"
log_important "For now, Teleport will be started in the foreground - you can press Ctrl+C to exit."
log_only
log_only "Run this command to start Teleport in future:"
log_only "$(get_teleport_start_command)"
log_only
log_only "------------------------------------------------------------------------"
log_only "| IMPORTANT: TELEPORT WILL STOP RUNNING AFTER YOU CLOSE THIS TERMINAL! |"
log_only "| YOU MUST CONFIGURE A SERVICE MANAGER TO MAKE IT RUN ON STARTUP! |"
log_only "------------------------------------------------------------------------"
log_only
}
# print a message giving the name of the node and a link to the docs
# gives some debugging instructions if the service didn't start successfully
print_welcome_message() {
log_only ""
if is_running_teleport; then
log_only "Teleport has been started."
log_only ""
if is_using_systemd; then
log_only "View its status with 'sudo systemctl status teleport.service'"
log_only "View Teleport logs using 'sudo journalctl -u teleport.service'"
log_only "To stop Teleport, run 'sudo systemctl stop teleport.service'"
log_only "To start Teleport again if you stop it, run 'sudo systemctl start teleport.service'"
elif is_macos_host; then
log_only "View Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'"
log_only "To stop Teleport, run 'sudo launchctl unload ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'"
log_only "To start Teleport again if you stop it, run 'sudo launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'"
fi
log_only ""
log_only "You can see this node connected in the Teleport web UI or 'tsh ls' with the name '${NODENAME}'"
log_only "Find more details on how to use Teleport here: https://goteleport.com/docs/user-manual/"
else
log_important "The Teleport service was installed, but it does not appear to have started successfully."
if is_using_systemd; then
log_important "Check the Teleport service's status with 'systemctl status teleport.service'"
log_important "View Teleport logs with 'journalctl -u teleport.service'"
elif is_macos_host; then
log_important "Check Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'"
fi
log_important "Contact Teleport support for further assistance."
fi
log_only ""
}
# start teleport in foreground (when there's no systemd)
start_teleport_foreground() {
log "Starting Teleport in the foreground"
# shellcheck disable=SC2091
$(get_teleport_start_command)
}
# start teleport via launchd (after installing config)
start_teleport_launchd() {
log "Starting Teleport via launchctl. It will automatically be started whenever the system reboots."
launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist
sleep ${ALIVE_CHECK_DELAY}
}
# start teleport via systemd (after installing unit)
start_teleport_systemd() {
log "Starting Teleport via systemd. It will automatically be started whenever the system reboots."
systemctl enable teleport.service
systemctl start teleport.service
sleep ${ALIVE_CHECK_DELAY}
}
# checks whether teleport binaries exist on the host
# Returns 0 when ANY of the Teleport binaries is present under
# ${TELEPORT_BINARY_DIR}, 1 when none are.
# BUGFIX: the previous if/else both returned inside the loop's first
# iteration, so only the "teleport" binary was ever inspected and a leftover
# tctl/tsh would not trigger the existing-install warning.
teleport_binaries_exist() {
  for BINARY_NAME in teleport tctl tsh; do
    if [ -f ${TELEPORT_BINARY_DIR}/${BINARY_NAME} ]; then
      return 0
    fi
  done
  return 1
}
# Return 0 when a Teleport config file is already present at the expected path.
teleport_config_exists() {
  [ -f ${TELEPORT_CONFIG_PATH} ]
}
# Return 0 when a Teleport data directory is already present.
teleport_datadir_exists() {
  [ -d ${TELEPORT_DATA_DIR} ]
}
# error out if any required values are not set
check_set TELEPORT_VERSION
check_set TARGET_HOSTNAME
check_set TARGET_PORT
check_set JOIN_TOKEN
check_set CA_PIN_HASHES
if [[ "${APP_INSTALL_MODE}" == "true" ]]; then
check_set APP_NAME
check_set APP_URI
check_set APP_PUBLIC_ADDR
fi
###
# main script starts here
###
# check connectivity to teleport server/port
if [[ "${IGNORE_CONNECTIVITY_CHECK}" == "true" ]]; then
log "TELEPORT_IGNORE_CONNECTIVITY_CHECK=true, not running connectivity check"
else
log "Checking TCP connectivity to Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT})"
if ! check_connectivity "${TARGET_HOSTNAME}" "${TARGET_PORT}"; then
# if we don't have a connectivity test method assigned, we know we couldn't run the test
if [[ ${CONNECTIVITY_TEST_METHOD} == "" ]]; then
log "Couldn't find nc, telnet or /dev/tcp to do a connection test"
log "Going to blindly continue without testing connectivity"
else
log_important "Couldn't open a connection to the Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT}) via ${CONNECTIVITY_TEST_METHOD}"
log_important "This issue will need to be fixed before the script can continue."
log_important "If you think this is an error, add 'export TELEPORT_IGNORE_CONNECTIVITY_CHECK=true && ' before the curl command which runs the script."
exit 1
fi
else
log "Connectivity to Teleport server (via ${CONNECTIVITY_TEST_METHOD}) looks good"
fi
fi
# use OSTYPE variable to figure out host type/arch
if [[ "${OSTYPE}" == "linux-gnu"* ]]; then
# linux host, now detect arch
TELEPORT_BINARY_TYPE="linux"
ARCH=$(uname -m)
log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}"
if [[ ${ARCH} == "armv7l" ]]; then
TELEPORT_ARCH="arm"
elif [[ ${ARCH} == "aarch64" ]]; then
TELEPORT_ARCH="arm64"
elif [[ ${ARCH} == "x86_64" ]]; then
TELEPORT_ARCH="amd64"
elif [[ ${ARCH} == "i686" ]]; then
TELEPORT_ARCH="386"
else
log_important "Error: cannot detect architecture from uname -m: ${ARCH}"
exit 1
fi
log "Detected arch: ${ARCH}, using Teleport arch ${TELEPORT_ARCH}"
# if the download format is already set, we have no need to detect distro
if [[ ${TELEPORT_FORMAT} == "" ]]; then
# detect distro
# if /etc/os-release doesn't exist, we need to use some other logic
if [ ! -f /etc/os-release ]; then
if [ -f /etc/centos-release ]; then
if grep -q 'CentOS release 6' /etc/centos-release; then
log_important "Detected host type: CentOS 6 [$(cat /etc/centos-release)]"
log_important "Teleport will not work on CentOS 6 -based servers due to the glibc version being too low."
exit 1
fi
elif [ -f /etc/redhat-release ]; then
if grep -q 'Red Hat Enterprise Linux Server release 5' /etc/redhat-release; then
log_important "Detected host type: RHEL5 [$(cat /etc/redhat-release)]"
log_important "Teleport will not work on RHEL5-based servers due to the glibc version being too low."
exit 1
elif grep -q 'Red Hat Enterprise Linux Server release 6' /etc/redhat-release; then
log_important "Detected host type: RHEL6 [$(cat /etc/redhat-release)]"
log_important "Teleport will not work on RHEL6-based servers due to the glibc version being too low."
exit 1
fi
fi
# use ID_LIKE value from /etc/os-release (if set)
# this is 'debian' on ubuntu/raspbian, 'centos rhel fedora' on amazon linux etc
else
check_exists_fatal cut
DISTRO_TYPE=$(grep ID_LIKE /etc/os-release | cut -d= -f2) || true
if [[ ${DISTRO_TYPE} == "" ]]; then
# use exact ID value from /etc/os-release if ID_LIKE is not set
DISTRO_TYPE=$(grep -w ID /etc/os-release | cut -d= -f2)
fi
if [[ ${DISTRO_TYPE} =~ "debian" ]]; then
TELEPORT_FORMAT="deb"
elif [[ "$DISTRO_TYPE" =~ "amzn"* ]] || [[ ${DISTRO_TYPE} =~ "centos"* ]] || [[ ${DISTRO_TYPE} =~ "rhel" ]] || [[ ${DISTRO_TYPE} =~ "fedora"* ]]; then
TELEPORT_FORMAT="rpm"
else
log "Couldn't match a distro type using /etc/os-release, falling back to tarball installer"
TELEPORT_FORMAT="tarball"
fi
fi
log "Detected distro type: ${DISTRO_TYPE}"
#suse, also identified as sles, uses a different path for its systemd then other distro types like ubuntu
if [[ ${DISTRO_TYPE} =~ "suse"* ]] || [[ ${DISTRO_TYPE} =~ "sles"* ]]; then
SYSTEMD_UNIT_PATH="/etc/systemd/system/teleport.service"
fi
fi
elif [[ "${OSTYPE}" == "darwin"* ]]; then
# macos host, now detect arch
TELEPORT_BINARY_TYPE="darwin"
ARCH=$(uname -m)
log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}"
if [[ ${ARCH} == "arm64" ]]; then
TELEPORT_ARCH="arm64"
elif [[ ${ARCH} == "x86_64" ]]; then
TELEPORT_ARCH="amd64"
else
log_important "Error: unsupported architecture from uname -m: ${ARCH}"
exit 1
fi
log "Detected MacOS ${ARCH} architecture, using Teleport arch ${TELEPORT_ARCH}"
TELEPORT_FORMAT="tarball"
else
log_important "Error - unsupported platform: ${OSTYPE}"
exit 1
fi
log "Using Teleport distribution: ${TELEPORT_FORMAT}"
# create temporary directory and exit cleanup logic
TEMP_DIR=$(mktemp -d -t teleport-XXXXXXXXXX)
log "Created temp dir ${TEMP_DIR}"
pushd "${TEMP_DIR}" >/dev/null 2>&1
# remove the temp dir on any exit, normal or error
finish() {
popd >/dev/null 2>&1
rm -rf "${TEMP_DIR}"
}
trap finish EXIT
# optional format override (mostly for testing)
if [[ ${OVERRIDE_FORMAT} != "" ]]; then
TELEPORT_FORMAT="${OVERRIDE_FORMAT}"
log "Overriding TELEPORT_FORMAT to ${OVERRIDE_FORMAT}"
fi
# check whether teleport is running already
# if it is, we exit gracefully with an error
if is_running_teleport; then
if [[ ${IGNORE_CHECKS} != "true" ]]; then
TELEPORT_PID=$(get_teleport_pid)
log_header "Warning: Teleport appears to already be running on this host (pid: ${TELEPORT_PID})"
log_cleanup_message
exit 1
else
log "Ignoring is_running_teleport as requested"
fi
fi
# check for existing config file
if teleport_config_exists; then
if [[ ${IGNORE_CHECKS} != "true" ]]; then
log_header "Warning: There is already a Teleport config file present at ${TELEPORT_CONFIG_PATH}."
log_cleanup_message
exit 1
else
log "Ignoring teleport_config_exists as requested"
fi
fi
# check for existing data directory
if teleport_datadir_exists; then
if [[ ${IGNORE_CHECKS} != "true" ]]; then
log_header "Warning: Found existing Teleport data directory (${TELEPORT_DATA_DIR})."
log_cleanup_message
exit 1
else
log "Ignoring teleport_datadir_exists as requested"
fi
fi
# check for existing binaries
if teleport_binaries_exist; then
if [[ ${IGNORE_CHECKS} != "true" ]]; then
log_header "Warning: Found existing Teleport binaries under ${TELEPORT_BINARY_DIR}."
log_cleanup_message
exit 1
else
log "Ignoring teleport_binaries_exist as requested"
fi
fi
# Download and install Teleport directly from release artifacts (tarball,
# .deb or .rpm), chosen by the previously-detected ${TELEPORT_FORMAT}.
install_from_file() {
# select correct URL/installation method based on distro
if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then
URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-v${TELEPORT_VERSION}-${TELEPORT_BINARY_TYPE}-${TELEPORT_ARCH}-bin.tar.gz"
# check that needed tools are installed
check_exists_fatal curl tar
# download tarball
log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}"
DOWNLOAD_FILENAME=$(get_download_filename "${URL}")
download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
# extract tarball
tar -xzf "${TEMP_DIR}/${DOWNLOAD_FILENAME}" -C "${TEMP_DIR}"
# install binaries to /usr/local/bin
for BINARY in ${TELEPORT_BINARY_LIST}; do
${COPY_COMMAND} "${TELEPORT_ARCHIVE_PATH}/${BINARY}" "${TELEPORT_BINARY_DIR}/"
done
elif [[ ${TELEPORT_FORMAT} == "deb" ]]; then
# convert teleport arch to deb arch
if [[ ${TELEPORT_ARCH} == "amd64" ]]; then
DEB_ARCH="amd64"
elif [[ ${TELEPORT_ARCH} == "386" ]]; then
DEB_ARCH="i386"
elif [[ ${TELEPORT_ARCH} == "arm" ]]; then
DEB_ARCH="arm"
elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then
DEB_ARCH="arm64"
fi
URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}_${TELEPORT_VERSION}_${DEB_ARCH}.deb"
check_deb_not_already_installed
# check that needed tools are installed
check_exists_fatal curl dpkg
# download deb and register cleanup operation
log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}"
DOWNLOAD_FILENAME=$(get_download_filename "${URL}")
download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
# install deb
log "Using dpkg to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}"
dpkg -i "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
elif [[ ${TELEPORT_FORMAT} == "rpm" ]]; then
# convert teleport arch to rpm arch
if [[ ${TELEPORT_ARCH} == "amd64" ]]; then
RPM_ARCH="x86_64"
elif [[ ${TELEPORT_ARCH} == "386" ]]; then
RPM_ARCH="i386"
elif [[ ${TELEPORT_ARCH} == "arm" ]]; then
RPM_ARCH="arm"
elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then
RPM_ARCH="arm64"
fi
URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-${TELEPORT_VERSION}-1.${RPM_ARCH}.rpm"
check_rpm_not_already_installed
# check for package managers
if check_exists dnf; then
log "Found 'dnf' package manager, using it"
PACKAGE_MANAGER_COMMAND="dnf -y install"
elif check_exists yum; then
log "Found 'yum' package manager, using it"
PACKAGE_MANAGER_COMMAND="yum -y localinstall"
else
PACKAGE_MANAGER_COMMAND=""
log "Cannot find 'yum' or 'dnf' package manager commands, will try installing the rpm manually instead"
fi
# check that needed tools are installed
check_exists_fatal curl
log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}"
DOWNLOAD_FILENAME=$(get_download_filename "${URL}")
download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
# install with package manager if available
if [[ ${PACKAGE_MANAGER_COMMAND} != "" ]]; then
log "Installing Teleport release from ${TEMP_DIR}/${DOWNLOAD_FILENAME} using ${PACKAGE_MANAGER_COMMAND}"
# install rpm with package manager
${PACKAGE_MANAGER_COMMAND} "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
# use rpm if we couldn't find a package manager
else
# install RPM (in upgrade mode)
log "Using rpm to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}"
rpm -Uvh "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
fi
else
log_important "Can't figure out what Teleport format to use"
exit 1
fi
}
# Install Teleport from the official apt/yum repositories (preferred path
# when the detected distro+version has a repo available).
install_from_repo() {
if [[ "${REPO_CHANNEL}" == "" ]]; then
# By default, use the current version's channel.
REPO_CHANNEL=stable/v"${TELEPORT_VERSION//.*/}"
fi
# Populate $ID, $VERSION_ID, $VERSION_CODENAME and other env vars identifying the OS.
# shellcheck disable=SC1091
. /etc/os-release
PACKAGE_LIST=$(package_list)
if [ "$ID" == "debian" ] || [ "$ID" == "ubuntu" ]; then
# old versions of ubuntu require that keys get added by `apt-key add`, without
# adding the key apt shows a key signing error when installing teleport.
if [[
($ID == "ubuntu" && $VERSION_ID == "16.04") || \
($ID == "debian" && $VERSION_ID == "9" )
]]; then
apt install apt-transport-https gnupg -y
curl -fsSL https://apt.releases.teleport.dev/gpg | apt-key add -
echo "deb https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list
else
curl -fsSL https://apt.releases.teleport.dev/gpg \
-o /usr/share/keyrings/teleport-archive-keyring.asc
echo "deb [signed-by=/usr/share/keyrings/teleport-archive-keyring.asc] \
https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list
fi
apt-get update
apt-get install -y ${PACKAGE_LIST}
elif [ "$ID" = "amzn" ] || [ "$ID" = "rhel" ] || [ "$ID" = "centos" ] ; then
if [ "$ID" = "rhel" ]; then
VERSION_ID="${VERSION_ID//.*/}" # convert version numbers like '7.2' to only include the major version
fi
yum install -y yum-utils
yum-config-manager --add-repo \
"$(rpm --eval "https://yum.releases.teleport.dev/$ID/$VERSION_ID/Teleport/%{_arch}/${REPO_CHANNEL}/teleport.repo")"
# Remove metadata cache to prevent cache from other channel (eg, prior version)
# See: https://github.com/gravitational/teleport/issues/22581
yum --disablerepo="*" --enablerepo="teleport" clean metadata
yum install -y ${PACKAGE_LIST}
else
echo "Unsupported distro: $ID"
exit 1
fi
}
# package_list returns the list of packages to install.
# Versions are pinned in the syntax native to the target package manager
# (deb: name=version, rpm: name-version), so the output can be fed straight
# to apt or yum.
package_list() {
  TELEPORT_PACKAGE_PIN_VERSION=${TELEPORT_PACKAGE_NAME}
  TELEPORT_UPDATER_PIN_VERSION="${TELEPORT_PACKAGE_NAME}-updater"
  case "${TELEPORT_FORMAT}" in
    deb)
      TELEPORT_PACKAGE_PIN_VERSION+="=${TELEPORT_VERSION}"
      TELEPORT_UPDATER_PIN_VERSION+="=${TELEPORT_VERSION}"
      ;;
    rpm)
      # yum forbids '-' inside the version portion of a pin
      TELEPORT_YUM_VERSION="${TELEPORT_VERSION//-/_}"
      TELEPORT_PACKAGE_PIN_VERSION+="-${TELEPORT_YUM_VERSION}"
      TELEPORT_UPDATER_PIN_VERSION+="-${TELEPORT_YUM_VERSION}"
      ;;
  esac
  PACKAGE_LIST=${TELEPORT_PACKAGE_PIN_VERSION}
  # (warning): This expression is constant. Did you forget the $ on a variable?
  # Disabling the warning above because expression is templated.
  # shellcheck disable=SC2050
  if is_using_systemd && [[ "false" == "true" ]]; then
    # Teleport Updater requires systemd.
    PACKAGE_LIST+=" ${TELEPORT_UPDATER_PIN_VERSION}"
  fi
  echo ${PACKAGE_LIST}
}
# Return 0 when the host's distro+version pair has an official Teleport
# package repository; 1 otherwise (fall back to direct artifact download).
is_repo_available() {
if [[ "${OSTYPE}" != "linux-gnu" ]]; then
return 1
fi
# Populate $ID, $VERSION_ID and other env vars identifying the OS.
# shellcheck disable=SC1091
. /etc/os-release
# The following distros+version have a Teleport repository to install from.
case "${ID}-${VERSION_ID}" in
ubuntu-16.04* | ubuntu-18.04* | ubuntu-20.04* | ubuntu-22.04* | \
debian-9* | debian-10* | debian-11* | \
rhel-7* | rhel-8* | rhel-9* | \
centos-7* | centos-8* | centos-9* | \
amzn-2 | amzn-2023)
return 0;;
esac
return 1
}
# prefer repo installation; fall back to direct artifact download
if is_repo_available; then
log "Installing repo for distro $ID."
install_from_repo
else
log "Installing from binary file."
install_from_file
fi
# check that teleport binary can be found and runs
if ! check_teleport_binary; then
log_important "The Teleport binary could not be found at ${TELEPORT_BINARY_DIR} as expected."
log_important "This usually means that there was an error during installation."
log_important "Check this log for obvious signs of error and contact Teleport support"
log_important "for further assistance."
exit 1
fi
# install teleport config
# check the mode and write the appropriate config type
if [[ "${APP_INSTALL_MODE}" == "true" ]]; then
install_teleport_app_config
elif [[ "${DB_INSTALL_MODE}" == "true" ]]; then
install_teleport_database_config
else
install_teleport_node_config
fi
# Used to track whether a Teleport agent was installed using this method.
export TELEPORT_INSTALL_METHOD_NODE_SCRIPT="true"
# install systemd unit if applicable (linux hosts)
if is_using_systemd; then
log "Host is using systemd"
# we only need to manually install the systemd config if teleport was installed via tarball
# all other packages will deploy it automatically
if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then
install_systemd_unit
fi
start_teleport_systemd
print_welcome_message
# install launchd config on MacOS hosts
elif is_macos_host; then
log "Host is running MacOS"
install_launchd_config
start_teleport_launchd
print_welcome_message
# not a MacOS host and no systemd available, print a warning
# and temporarily start Teleport in the foreground
else
log "Host does not appear to be using systemd"
no_systemd_warning
start_teleport_foreground
fi

View File

@@ -0,0 +1,995 @@
#!/bin/bash
# Teleport node installer: downloads/installs Teleport, writes its config and
# starts the service. Values below are templated in by the Teleport server
# before the script is served.
set -euo pipefail
SCRIPT_NAME="teleport-installer"
# default values
ALIVE_CHECK_DELAY=3
CONNECTIVITY_TEST_METHOD=""
COPY_COMMAND="cp"
DISTRO_TYPE=""
IGNORE_CONNECTIVITY_CHECK="${TELEPORT_IGNORE_CONNECTIVITY_CHECK:-false}"
LAUNCHD_CONFIG_PATH="/Library/LaunchDaemons"
LOG_FILENAME="$(mktemp -t ${SCRIPT_NAME}.log.XXXXXXXXXX)"
MACOS_STDERR_LOG="/var/log/teleport-stderr.log"
MACOS_STDOUT_LOG="/var/log/teleport-stdout.log"
SYSTEMD_UNIT_PATH="/lib/systemd/system/teleport.service"
TARGET_PORT_DEFAULT=443
TELEPORT_ARCHIVE_PATH='teleport'
TELEPORT_BINARY_DIR="/usr/local/bin"
TELEPORT_BINARY_LIST="teleport tctl tsh"
TELEPORT_CONFIG_PATH="/etc/teleport.yaml"
TELEPORT_DATA_DIR="/var/lib/teleport"
TELEPORT_DOCS_URL="https://goteleport.com/docs/"
TELEPORT_FORMAT=""
# initialise variables (because set -u disallows unbound variables)
f=""
l=""
DISABLE_TLS_VERIFICATION=false
NODENAME=$(hostname)
IGNORE_CHECKS=false
OVERRIDE_FORMAT=""
QUIET=false
APP_INSTALL_DECISION=""
INTERACTIVE=false
# the default value of each variable is a templatable Go value so that it can
# optionally be replaced by the server before the script is served up
TELEPORT_VERSION='13.3.4'
TELEPORT_PACKAGE_NAME='teleport'
REPO_CHANNEL=''
TARGET_HOSTNAME='teleport.access.datasaker.io'
TARGET_PORT='443'
JOIN_TOKEN='b0a997e4c3c200b8152a3f3025548189'
JOIN_METHOD=''
JOIN_METHOD_FLAG=""
[ -n "$JOIN_METHOD" ] && JOIN_METHOD_FLAG="--join-method ${JOIN_METHOD}"
# inject labels into the configuration
LABELS='teleport.internal/resource-id=89a1a61b-54f4-4e42-94d9-7ab2b1c1c847'
LABELS_FLAG=()
[ -n "$LABELS" ] && LABELS_FLAG=(--labels "${LABELS}")
# When all stanza generators have been updated to use the new
# `teleport <service> configure` commands CA_PIN_HASHES can be removed along
# with the script passing it in in `join_tokens.go`.
CA_PIN_HASHES='sha256:941164dbcfedbe05de067f5ce14d4c4e6a0523c070f0bdb3959b01ba452b15f8'
CA_PINS='sha256:941164dbcfedbe05de067f5ce14d4c4e6a0523c070f0bdb3959b01ba452b15f8'
ARG_CA_PIN_HASHES=""
APP_INSTALL_MODE='false'
APP_NAME=''
APP_URI=''
DB_INSTALL_MODE='false'
# usage message
# prints the supported flags to stderr and exits non-zero
# shellcheck disable=SC2086
usage() { echo "Usage: $(basename $0) [-v teleport_version] [-h target_hostname] [-p target_port] [-j join_token] [-c ca_pin_hash]... [-q] [-l log_filename] [-a app_name] [-u app_uri] " 1>&2; exit 1; }
# parse command line flags; any flag given here overrides the templated default
while getopts ":v:h:p:j:c:f:ql:ika:u:" o; do
case "${o}" in
v) TELEPORT_VERSION=${OPTARG};;
h) TARGET_HOSTNAME=${OPTARG};;
p) TARGET_PORT=${OPTARG};;
j) JOIN_TOKEN=${OPTARG};;
c) ARG_CA_PIN_HASHES="${ARG_CA_PIN_HASHES} ${OPTARG}";;
f) f=${OPTARG}; if [[ ${f} != "tarball" && ${f} != "deb" && ${f} != "rpm" ]]; then usage; fi;;
q) QUIET=true;;
l) l=${OPTARG};;
i) IGNORE_CHECKS=true; COPY_COMMAND="cp -f";;
k) DISABLE_TLS_VERIFICATION=true;;
a) APP_INSTALL_MODE=true && APP_NAME=${OPTARG};;
u) APP_INSTALL_MODE=true && APP_URI=${OPTARG};;
*) usage;;
esac
done
shift $((OPTIND-1))
# CA pin hashes passed on the command line (-c, repeatable) replace the templated ones
if [[ "${ARG_CA_PIN_HASHES}" != "" ]]; then
CA_PIN_HASHES="${ARG_CA_PIN_HASHES}"
fi
# builds a Go template variable reference for the given name, assembling it
# piecewise so the template delimiters never appear verbatim in this script
# (the server runs the script through Go's text/template engine first)
construct_go_template() {
local result='{'
result+='{'
result+=".${1}"
result+='}'
result+='}'
echo "${result}"
}
# aborts the whole script unless the effective user is root (uid 0)
assert_running_as_root() {
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run as root." 1>&2
exit 1
fi
}
# function to check whether variables are either blank or set to the default go template value
# (because they haven't been set by the go script generator or a command line argument)
# $1: name of the variable to inspect (read via indirect expansion)
# $2: the Go template field name that would be its untemplated default
# returns 1 if the variable is set to a default/zero value
# returns 0 otherwise (i.e. it needs to be set interactively)
check_variable() {
# ${!1} expands to the value of the variable whose name is in $1
VARIABLE_VALUE="${!1}"
GO_TEMPLATE_NAME=$(construct_go_template "${2}")
if [[ "${VARIABLE_VALUE}" == "" ]] || [[ "${VARIABLE_VALUE}" == "${GO_TEMPLATE_NAME}" ]]; then
return 1
fi
return 0
}
# returns 0 when the given value reads as an affirmative answer
# (any of the y/yes/ye/yep/ya spellings, lower or upper case), 1 otherwise
is_truthy() {
local candidate="$1"
local affirmative
for affirmative in y Y yes YES ye YE yep YEP ya YA; do
if [[ "${candidate}" == "${affirmative}" ]]; then
return 0
fi
done
return 1
}
# function to read input until the value you get is non-empty
# $1: name of the variable to assign the result to
# remaining args: the prompt shown before each read attempt
read_nonblank_input() {
INPUT=""
VARIABLE_TO_ASSIGN="$1"
shift
PROMPT="$*"
# keep re-prompting until the user types something
until [[ "${INPUT}" != "" ]]; do
echo -n "${PROMPT}"
read -r INPUT
done
# printf -v assigns to the variable named by the caller without eval
printf -v "${VARIABLE_TO_ASSIGN}" '%s' "${INPUT}"
}
# error if we're not root
assert_running_as_root
# set/read values interactively if not provided
# users will be prompted to enter their own value if all the following are true:
# - the current value is blank, or equal to the default Go template value
# - the value has not been provided by command line argument
! check_variable TELEPORT_VERSION version && INTERACTIVE=true && read_nonblank_input TELEPORT_VERSION "Enter Teleport version to install (without v): "
! check_variable TARGET_HOSTNAME hostname && INTERACTIVE=true && read_nonblank_input TARGET_HOSTNAME "Enter target hostname to connect to: "
! check_variable TARGET_PORT port && INTERACTIVE=true && { echo -n "Enter target port to connect to [${TARGET_PORT_DEFAULT}]: "; read -r TARGET_PORT; }
! check_variable JOIN_TOKEN token && INTERACTIVE=true && read_nonblank_input JOIN_TOKEN "Enter Teleport join token as provided: "
! check_variable CA_PIN_HASHES caPins && INTERACTIVE=true && read_nonblank_input CA_PIN_HASHES "Enter CA pin hash (separate multiple hashes with spaces): "
# -f / -l flags override format and log filename when provided
[ -n "${f}" ] && OVERRIDE_FORMAT=${f}
[ -n "${l}" ] && LOG_FILENAME=${l}
# if app service mode is not set (or is the default value) and we are running interactively (i.e. the user has provided some input already),
# prompt the user to choose whether to enable app_service
if [[ "${INTERACTIVE}" == "true" ]]; then
if ! check_variable APP_INSTALL_MODE appInstallMode; then
APP_INSTALL_MODE="false"
echo -n "Would you like to enable and configure Teleport's app_service, to use Teleport as a reverse proxy for a web application? [y/n, default: n] "
read -r APP_INSTALL_DECISION
if is_truthy "${APP_INSTALL_DECISION}"; then
APP_INSTALL_MODE="true"
fi
fi
fi
# prompt for extra needed values if we're running in app service mode
if [[ "${APP_INSTALL_MODE}" == "true" ]]; then
! check_variable APP_NAME appName && read_nonblank_input APP_NAME "Enter app name to install (must be DNS-compatible; less than 63 characters, no spaces, only - or _ as punctuation): "
! check_variable APP_URI appURI && read_nonblank_input APP_URI "Enter app URI (the host running the Teleport app service must be able to connect to this): "
# generate app public addr by concatenating values
APP_PUBLIC_ADDR="${APP_NAME}.${TARGET_HOSTNAME}"
fi
# set default target port if value not provided
if [[ "${TARGET_PORT}" == "" ]]; then
TARGET_PORT=${TARGET_PORT_DEFAULT}
fi
# clear log file if provided
if [[ "${LOG_FILENAME}" != "" ]]; then
if [ -f "${LOG_FILENAME}" ]; then
echo -n "" > "${LOG_FILENAME}"
fi
fi
# log functions
# current timestamp, printed without a trailing newline
log_date() { printf '%s' "$(date '+%Y-%m-%d %H:%M:%S %Z')"; }
# writes a timestamped, prefixed line to stdout (suppressed by -q quiet mode)
# and mirrors it into the logfile when one is configured
log() {
LOG_LINE="$(log_date) [${SCRIPT_NAME}] $*"
if [[ ${QUIET} != "true" ]]; then
echo "${LOG_LINE}"
fi
if [[ -n "${LOG_FILENAME}" ]]; then
echo "${LOG_LINE}" >> "${LOG_FILENAME}"
fi
}
# writes a raw line (no timestamp or prefix); always prints, even in quiet
# mode, and mirrors it into the logfile when one is configured
log_only() {
LOG_LINE="$*"
echo "${LOG_LINE}"
if [[ -n "${LOG_FILENAME}" ]]; then
echo "${LOG_LINE}" >> "${LOG_FILENAME}"
fi
}
# writes a line padded with blank lines as a section header; only the line
# itself (not the padding) is mirrored into the logfile
log_header() {
LOG_LINE="$*"
printf '\n%s\n\n' "${LOG_LINE}"
if [[ -n "${LOG_FILENAME}" ]]; then
echo "${LOG_LINE}" >> "${LOG_FILENAME}"
fi
}
# important log lines, print even when -q (quiet) is passed
# the "--->" marker makes these lines easy to spot in the log
log_important() {
LOG_LINE="$(log_date) [${SCRIPT_NAME}] ---> $*"
echo "${LOG_LINE}"
if [[ "${LOG_FILENAME}" != "" ]]; then
echo "${LOG_LINE}" >> "${LOG_FILENAME}"
fi
}
# prints manual cleanup instructions; shown when an existing Teleport
# installation/config/data dir is detected and the script refuses to proceed
log_cleanup_message() {
log_only "This script does not overwrite any existing settings or Teleport installations."
log_only "Please clean up by running any of the following steps as necessary:"
log_only "- stop any running Teleport processes"
log_only " - pkill -f teleport"
log_only "- remove any data under ${TELEPORT_DATA_DIR}, along with the directory itself"
log_only " - rm -rf ${TELEPORT_DATA_DIR}"
log_only "- remove any configuration at ${TELEPORT_CONFIG_PATH}"
log_only " - rm -f ${TELEPORT_CONFIG_PATH}"
log_only "- remove any Teleport binaries (${TELEPORT_BINARY_LIST}) installed under ${TELEPORT_BINARY_DIR}"
# build an example "rm -f <path> <path> ..." command covering every binary
for BINARY in ${TELEPORT_BINARY_LIST}; do EXAMPLE_DELETE_COMMAND+="${TELEPORT_BINARY_DIR}/${BINARY} "; done
log_only " - rm -f ${EXAMPLE_DELETE_COMMAND}"
log_only "Run this installer again when done."
log_only
}
# other functions
# returns 0 when the named program/builtin is resolvable, 1 otherwise
check_exists() {
NAME=$1
if type "${NAME}" >/dev/null 2>&1; then
return 0
fi
return 1
}
# verifies every named binary exists; logs an error and aborts the script on
# the first one that is missing
check_exists_fatal() {
local tool
for tool in "$@"; do
if check_exists "${tool}"; then
continue
fi
log_important "Error: cannot find ${tool} - it needs to be installed"
exit 1
done
}
# check connectivity to the given host/port and make a request to see if Teleport is listening
# uses the global variable CONNECTIVITY_TEST_METHOD to return the name of the checker, as return
# values aren't really a thing that exists in bash
# returns 0 on success, 1 on connection failure, 255 when no checker tool is available
check_connectivity() {
HOST=$1
PORT=$2
# check with nc
if check_exists nc; then
CONNECTIVITY_TEST_METHOD="nc"
if nc -z -w3 "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi
# if there's no nc, check with telnet
elif check_exists telnet; then
CONNECTIVITY_TEST_METHOD="telnet"
# 0x1d (^]) followed by "close" exits telnet after the connection attempt
if echo -e '\x1dclose\x0d' | telnet "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi
# if there's no nc or telnet, try and use /dev/tcp
# NOTE(review): bash's /dev/tcp is a virtual path, so [ -f /dev/tcp ] appears
# to never be true on typical systems and this branch looks unreachable — confirm
elif [ -f /dev/tcp ]; then
CONNECTIVITY_TEST_METHOD="/dev/tcp"
if (head -1 < "/dev/tcp/${HOST}/${PORT}") >/dev/null 2>&1; then return 0; else return 1; fi
else
return 255
fi
}
# check whether a teleport DEB is already installed and exit with error if so
check_deb_not_already_installed() {
check_exists_fatal dpkg awk
# second column of `dpkg -l` is the package name; match any starting with "teleport"
DEB_INSTALLED=$(dpkg -l | awk '{print $2}' | grep -E ^teleport || true)
if [[ ${DEB_INSTALLED} != "" ]]; then
log_important "It looks like there is already a Teleport DEB package installed (name: ${DEB_INSTALLED})."
log_important "You will need to remove that package before using this script."
exit 1
fi
}
# check whether a teleport RPM is already installed and exit with error if so
check_rpm_not_already_installed() {
check_exists_fatal rpm
# `|| true` keeps set -e happy when grep finds nothing
RPM_INSTALLED=$(rpm -qa | grep -E ^teleport || true)
if [[ ${RPM_INSTALLED} != "" ]]; then
log_important "It looks like there is already a Teleport RPM package installed (name: ${RPM_INSTALLED})."
log_important "You will need to remove that package before using this script."
exit 1
fi
}
# function to check if given variable is set
# $1: the variable NAME; its value is read via indirect expansion ${!1}
# logs name+value when set, logs and exits the script when empty
check_set() {
CHECK_KEY=${1} || true
CHECK_VALUE=${!1} || true
if [[ "${CHECK_VALUE}" == "" ]]; then
log "Required variable ${CHECK_KEY} is not set"
exit 1
else
log "${CHECK_KEY}: ${CHECK_VALUE}"
fi
}
# checks that teleport binary can be found in path and runs 'teleport version'
# returns 0 (and logs the version) on success, 1 when no output was produced
check_teleport_binary() {
FOUND_TELEPORT_VERSION=$(${TELEPORT_BINARY_DIR}/teleport version)
if [[ "${FOUND_TELEPORT_VERSION}" == "" ]]; then
log "Cannot find Teleport binary"
return 1
else
log "Found: ${FOUND_TELEPORT_VERSION}";
return 0
fi
}
# wrapper to download with curl
# $1: URL to fetch, $2: local output path
# validates the download (non-zero size) and, when a sha utility is present,
# fetches "<url>.sha256" and verifies the checksum; exits the script on failure
download() {
URL=$1
OUTPUT_PATH=$2
CURL_COMMAND="curl -fsSL --retry 5 --retry-delay 5"
# optionally allow disabling of TLS verification (can be useful on older distros
# which often have an out-of-date set of CA certificate bundle which won't validate)
if [[ ${DISABLE_TLS_VERIFICATION} == "true" ]]; then
CURL_COMMAND+=" -k"
fi
log "Running ${CURL_COMMAND} ${URL}"
log "Downloading to ${OUTPUT_PATH}"
# handle errors with curl
if ! ${CURL_COMMAND} -o "${OUTPUT_PATH}" "${URL}"; then
log_important "curl error downloading ${URL}"
log "On an older OS, this may be related to the CA certificate bundle being too old."
log "You can pass the hidden -k flag to this script to disable TLS verification - this is not recommended!"
exit 1
fi
# check that the file has a non-zero size as an extra validation
check_exists_fatal wc xargs
FILE_SIZE="$(wc -c <"${OUTPUT_PATH}" | xargs)"
if [ "${FILE_SIZE}" -eq 0 ]; then
log_important "The downloaded file has a size of 0 bytes, which means an error occurred. Cannot continue."
exit 1
else
log "Downloaded file size: ${FILE_SIZE} bytes"
fi
# if we have a hashing utility installed, also download and validate the checksum
SHA_COMMAND=""
# shasum is installed by default on MacOS and some distros
if check_exists shasum; then
SHA_COMMAND="shasum -a 256"
# sha256sum is installed by default in some other distros
elif check_exists sha256sum; then
SHA_COMMAND="sha256sum"
fi
if [[ "${SHA_COMMAND}" != "" ]]; then
log "Will use ${SHA_COMMAND} to validate the checksum of the downloaded file"
SHA_URL="${URL}.sha256"
SHA_PATH="${OUTPUT_PATH}.sha256"
${CURL_COMMAND} -o "${SHA_PATH}" "${SHA_URL}"
if ${SHA_COMMAND} --status -c "${SHA_PATH}"; then
log "The downloaded file's checksum validated correctly"
else
# mismatch: extract and show expected vs actual hash before bailing out
SHA_EXPECTED=$(cat "${SHA_PATH}")
SHA_ACTUAL=$(${SHA_COMMAND} "${OUTPUT_PATH}")
if check_exists awk; then
SHA_EXPECTED=$(echo "${SHA_EXPECTED}" | awk '{print $1}')
SHA_ACTUAL=$(echo "${SHA_ACTUAL}" | awk '{print $1}')
fi
log_important "Checksum of the downloaded file did not validate correctly"
log_important "Expected: ${SHA_EXPECTED}"
log_important "Got: ${SHA_ACTUAL}"
log_important "Try rerunning this script from the start. If the issue persists, contact Teleport support."
exit 1
fi
else
log "shasum/sha256sum utilities not found, will skip checksum validation"
fi
}
# extracts the trailing filename from a URL/path
# (https://target.site/path/to/file.tar.gz -> file.tar.gz)
get_download_filename() {
local full_path="$1"
echo "${full_path##*/}"
}
# gets the pid of any running teleport process (and converts newlines to spaces)
# prints an empty line when no teleport process is running
get_teleport_pid() {
check_exists_fatal pgrep xargs
pgrep teleport | xargs echo
}
# prints the shell command which starts teleport with the installed config
get_teleport_start_command() {
printf '%s start --config=%s\n' "${TELEPORT_BINARY_DIR}/teleport" "${TELEPORT_CONFIG_PATH}"
}
# installs the teleport-provided launchd config
# (MacOS only; copies the plist shipped inside the extracted tarball)
install_launchd_config() {
log "Installing Teleport launchd config to ${LAUNCHD_CONFIG_PATH}"
${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/launchd/com.goteleport.teleport.plist ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist
}
# installs the teleport-provided systemd unit
# (copies the unit from the extracted tarball, then reloads systemd)
install_systemd_unit() {
log "Installing Teleport systemd unit to ${SYSTEMD_UNIT_PATH}"
${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/systemd/teleport.service ${SYSTEMD_UNIT_PATH}
log "Reloading unit files (systemctl daemon-reload)"
systemctl daemon-reload
}
# renders a yaml list: a "<indent><name>:" header line followed by one
# "<indent>- <item>" line per whitespace-separated item in $2
get_yaml_list() {
local key="${1}"
local values="${2}"
local pad="${3}"
local entry
echo "${pad}${key}:"
for entry in ${values}; do
echo "${pad}- ${entry}"
done
}
# installs the provided teleport config (for app service)
# writes a config with only app_service enabled, joining via the templated
# token/CA pins and proxying APP_URI at APP_PUBLIC_ADDR
install_teleport_app_config() {
log "Writing Teleport app service config to ${TELEPORT_CONFIG_PATH}"
CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ")
cat << EOF > ${TELEPORT_CONFIG_PATH}
version: v3
teleport:
nodename: ${NODENAME}
auth_token: ${JOIN_TOKEN}
${CA_PINS_CONFIG}
proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT}
log:
output: stderr
severity: INFO
auth_service:
enabled: no
ssh_service:
enabled: no
proxy_service:
enabled: no
app_service:
enabled: yes
apps:
- name: "${APP_NAME}"
uri: "${APP_URI}"
public_addr: ${APP_PUBLIC_ADDR}
EOF
}
# installs the provided teleport config (for database service)
# writes a config with only db_service enabled; the resources label matchers
# are appended by the server-side template after the heredoc
install_teleport_database_config() {
log "Writing Teleport database service config to ${TELEPORT_CONFIG_PATH}"
CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ")
# This file is processed by `shellschek` as part of the lint step
# It detects an issue because of un-set variables - $index and $line. This check is called SC2154.
# However, that's not an issue, because those variables are replaced when we run go's text/template engine over it.
# When executing the script, those are no long variables but actual values.
# shellcheck disable=SC2154
cat << EOF > ${TELEPORT_CONFIG_PATH}
version: v3
teleport:
nodename: ${NODENAME}
auth_token: ${JOIN_TOKEN}
${CA_PINS_CONFIG}
proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT}
log:
output: stderr
severity: INFO
auth_service:
enabled: no
ssh_service:
enabled: no
proxy_service:
enabled: no
db_service:
enabled: "yes"
resources:
- labels:
EOF
}
# installs the provided teleport config (for node service)
# delegates to `teleport node configure`, which writes the config file itself
install_teleport_node_config() {
log "Writing Teleport node service config to ${TELEPORT_CONFIG_PATH}"
${TELEPORT_BINARY_DIR}/teleport node configure \
--token ${JOIN_TOKEN} \
${JOIN_METHOD_FLAG} \
--ca-pin ${CA_PINS} \
--proxy ${TARGET_HOSTNAME}:${TARGET_PORT} \
"${LABELS_FLAG[@]}" \
--output ${TELEPORT_CONFIG_PATH}
}
# checks whether the given host is running MacOS (bash sets OSTYPE to darwin*)
is_macos_host() {
case "${OSTYPE}" in
darwin*) return 0;;
*) return 1;;
esac
}
# checks whether teleport is already running on the host
# returns 0 when at least one teleport pid was found
is_running_teleport() {
check_exists_fatal pgrep
TELEPORT_PID=$(get_teleport_pid)
if [[ "${TELEPORT_PID}" != "" ]]; then return 0; else return 1; fi
}
# checks whether the given host is running systemd as its init system
# (systemd creates /run/systemd/system at boot)
is_using_systemd() {
[ -d /run/systemd/system ]
}
# prints a warning if the host isn't running systemd
# warns that Teleport will only run in the foreground and shows the manual start command
no_systemd_warning() {
log_important "This host is not running systemd, so Teleport cannot be started automatically when it exits."
log_important "Please investigate an alternative way to keep Teleport running."
log_important "You can find information in our documentation: ${TELEPORT_DOCS_URL}"
log_important "For now, Teleport will be started in the foreground - you can press Ctrl+C to exit."
log_only
log_only "Run this command to start Teleport in future:"
log_only "$(get_teleport_start_command)"
log_only
log_only "------------------------------------------------------------------------"
log_only "| IMPORTANT: TELEPORT WILL STOP RUNNING AFTER YOU CLOSE THIS TERMINAL! |"
log_only "| YOU MUST CONFIGURE A SERVICE MANAGER TO MAKE IT RUN ON STARTUP! |"
log_only "------------------------------------------------------------------------"
log_only
}
# print a message giving the name of the node and a link to the docs
# gives some debugging instructions if the service didn't start successfully
# output branches on init system (systemd vs launchd) for the right commands
print_welcome_message() {
log_only ""
if is_running_teleport; then
log_only "Teleport has been started."
log_only ""
if is_using_systemd; then
log_only "View its status with 'sudo systemctl status teleport.service'"
log_only "View Teleport logs using 'sudo journalctl -u teleport.service'"
log_only "To stop Teleport, run 'sudo systemctl stop teleport.service'"
log_only "To start Teleport again if you stop it, run 'sudo systemctl start teleport.service'"
elif is_macos_host; then
log_only "View Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'"
log_only "To stop Teleport, run 'sudo launchctl unload ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'"
log_only "To start Teleport again if you stop it, run 'sudo launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'"
fi
log_only ""
log_only "You can see this node connected in the Teleport web UI or 'tsh ls' with the name '${NODENAME}'"
log_only "Find more details on how to use Teleport here: https://goteleport.com/docs/user-manual/"
else
log_important "The Teleport service was installed, but it does not appear to have started successfully."
if is_using_systemd; then
log_important "Check the Teleport service's status with 'systemctl status teleport.service'"
log_important "View Teleport logs with 'journalctl -u teleport.service'"
elif is_macos_host; then
log_important "Check Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'"
fi
log_important "Contact Teleport support for further assistance."
fi
log_only ""
}
# start teleport in foreground (when there's no systemd)
# blocks until teleport exits or the user interrupts it
start_teleport_foreground() {
log "Starting Teleport in the foreground"
# shellcheck disable=SC2091
$(get_teleport_start_command)
}
# start teleport via launchd (after installing config)
# sleeps briefly so the follow-up is_running_teleport check is meaningful
start_teleport_launchd() {
log "Starting Teleport via launchctl. It will automatically be started whenever the system reboots."
launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist
sleep ${ALIVE_CHECK_DELAY}
}
# start teleport via systemd (after installing unit)
# enables the unit for boot, starts it now, then waits for the liveness check
start_teleport_systemd() {
log "Starting Teleport via systemd. It will automatically be started whenever the system reboots."
systemctl enable teleport.service
systemctl start teleport.service
sleep ${ALIVE_CHECK_DELAY}
}
# checks whether any teleport binaries (teleport/tctl/tsh) already exist on
# the host under TELEPORT_BINARY_DIR
# returns 0 if at least one binary is present, 1 if none are
# fix: the previous loop returned on its first iteration, so only the
# "teleport" binary was ever actually checked and tctl/tsh were ignored
teleport_binaries_exist() {
for BINARY_NAME in teleport tctl tsh; do
if [ -f "${TELEPORT_BINARY_DIR}/${BINARY_NAME}" ]; then return 0; fi
done
return 1
}
# checks whether a teleport config exists on the host
teleport_config_exists() {
[ -f ${TELEPORT_CONFIG_PATH} ]
}
# checks whether a teleport data dir exists on the host
teleport_datadir_exists() {
[ -d ${TELEPORT_DATA_DIR} ]
}
# error out if any required values are not set
check_set TELEPORT_VERSION
check_set TARGET_HOSTNAME
check_set TARGET_PORT
check_set JOIN_TOKEN
check_set CA_PIN_HASHES
if [[ "${APP_INSTALL_MODE}" == "true" ]]; then
check_set APP_NAME
check_set APP_URI
check_set APP_PUBLIC_ADDR
fi
###
# main script starts here
###
# check connectivity to teleport server/port
if [[ "${IGNORE_CONNECTIVITY_CHECK}" == "true" ]]; then
log "TELEPORT_IGNORE_CONNECTIVITY_CHECK=true, not running connectivity check"
else
log "Checking TCP connectivity to Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT})"
if ! check_connectivity "${TARGET_HOSTNAME}" "${TARGET_PORT}"; then
# if we don't have a connectivity test method assigned, we know we couldn't run the test
if [[ ${CONNECTIVITY_TEST_METHOD} == "" ]]; then
log "Couldn't find nc, telnet or /dev/tcp to do a connection test"
log "Going to blindly continue without testing connectivity"
else
log_important "Couldn't open a connection to the Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT}) via ${CONNECTIVITY_TEST_METHOD}"
log_important "This issue will need to be fixed before the script can continue."
log_important "If you think this is an error, add 'export TELEPORT_IGNORE_CONNECTIVITY_CHECK=true && ' before the curl command which runs the script."
exit 1
fi
else
log "Connectivity to Teleport server (via ${CONNECTIVITY_TEST_METHOD}) looks good"
fi
fi
# use OSTYPE variable to figure out host type/arch
if [[ "${OSTYPE}" == "linux-gnu"* ]]; then
# linux host, now detect arch
TELEPORT_BINARY_TYPE="linux"
ARCH=$(uname -m)
log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}"
if [[ ${ARCH} == "armv7l" ]]; then
TELEPORT_ARCH="arm"
elif [[ ${ARCH} == "aarch64" ]]; then
TELEPORT_ARCH="arm64"
elif [[ ${ARCH} == "x86_64" ]]; then
TELEPORT_ARCH="amd64"
elif [[ ${ARCH} == "i686" ]]; then
TELEPORT_ARCH="386"
else
log_important "Error: cannot detect architecture from uname -m: ${ARCH}"
exit 1
fi
log "Detected arch: ${ARCH}, using Teleport arch ${TELEPORT_ARCH}"
# if the download format is already set, we have no need to detect distro
if [[ ${TELEPORT_FORMAT} == "" ]]; then
# detect distro
# if /etc/os-release doesn't exist, we need to use some other logic
if [ ! -f /etc/os-release ]; then
# CentOS 6 / RHEL 5-6 ship a glibc too old for Teleport; bail out early
if [ -f /etc/centos-release ]; then
if grep -q 'CentOS release 6' /etc/centos-release; then
log_important "Detected host type: CentOS 6 [$(cat /etc/centos-release)]"
log_important "Teleport will not work on CentOS 6 -based servers due to the glibc version being too low."
exit 1
fi
elif [ -f /etc/redhat-release ]; then
if grep -q 'Red Hat Enterprise Linux Server release 5' /etc/redhat-release; then
log_important "Detected host type: RHEL5 [$(cat /etc/redhat-release)]"
log_important "Teleport will not work on RHEL5-based servers due to the glibc version being too low."
exit 1
elif grep -q 'Red Hat Enterprise Linux Server release 6' /etc/redhat-release; then
log_important "Detected host type: RHEL6 [$(cat /etc/redhat-release)]"
log_important "Teleport will not work on RHEL6-based servers due to the glibc version being too low."
exit 1
fi
fi
# use ID_LIKE value from /etc/os-release (if set)
# this is 'debian' on ubuntu/raspbian, 'centos rhel fedora' on amazon linux etc
else
check_exists_fatal cut
DISTRO_TYPE=$(grep ID_LIKE /etc/os-release | cut -d= -f2) || true
if [[ ${DISTRO_TYPE} == "" ]]; then
# use exact ID value from /etc/os-release if ID_LIKE is not set
DISTRO_TYPE=$(grep -w ID /etc/os-release | cut -d= -f2)
fi
# map the distro family to a package format (deb/rpm), tarball as fallback
if [[ ${DISTRO_TYPE} =~ "debian" ]]; then
TELEPORT_FORMAT="deb"
elif [[ "$DISTRO_TYPE" =~ "amzn"* ]] || [[ ${DISTRO_TYPE} =~ "centos"* ]] || [[ ${DISTRO_TYPE} =~ "rhel" ]] || [[ ${DISTRO_TYPE} =~ "fedora"* ]]; then
TELEPORT_FORMAT="rpm"
else
log "Couldn't match a distro type using /etc/os-release, falling back to tarball installer"
TELEPORT_FORMAT="tarball"
fi
fi
log "Detected distro type: ${DISTRO_TYPE}"
#suse, also identified as sles, uses a different path for its systemd then other distro types like ubuntu
if [[ ${DISTRO_TYPE} =~ "suse"* ]] || [[ ${DISTRO_TYPE} =~ "sles"* ]]; then
SYSTEMD_UNIT_PATH="/etc/systemd/system/teleport.service"
fi
fi
elif [[ "${OSTYPE}" == "darwin"* ]]; then
# macos host, now detect arch
TELEPORT_BINARY_TYPE="darwin"
ARCH=$(uname -m)
log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}"
if [[ ${ARCH} == "arm64" ]]; then
TELEPORT_ARCH="arm64"
elif [[ ${ARCH} == "x86_64" ]]; then
TELEPORT_ARCH="amd64"
else
log_important "Error: unsupported architecture from uname -m: ${ARCH}"
exit 1
fi
log "Detected MacOS ${ARCH} architecture, using Teleport arch ${TELEPORT_ARCH}"
TELEPORT_FORMAT="tarball"
else
log_important "Error - unsupported platform: ${OSTYPE}"
exit 1
fi
log "Using Teleport distribution: ${TELEPORT_FORMAT}"
# create temporary directory and exit cleanup logic
TEMP_DIR=$(mktemp -d -t teleport-XXXXXXXXXX)
log "Created temp dir ${TEMP_DIR}"
pushd "${TEMP_DIR}" >/dev/null 2>&1
finish() {
popd >/dev/null 2>&1
rm -rf "${TEMP_DIR}"
}
trap finish EXIT
# optional format override (mostly for testing)
if [[ ${OVERRIDE_FORMAT} != "" ]]; then
TELEPORT_FORMAT="${OVERRIDE_FORMAT}"
log "Overriding TELEPORT_FORMAT to ${OVERRIDE_FORMAT}"
fi
# check whether teleport is running already
# if it is, we exit gracefully with an error
# (-i / IGNORE_CHECKS skips each of the four pre-flight checks below)
if is_running_teleport; then
if [[ ${IGNORE_CHECKS} != "true" ]]; then
TELEPORT_PID=$(get_teleport_pid)
log_header "Warning: Teleport appears to already be running on this host (pid: ${TELEPORT_PID})"
log_cleanup_message
exit 1
else
log "Ignoring is_running_teleport as requested"
fi
fi
# check for existing config file
if teleport_config_exists; then
if [[ ${IGNORE_CHECKS} != "true" ]]; then
log_header "Warning: There is already a Teleport config file present at ${TELEPORT_CONFIG_PATH}."
log_cleanup_message
exit 1
else
log "Ignoring teleport_config_exists as requested"
fi
fi
# check for existing data directory
if teleport_datadir_exists; then
if [[ ${IGNORE_CHECKS} != "true" ]]; then
log_header "Warning: Found existing Teleport data directory (${TELEPORT_DATA_DIR})."
log_cleanup_message
exit 1
else
log "Ignoring teleport_datadir_exists as requested"
fi
fi
# check for existing binaries
if teleport_binaries_exist; then
if [[ ${IGNORE_CHECKS} != "true" ]]; then
log_header "Warning: Found existing Teleport binaries under ${TELEPORT_BINARY_DIR}."
log_cleanup_message
exit 1
else
log "Ignoring teleport_binaries_exist as requested"
fi
fi
# downloads and installs Teleport from a standalone artifact (no OS repo),
# choosing tarball, deb or rpm based on the detected TELEPORT_FORMAT
install_from_file() {
# select correct URL/installation method based on distro
if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then
URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-v${TELEPORT_VERSION}-${TELEPORT_BINARY_TYPE}-${TELEPORT_ARCH}-bin.tar.gz"
# check that needed tools are installed
check_exists_fatal curl tar
# download tarball
log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}"
DOWNLOAD_FILENAME=$(get_download_filename "${URL}")
download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
# extract tarball
tar -xzf "${TEMP_DIR}/${DOWNLOAD_FILENAME}" -C "${TEMP_DIR}"
# install binaries to /usr/local/bin
for BINARY in ${TELEPORT_BINARY_LIST}; do
${COPY_COMMAND} "${TELEPORT_ARCHIVE_PATH}/${BINARY}" "${TELEPORT_BINARY_DIR}/"
done
elif [[ ${TELEPORT_FORMAT} == "deb" ]]; then
# convert teleport arch to deb arch
if [[ ${TELEPORT_ARCH} == "amd64" ]]; then
DEB_ARCH="amd64"
elif [[ ${TELEPORT_ARCH} == "386" ]]; then
DEB_ARCH="i386"
elif [[ ${TELEPORT_ARCH} == "arm" ]]; then
DEB_ARCH="arm"
elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then
DEB_ARCH="arm64"
fi
URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}_${TELEPORT_VERSION}_${DEB_ARCH}.deb"
check_deb_not_already_installed
# check that needed tools are installed
check_exists_fatal curl dpkg
# download deb and register cleanup operation
log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}"
DOWNLOAD_FILENAME=$(get_download_filename "${URL}")
download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
# install deb
log "Using dpkg to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}"
dpkg -i "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
elif [[ ${TELEPORT_FORMAT} == "rpm" ]]; then
# convert teleport arch to rpm arch
if [[ ${TELEPORT_ARCH} == "amd64" ]]; then
RPM_ARCH="x86_64"
elif [[ ${TELEPORT_ARCH} == "386" ]]; then
RPM_ARCH="i386"
elif [[ ${TELEPORT_ARCH} == "arm" ]]; then
RPM_ARCH="arm"
elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then
RPM_ARCH="arm64"
fi
URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-${TELEPORT_VERSION}-1.${RPM_ARCH}.rpm"
check_rpm_not_already_installed
# check for package managers
# prefer dnf, then yum; fall back to bare rpm when neither is present
if check_exists dnf; then
log "Found 'dnf' package manager, using it"
PACKAGE_MANAGER_COMMAND="dnf -y install"
elif check_exists yum; then
log "Found 'yum' package manager, using it"
PACKAGE_MANAGER_COMMAND="yum -y localinstall"
else
PACKAGE_MANAGER_COMMAND=""
log "Cannot find 'yum' or 'dnf' package manager commands, will try installing the rpm manually instead"
fi
# check that needed tools are installed
check_exists_fatal curl
log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}"
DOWNLOAD_FILENAME=$(get_download_filename "${URL}")
download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
# install with package manager if available
if [[ ${PACKAGE_MANAGER_COMMAND} != "" ]]; then
log "Installing Teleport release from ${TEMP_DIR}/${DOWNLOAD_FILENAME} using ${PACKAGE_MANAGER_COMMAND}"
# install rpm with package manager
${PACKAGE_MANAGER_COMMAND} "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
# use rpm if we couldn't find a package manager
else
# install RPM (in upgrade mode)
log "Using rpm to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}"
rpm -Uvh "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
fi
else
log_important "Can't figure out what Teleport format to use"
exit 1
fi
}
# installs Teleport from the official apt/yum repositories
# configures the repo for the detected distro, then installs the packages
# returned by package_list()
install_from_repo() {
if [[ "${REPO_CHANNEL}" == "" ]]; then
# By default, use the current version's channel.
# ${TELEPORT_VERSION//.*/} strips from the first dot, e.g. 13.3.4 -> 13
REPO_CHANNEL=stable/v"${TELEPORT_VERSION//.*/}"
fi
# Populate $ID, $VERSION_ID, $VERSION_CODENAME and other env vars identifying the OS.
# shellcheck disable=SC1091
. /etc/os-release
PACKAGE_LIST=$(package_list)
if [ "$ID" == "debian" ] || [ "$ID" == "ubuntu" ]; then
# old versions of ubuntu require that keys get added by `apt-key add`, without
# adding the key apt shows a key signing error when installing teleport.
if [[
($ID == "ubuntu" && $VERSION_ID == "16.04") || \
($ID == "debian" && $VERSION_ID == "9" )
]]; then
apt install apt-transport-https gnupg -y
curl -fsSL https://apt.releases.teleport.dev/gpg | apt-key add -
echo "deb https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list
else
# modern apt: store the key in a keyring file and reference it via signed-by
curl -fsSL https://apt.releases.teleport.dev/gpg \
-o /usr/share/keyrings/teleport-archive-keyring.asc
echo "deb [signed-by=/usr/share/keyrings/teleport-archive-keyring.asc] \
https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list
fi
apt-get update
apt-get install -y ${PACKAGE_LIST}
elif [ "$ID" = "amzn" ] || [ "$ID" = "rhel" ] || [ "$ID" = "centos" ] ; then
if [ "$ID" = "rhel" ]; then
VERSION_ID="${VERSION_ID//.*/}" # convert version numbers like '7.2' to only include the major version
fi
yum install -y yum-utils
yum-config-manager --add-repo \
"$(rpm --eval "https://yum.releases.teleport.dev/$ID/$VERSION_ID/Teleport/%{_arch}/${REPO_CHANNEL}/teleport.repo")"
# Remove metadata cache to prevent cache from other channel (eg, prior version)
# See: https://github.com/gravitational/teleport/issues/22581
yum --disablerepo="*" --enablerepo="teleport" clean metadata
yum install -y ${PACKAGE_LIST}
else
echo "Unsupported distro: $ID"
exit 1
fi
}
# package_list prints the package name(s) to install, pinned to
# ${TELEPORT_VERSION} in the syntax the package manager expects
# (apt: pkg=ver, yum: pkg-ver with '-' in the version rewritten to '_').
# The output can be fed directly to apt-get/yum install.
package_list() {
  TELEPORT_PACKAGE_PIN_VERSION=${TELEPORT_PACKAGE_NAME}
  TELEPORT_UPDATER_PIN_VERSION="${TELEPORT_PACKAGE_NAME}-updater"
  case "${TELEPORT_FORMAT}" in
    deb)
      TELEPORT_PACKAGE_PIN_VERSION+="=${TELEPORT_VERSION}"
      TELEPORT_UPDATER_PIN_VERSION+="=${TELEPORT_VERSION}"
      ;;
    rpm)
      # yum cannot express '-' inside a version string; swap for '_'
      TELEPORT_YUM_VERSION="${TELEPORT_VERSION//-/_}"
      TELEPORT_PACKAGE_PIN_VERSION+="-${TELEPORT_YUM_VERSION}"
      TELEPORT_UPDATER_PIN_VERSION+="-${TELEPORT_YUM_VERSION}"
      ;;
  esac
  PACKAGE_LIST=${TELEPORT_PACKAGE_PIN_VERSION}
  # The literal "false" below is a server-side template slot; the updater is
  # only appended when the server rendered it as "true" AND systemd is present.
  # shellcheck disable=SC2050
  if is_using_systemd && [[ "false" == "true" ]]; then
    # Teleport Updater requires systemd.
    PACKAGE_LIST+=" ${TELEPORT_UPDATER_PIN_VERSION}"
  fi
  echo ${PACKAGE_LIST}
}
# is_repo_available returns 0 when a native Teleport package repository exists
# for this host (specific Ubuntu/Debian/RHEL/CentOS/Amazon Linux releases),
# 1 otherwise. Sources /etc/os-release to get $ID and $VERSION_ID.
is_repo_available() {
  # Package repositories are only published for glibc Linux.
  [[ "${OSTYPE}" == "linux-gnu" ]] || return 1
  # Populate $ID, $VERSION_ID and other env vars identifying the OS.
  # shellcheck disable=SC1091
  . /etc/os-release
  # The following distros+version have a Teleport repository to install from.
  case "${ID}-${VERSION_ID}" in
    ubuntu-16.04* | ubuntu-18.04* | ubuntu-20.04* | ubuntu-22.04* | \
    debian-9* | debian-10* | debian-11* | \
    rhel-7* | rhel-8* | rhel-9* | \
    centos-7* | centos-8* | centos-9* | \
    amzn-2 | amzn-2023)
      return 0
      ;;
    *)
      return 1
      ;;
  esac
}
# --- main install flow ---
# Prefer the OS package repository when one exists for this distro; otherwise
# fall back to downloading a standalone package or tarball.
if is_repo_available; then
  log "Installing repo for distro $ID."
  install_from_repo
else
  log "Installing from binary file."
  install_from_file
fi
# check that teleport binary can be found and runs
if ! check_teleport_binary; then
  log_important "The Teleport binary could not be found at ${TELEPORT_BINARY_DIR} as expected."
  log_important "This usually means that there was an error during installation."
  log_important "Check this log for obvious signs of error and contact Teleport support"
  log_important "for further assistance."
  exit 1
fi
# install teleport config
# check the mode and write the appropriate config type
if [[ "${APP_INSTALL_MODE}" == "true" ]]; then
  install_teleport_app_config
elif [[ "${DB_INSTALL_MODE}" == "true" ]]; then
  install_teleport_database_config
else
  install_teleport_node_config
fi
# Used to track whether a Teleport agent was installed using this method.
export TELEPORT_INSTALL_METHOD_NODE_SCRIPT="true"
# install systemd unit if applicable (linux hosts)
if is_using_systemd; then
  log "Host is using systemd"
  # we only need to manually install the systemd config if teleport was installed via tarball
  # all other packages will deploy it automatically
  if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then
    install_systemd_unit
  fi
  start_teleport_systemd
  print_welcome_message
# install launchd config on MacOS hosts
elif is_macos_host; then
  log "Host is running MacOS"
  install_launchd_config
  start_teleport_launchd
  print_welcome_message
# not a MacOS host and no systemd available, print a warning
# and temporarily start Teleport in the foreground
else
  log "Host does not appear to be using systemd"
  no_systemd_warning
  start_teleport_foreground
fi

View File

@@ -0,0 +1,995 @@
#!/bin/bash
# Teleport node installer: downloads, configures and starts a Teleport agent
# that joins the cluster at TARGET_HOSTNAME:TARGET_PORT using JOIN_TOKEN.
# Most defaults below are templated server-side before the script is served.
set -euo pipefail
SCRIPT_NAME="teleport-installer"
# default values
ALIVE_CHECK_DELAY=3
CONNECTIVITY_TEST_METHOD=""
COPY_COMMAND="cp"
DISTRO_TYPE=""
IGNORE_CONNECTIVITY_CHECK="${TELEPORT_IGNORE_CONNECTIVITY_CHECK:-false}"
LAUNCHD_CONFIG_PATH="/Library/LaunchDaemons"
LOG_FILENAME="$(mktemp -t ${SCRIPT_NAME}.log.XXXXXXXXXX)"
MACOS_STDERR_LOG="/var/log/teleport-stderr.log"
MACOS_STDOUT_LOG="/var/log/teleport-stdout.log"
SYSTEMD_UNIT_PATH="/lib/systemd/system/teleport.service"
TARGET_PORT_DEFAULT=443
TELEPORT_ARCHIVE_PATH='teleport'
TELEPORT_BINARY_DIR="/usr/local/bin"
TELEPORT_BINARY_LIST="teleport tctl tsh"
TELEPORT_CONFIG_PATH="/etc/teleport.yaml"
TELEPORT_DATA_DIR="/var/lib/teleport"
TELEPORT_DOCS_URL="https://goteleport.com/docs/"
TELEPORT_FORMAT=""
# initialise variables (because set -u disallows unbound variables)
f=""
l=""
DISABLE_TLS_VERIFICATION=false
NODENAME=$(hostname)
IGNORE_CHECKS=false
OVERRIDE_FORMAT=""
QUIET=false
APP_INSTALL_DECISION=""
INTERACTIVE=false
# the default value of each variable is a templatable Go value so that it can
# optionally be replaced by the server before the script is served up
TELEPORT_VERSION='13.3.4'
TELEPORT_PACKAGE_NAME='teleport'
REPO_CHANNEL=''
TARGET_HOSTNAME='teleport.datasaker.io'
TARGET_PORT='443'
# NOTE(review): a join token and CA pins are baked into this committed script;
# join tokens are normally short-lived — confirm committing them is intended.
JOIN_TOKEN='2df40c1ac8f47d7b155a92c134a77a84'
JOIN_METHOD=''
JOIN_METHOD_FLAG=""
[ -n "$JOIN_METHOD" ] && JOIN_METHOD_FLAG="--join-method ${JOIN_METHOD}"
# inject labels into the configuration
LABELS='teleport.internal/resource-id=0ec993a8-b1ec-4fa6-8fc5-4e73e3e5306e'
LABELS_FLAG=()
[ -n "$LABELS" ] && LABELS_FLAG=(--labels "${LABELS}")
# When all stanza generators have been updated to use the new
# `teleport <service> configure` commands CA_PIN_HASHES can be removed along
# with the script passing it in in `join_tokens.go`.
CA_PIN_HASHES='sha256:fcd4bcd57c9a2a7bd68c4140c2c46c0131cd31567c3e0c87e6b12258aa190836'
CA_PINS='sha256:fcd4bcd57c9a2a7bd68c4140c2c46c0131cd31567c3e0c87e6b12258aa190836'
ARG_CA_PIN_HASHES=""
APP_INSTALL_MODE='false'
APP_NAME=''
APP_URI=''
DB_INSTALL_MODE='false'
# usage message
# shellcheck disable=SC2086
usage() { echo "Usage: $(basename $0) [-v teleport_version] [-h target_hostname] [-p target_port] [-j join_token] [-c ca_pin_hash]... [-q] [-l log_filename] [-a app_name] [-u app_uri] " 1>&2; exit 1; }
# parse CLI flags; any flag overrides the templated default above
while getopts ":v:h:p:j:c:f:ql:ika:u:" o; do
  case "${o}" in
    v) TELEPORT_VERSION=${OPTARG};;
    h) TARGET_HOSTNAME=${OPTARG};;
    p) TARGET_PORT=${OPTARG};;
    j) JOIN_TOKEN=${OPTARG};;
    c) ARG_CA_PIN_HASHES="${ARG_CA_PIN_HASHES} ${OPTARG}";;
    f) f=${OPTARG}; if [[ ${f} != "tarball" && ${f} != "deb" && ${f} != "rpm" ]]; then usage; fi;;
    q) QUIET=true;;
    l) l=${OPTARG};;
    i) IGNORE_CHECKS=true; COPY_COMMAND="cp -f";;
    k) DISABLE_TLS_VERIFICATION=true;;
    a) APP_INSTALL_MODE=true && APP_NAME=${OPTARG};;
    u) APP_INSTALL_MODE=true && APP_URI=${OPTARG};;
    *) usage;;
  esac
done
shift $((OPTIND-1))
# CA pins passed via -c replace the templated defaults entirely
if [[ "${ARG_CA_PIN_HASHES}" != "" ]]; then
  CA_PIN_HASHES="${ARG_CA_PIN_HASHES}"
fi
# construct_go_template prints the Go template placeholder "{{.NAME}}" for the
# given name. The braces are assembled one character at a time because go's
# template parser is finicky about literal "{{" appearing in the script it
# serves, so the full token must never appear verbatim in this file.
construct_go_template() {
  local rendered='{'
  rendered+='{'
  rendered+='.'
  rendered+="${1}"
  rendered+='}'
  rendered+='}'
  echo "${rendered}"
}
# check whether we are root, exit if not
assert_running_as_root() {
  if ! [ "$(id -u)" = 0 ]; then
    echo "This script must be run as root." 1>&2
    exit 1
  fi
}
# function to check whether variables are either blank or set to the default go template value
# (because they haven't been set by the go script generator or a command line argument)
# returns 1 if the variable is set to a default/zero value
# returns 0 otherwise (i.e. it needs to be set interactively)
# $1: NAME of the variable to inspect (read via indirect expansion)
# $2: the Go template field name that would be its unrendered default
check_variable() {
  VARIABLE_VALUE="${!1}"
  GO_TEMPLATE_NAME=$(construct_go_template "${2}")
  if [[ "${VARIABLE_VALUE}" == "" ]] || [[ "${VARIABLE_VALUE}" == "${GO_TEMPLATE_NAME}" ]]; then
    return 1
  fi
  return 0
}
# is_truthy returns 0 when the argument is one of the accepted ways of typing
# "yes" (y/yes/ye/yep/ya in lower or upper case), 1 for anything else.
is_truthy() {
  case "$1" in
    y|Y|yes|YES|ye|YE|yep|YEP|ya|YA)
      return 0
      ;;
    *)
      return 1
      ;;
  esac
}
# function to read input until the value you get is non-empty
# $1: name of the variable to assign the result into (via printf -v)
# remaining args: the prompt re-displayed before every read attempt
read_nonblank_input() {
  INPUT=""
  VARIABLE_TO_ASSIGN="$1"
  shift
  PROMPT="$*"
  until [[ "${INPUT}" != "" ]]; do
    echo -n "${PROMPT}"
    read -r INPUT
  done
  # printf -v assigns into the caller-named variable without eval
  printf -v "${VARIABLE_TO_ASSIGN}" '%s' "${INPUT}"
}
# --- argument validation / interactive prompts ---
# error if we're not root
assert_running_as_root
# set/read values interactively if not provided
# users will be prompted to enter their own value if all the following are true:
# - the current value is blank, or equal to the default Go template value
# - the value has not been provided by command line argument
! check_variable TELEPORT_VERSION version && INTERACTIVE=true && read_nonblank_input TELEPORT_VERSION "Enter Teleport version to install (without v): "
! check_variable TARGET_HOSTNAME hostname && INTERACTIVE=true && read_nonblank_input TARGET_HOSTNAME "Enter target hostname to connect to: "
! check_variable TARGET_PORT port && INTERACTIVE=true && { echo -n "Enter target port to connect to [${TARGET_PORT_DEFAULT}]: "; read -r TARGET_PORT; }
! check_variable JOIN_TOKEN token && INTERACTIVE=true && read_nonblank_input JOIN_TOKEN "Enter Teleport join token as provided: "
! check_variable CA_PIN_HASHES caPins && INTERACTIVE=true && read_nonblank_input CA_PIN_HASHES "Enter CA pin hash (separate multiple hashes with spaces): "
[ -n "${f}" ] && OVERRIDE_FORMAT=${f}
[ -n "${l}" ] && LOG_FILENAME=${l}
# if app service mode is not set (or is the default value) and we are running interactively (i.e. the user has provided some input already),
# prompt the user to choose whether to enable app_service
if [[ "${INTERACTIVE}" == "true" ]]; then
  if ! check_variable APP_INSTALL_MODE appInstallMode; then
    APP_INSTALL_MODE="false"
    echo -n "Would you like to enable and configure Teleport's app_service, to use Teleport as a reverse proxy for a web application? [y/n, default: n] "
    read -r APP_INSTALL_DECISION
    if is_truthy "${APP_INSTALL_DECISION}"; then
      APP_INSTALL_MODE="true"
    fi
  fi
fi
# prompt for extra needed values if we're running in app service mode
if [[ "${APP_INSTALL_MODE}" == "true" ]]; then
  ! check_variable APP_NAME appName && read_nonblank_input APP_NAME "Enter app name to install (must be DNS-compatible; less than 63 characters, no spaces, only - or _ as punctuation): "
  ! check_variable APP_URI appURI && read_nonblank_input APP_URI "Enter app URI (the host running the Teleport app service must be able to connect to this): "
  # generate app public addr by concatenating values
  APP_PUBLIC_ADDR="${APP_NAME}.${TARGET_HOSTNAME}"
fi
# set default target port if value not provided
if [[ "${TARGET_PORT}" == "" ]]; then
  TARGET_PORT=${TARGET_PORT_DEFAULT}
fi
# clear log file if provided
if [[ "${LOG_FILENAME}" != "" ]]; then
  if [ -f "${LOG_FILENAME}" ]; then
    echo -n "" > "${LOG_FILENAME}"
  fi
fi
# log functions
# current timestamp, no trailing newline
log_date() { echo -n "$(date '+%Y-%m-%d %H:%M:%S %Z')"; }
# standard log line: timestamped, suppressed by -q, mirrored to LOG_FILENAME
log() {
  LOG_LINE="$(log_date) [${SCRIPT_NAME}] $*"
  if [[ ${QUIET} != "true" ]]; then
    echo "${LOG_LINE}"
  fi
  if [[ "${LOG_FILENAME}" != "" ]]; then
    echo "${LOG_LINE}" >> "${LOG_FILENAME}"
  fi
}
# writes a line with no timestamp or starting data, always prints
log_only() {
  LOG_LINE="$*"
  echo "${LOG_LINE}"
  if [[ "${LOG_FILENAME}" != "" ]]; then
    echo "${LOG_LINE}" >> "${LOG_FILENAME}"
  fi
}
# writes a line by itself as a header
log_header() {
  LOG_LINE="$*"
  echo ""
  echo "${LOG_LINE}"
  echo ""
  if [[ "${LOG_FILENAME}" != "" ]]; then
    echo "${LOG_LINE}" >> "${LOG_FILENAME}"
  fi
}
# important log lines, print even when -q (quiet) is passed
log_important() {
  LOG_LINE="$(log_date) [${SCRIPT_NAME}] ---> $*"
  echo "${LOG_LINE}"
  if [[ "${LOG_FILENAME}" != "" ]]; then
    echo "${LOG_LINE}" >> "${LOG_FILENAME}"
  fi
}
# prints manual-cleanup instructions shown when an existing install is detected
log_cleanup_message() {
  log_only "This script does not overwrite any existing settings or Teleport installations."
  log_only "Please clean up by running any of the following steps as necessary:"
  log_only "- stop any running Teleport processes"
  log_only "  - pkill -f teleport"
  log_only "- remove any data under ${TELEPORT_DATA_DIR}, along with the directory itself"
  log_only "  - rm -rf ${TELEPORT_DATA_DIR}"
  log_only "- remove any configuration at ${TELEPORT_CONFIG_PATH}"
  log_only "  - rm -f ${TELEPORT_CONFIG_PATH}"
  log_only "- remove any Teleport binaries (${TELEPORT_BINARY_LIST}) installed under ${TELEPORT_BINARY_DIR}"
  for BINARY in ${TELEPORT_BINARY_LIST}; do EXAMPLE_DELETE_COMMAND+="${TELEPORT_BINARY_DIR}/${BINARY} "; done
  log_only "  - rm -f ${EXAMPLE_DELETE_COMMAND}"
  log_only "Run this installer again when done."
  log_only
}
# other functions
# check whether a named program exists
check_exists() { NAME=$1; if type "${NAME}" >/dev/null 2>&1; then return 0; else return 1; fi; }
# checks for the existence of a list of named binaries and exits with error if any of them don't exist
check_exists_fatal() {
  for TOOL in "$@"; do
    if ! check_exists "${TOOL}"; then
      log_important "Error: cannot find ${TOOL} - it needs to be installed"
      exit 1
    fi
  done
}
# check connectivity to the given host/port and make a request to see if Teleport is listening
# uses the global variable CONNECTIVITY_TEST_METHOD to return the name of the checker, as return
# values aren't really a thing that exists in bash
# returns: 0 reachable, 1 unreachable, 255 no test tool available
check_connectivity() {
  HOST=$1
  PORT=$2
  # check with nc
  if check_exists nc; then
    CONNECTIVITY_TEST_METHOD="nc"
    if nc -z -w3 "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi
  # if there's no nc, check with telnet
  elif check_exists telnet; then
    CONNECTIVITY_TEST_METHOD="telnet"
    if echo -e '\x1dclose\x0d' | telnet "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi
  # if there's no nc or telnet, try and use /dev/tcp
  # NOTE(review): /dev/tcp is a bash virtual path that usually has no filesystem
  # entry, so this -f test likely never succeeds — confirm intent upstream.
  elif [ -f /dev/tcp ]; then
    CONNECTIVITY_TEST_METHOD="/dev/tcp"
    if (head -1 < "/dev/tcp/${HOST}/${PORT}") >/dev/null 2>&1; then return 0; else return 1; fi
  else
    return 255
  fi
}
# check whether a teleport DEB is already installed and exit with error if so
check_deb_not_already_installed() {
  check_exists_fatal dpkg awk
  DEB_INSTALLED=$(dpkg -l | awk '{print $2}' | grep -E ^teleport || true)
  if [[ ${DEB_INSTALLED} != "" ]]; then
    log_important "It looks like there is already a Teleport DEB package installed (name: ${DEB_INSTALLED})."
    log_important "You will need to remove that package before using this script."
    exit 1
  fi
}
# check whether a teleport RPM is already installed and exit with error if so
check_rpm_not_already_installed() {
  check_exists_fatal rpm
  RPM_INSTALLED=$(rpm -qa | grep -E ^teleport || true)
  if [[ ${RPM_INSTALLED} != "" ]]; then
    log_important "It looks like there is already a Teleport RPM package installed (name: ${RPM_INSTALLED})."
    log_important "You will need to remove that package before using this script."
    exit 1
  fi
}
# function to check if given variable is set
# $1: NAME of the variable (value read via indirect expansion); exits if empty
check_set() {
  CHECK_KEY=${1} || true
  CHECK_VALUE=${!1} || true
  if [[ "${CHECK_VALUE}" == "" ]]; then
    log "Required variable ${CHECK_KEY} is not set"
    exit 1
  else
    log "${CHECK_KEY}: ${CHECK_VALUE}"
  fi
}
# checks that teleport binary can be found in path and runs 'teleport version'
check_teleport_binary() {
  FOUND_TELEPORT_VERSION=$(${TELEPORT_BINARY_DIR}/teleport version)
  if [[ "${FOUND_TELEPORT_VERSION}" == "" ]]; then
    log "Cannot find Teleport binary"
    return 1
  else
    log "Found: ${FOUND_TELEPORT_VERSION}";
    return 0
  fi
}
# wrapper to download with curl
# $1: URL to fetch, $2: local output path; validates non-zero size and, when a
# sha utility is present, the published .sha256 checksum. Exits on any failure.
download() {
  URL=$1
  OUTPUT_PATH=$2
  CURL_COMMAND="curl -fsSL --retry 5 --retry-delay 5"
  # optionally allow disabling of TLS verification (can be useful on older distros
  # which often have an out-of-date set of CA certificate bundle which won't validate)
  if [[ ${DISABLE_TLS_VERIFICATION} == "true" ]]; then
    CURL_COMMAND+=" -k"
  fi
  log "Running ${CURL_COMMAND} ${URL}"
  log "Downloading to ${OUTPUT_PATH}"
  # handle errors with curl
  if ! ${CURL_COMMAND} -o "${OUTPUT_PATH}" "${URL}"; then
    log_important "curl error downloading ${URL}"
    log "On an older OS, this may be related to the CA certificate bundle being too old."
    log "You can pass the hidden -k flag to this script to disable TLS verification - this is not recommended!"
    exit 1
  fi
  # check that the file has a non-zero size as an extra validation
  check_exists_fatal wc xargs
  FILE_SIZE="$(wc -c <"${OUTPUT_PATH}" | xargs)"
  if [ "${FILE_SIZE}" -eq 0 ]; then
    log_important "The downloaded file has a size of 0 bytes, which means an error occurred. Cannot continue."
    exit 1
  else
    log "Downloaded file size: ${FILE_SIZE} bytes"
  fi
  # if we have a hashing utility installed, also download and validate the checksum
  SHA_COMMAND=""
  # shasum is installed by default on MacOS and some distros
  if check_exists shasum; then
    SHA_COMMAND="shasum -a 256"
  # sha256sum is installed by default in some other distros
  elif check_exists sha256sum; then
    SHA_COMMAND="sha256sum"
  fi
  if [[ "${SHA_COMMAND}" != "" ]]; then
    log "Will use ${SHA_COMMAND} to validate the checksum of the downloaded file"
    SHA_URL="${URL}.sha256"
    SHA_PATH="${OUTPUT_PATH}.sha256"
    ${CURL_COMMAND} -o "${SHA_PATH}" "${SHA_URL}"
    if ${SHA_COMMAND} --status -c "${SHA_PATH}"; then
      log "The downloaded file's checksum validated correctly"
    else
      SHA_EXPECTED=$(cat "${SHA_PATH}")
      SHA_ACTUAL=$(${SHA_COMMAND} "${OUTPUT_PATH}")
      # strip filenames from the sha output so the log shows bare digests
      if check_exists awk; then
        SHA_EXPECTED=$(echo "${SHA_EXPECTED}" | awk '{print $1}')
        SHA_ACTUAL=$(echo "${SHA_ACTUAL}" | awk '{print $1}')
      fi
      log_important "Checksum of the downloaded file did not validate correctly"
      log_important "Expected: ${SHA_EXPECTED}"
      log_important "Got: ${SHA_ACTUAL}"
      log_important "Try rerunning this script from the start. If the issue persists, contact Teleport support."
      exit 1
    fi
  else
    log "shasum/sha256sum utilities not found, will skip checksum validation"
  fi
}
# get_download_filename strips everything up to the last "/" to recover the
# bare filename (https://target.site/path/to/file.tar.gz -> file.tar.gz).
get_download_filename() {
  local full_url="$1"
  echo "${full_url##*/}"
}
# gets the pid of any running teleport process (and converts newlines to spaces)
get_teleport_pid() {
  check_exists_fatal pgrep xargs
  pgrep teleport | xargs echo
}
# returns a command which will start teleport using the config
get_teleport_start_command() {
  echo "${TELEPORT_BINARY_DIR}/teleport start --config=${TELEPORT_CONFIG_PATH}"
}
# installs the teleport-provided launchd config
# (COPY_COMMAND is "cp", or "cp -f" when -i/ignore-checks was passed)
install_launchd_config() {
  log "Installing Teleport launchd config to ${LAUNCHD_CONFIG_PATH}"
  ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/launchd/com.goteleport.teleport.plist ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist
}
# installs the teleport-provided systemd unit
install_systemd_unit() {
  log "Installing Teleport systemd unit to ${SYSTEMD_UNIT_PATH}"
  ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/systemd/teleport.service ${SYSTEMD_UNIT_PATH}
  log "Reloading unit files (systemctl daemon-reload)"
  systemctl daemon-reload
}
# get_yaml_list renders a YAML list: a "name:" header followed by one "- item"
# line per whitespace-separated word in $2, every line prefixed with the
# indentation string in $3.
get_yaml_list() {
  local key="${1}"
  local items="${2}"
  local pad="${3}"
  echo "${pad}${key}:"
  local entry
  for entry in ${items}; do
    echo "${pad}- ${entry}"
  done
}
# installs the provided teleport config (for app service)
# writes /etc/teleport.yaml enabling only app_service; heredoc indentation is
# significant — it becomes the YAML file's structure.
install_teleport_app_config() {
  log "Writing Teleport app service config to ${TELEPORT_CONFIG_PATH}"
  CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" "  ")
  cat << EOF > ${TELEPORT_CONFIG_PATH}
version: v3
teleport:
  nodename: ${NODENAME}
  auth_token: ${JOIN_TOKEN}
${CA_PINS_CONFIG}
  proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT}
  log:
    output: stderr
    severity: INFO
auth_service:
  enabled: no
ssh_service:
  enabled: no
proxy_service:
  enabled: no
app_service:
  enabled: yes
  apps:
  - name: "${APP_NAME}"
    uri: "${APP_URI}"
    public_addr: ${APP_PUBLIC_ADDR}
EOF
}
# installs the provided teleport config (for database service)
install_teleport_database_config() {
  log "Writing Teleport database service config to ${TELEPORT_CONFIG_PATH}"
  CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" "  ")
  # This file is processed by `shellschek` as part of the lint step
  # It detects an issue because of un-set variables - $index and $line. This check is called SC2154.
  # However, that's not an issue, because those variables are replaced when we run go's text/template engine over it.
  # When executing the script, those are no long variables but actual values.
  # shellcheck disable=SC2154
  cat << EOF > ${TELEPORT_CONFIG_PATH}
version: v3
teleport:
  nodename: ${NODENAME}
  auth_token: ${JOIN_TOKEN}
${CA_PINS_CONFIG}
  proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT}
  log:
    output: stderr
    severity: INFO
auth_service:
  enabled: no
ssh_service:
  enabled: no
proxy_service:
  enabled: no
db_service:
  enabled: "yes"
  resources:
    - labels:
EOF
}
# installs the provided teleport config (for node service)
# delegates to `teleport node configure` instead of writing YAML by hand
install_teleport_node_config() {
  log "Writing Teleport node service config to ${TELEPORT_CONFIG_PATH}"
  ${TELEPORT_BINARY_DIR}/teleport node configure \
    --token ${JOIN_TOKEN} \
    ${JOIN_METHOD_FLAG} \
    --ca-pin ${CA_PINS} \
    --proxy ${TARGET_HOSTNAME}:${TARGET_PORT} \
    "${LABELS_FLAG[@]}" \
    --output ${TELEPORT_CONFIG_PATH}
}
# checks whether the given host is running MacOS
is_macos_host() { if [[ ${OSTYPE} == "darwin"* ]]; then return 0; else return 1; fi }
# checks whether teleport is already running on the host
is_running_teleport() {
  check_exists_fatal pgrep
  TELEPORT_PID=$(get_teleport_pid)
  if [[ "${TELEPORT_PID}" != "" ]]; then return 0; else return 1; fi
}
# checks whether the given host is running systemd as its init system
is_using_systemd() { if [ -d /run/systemd/system ]; then return 0; else return 1; fi }
# prints a warning if the host isn't running systemd
no_systemd_warning() {
  log_important "This host is not running systemd, so Teleport cannot be started automatically when it exits."
  log_important "Please investigate an alternative way to keep Teleport running."
  log_important "You can find information in our documentation: ${TELEPORT_DOCS_URL}"
  log_important "For now, Teleport will be started in the foreground - you can press Ctrl+C to exit."
  log_only
  log_only "Run this command to start Teleport in future:"
  log_only "$(get_teleport_start_command)"
  log_only
  log_only "------------------------------------------------------------------------"
  log_only "| IMPORTANT: TELEPORT WILL STOP RUNNING AFTER YOU CLOSE THIS TERMINAL! |"
  log_only "|   YOU MUST CONFIGURE A SERVICE MANAGER TO MAKE IT RUN ON STARTUP!    |"
  log_only "------------------------------------------------------------------------"
  log_only
}
# print a message giving the name of the node and a link to the docs
# gives some debugging instructions if the service didn't start successfully
print_welcome_message() {
  log_only ""
  if is_running_teleport; then
    log_only "Teleport has been started."
    log_only ""
    if is_using_systemd; then
      log_only "View its status with 'sudo systemctl status teleport.service'"
      log_only "View Teleport logs using 'sudo journalctl -u teleport.service'"
      log_only "To stop Teleport, run 'sudo systemctl stop teleport.service'"
      log_only "To start Teleport again if you stop it, run 'sudo systemctl start teleport.service'"
    elif is_macos_host; then
      log_only "View Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'"
      log_only "To stop Teleport, run 'sudo launchctl unload ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'"
      log_only "To start Teleport again if you stop it, run 'sudo launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'"
    fi
    log_only ""
    log_only "You can see this node connected in the Teleport web UI or 'tsh ls' with the name '${NODENAME}'"
    log_only "Find more details on how to use Teleport here: https://goteleport.com/docs/user-manual/"
  else
    log_important "The Teleport service was installed, but it does not appear to have started successfully."
    if is_using_systemd; then
      log_important "Check the Teleport service's status with 'systemctl status teleport.service'"
      log_important "View Teleport logs with 'journalctl -u teleport.service'"
    elif is_macos_host; then
      log_important "Check Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'"
    fi
    log_important "Contact Teleport support for further assistance."
  fi
  log_only ""
}
# start teleport in foreground (when there's no systemd)
start_teleport_foreground() {
  log "Starting Teleport in the foreground"
  # shellcheck disable=SC2091
  $(get_teleport_start_command)
}
# start teleport via launchd (after installing config)
start_teleport_launchd() {
  log "Starting Teleport via launchctl. It will automatically be started whenever the system reboots."
  launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist
  sleep ${ALIVE_CHECK_DELAY}
}
# start teleport via systemd (after installing unit)
start_teleport_systemd() {
  log "Starting Teleport via systemd. It will automatically be started whenever the system reboots."
  systemctl enable teleport.service
  systemctl start teleport.service
  sleep ${ALIVE_CHECK_DELAY}
}
# checks whether ALL teleport binaries (teleport, tctl, tsh) exist on the host
# under ${TELEPORT_BINARY_DIR}; returns 0 only when every one is present.
# Bug fix: the previous version returned inside the loop's if/else on the very
# first iteration, so it only ever inspected the `teleport` binary and reported
# success even when tctl/tsh were missing.
teleport_binaries_exist() {
  for BINARY_NAME in teleport tctl tsh; do
    if [ ! -f "${TELEPORT_BINARY_DIR}/${BINARY_NAME}" ]; then return 1; fi
  done
  return 0
}
# checks whether a teleport config exists on the host
teleport_config_exists() { if [ -f ${TELEPORT_CONFIG_PATH} ]; then return 0; else return 1; fi; }
# checks whether a teleport data dir exists on the host
teleport_datadir_exists() { if [ -d ${TELEPORT_DATA_DIR} ]; then return 0; else return 1; fi; }
# error out if any required values are not set
check_set TELEPORT_VERSION
check_set TARGET_HOSTNAME
check_set TARGET_PORT
check_set JOIN_TOKEN
check_set CA_PIN_HASHES
if [[ "${APP_INSTALL_MODE}" == "true" ]]; then
check_set APP_NAME
check_set APP_URI
check_set APP_PUBLIC_ADDR
fi
###
# main script starts here
###
# check connectivity to teleport server/port
if [[ "${IGNORE_CONNECTIVITY_CHECK}" == "true" ]]; then
log "TELEPORT_IGNORE_CONNECTIVITY_CHECK=true, not running connectivity check"
else
log "Checking TCP connectivity to Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT})"
if ! check_connectivity "${TARGET_HOSTNAME}" "${TARGET_PORT}"; then
# if we don't have a connectivity test method assigned, we know we couldn't run the test
if [[ ${CONNECTIVITY_TEST_METHOD} == "" ]]; then
log "Couldn't find nc, telnet or /dev/tcp to do a connection test"
log "Going to blindly continue without testing connectivity"
else
log_important "Couldn't open a connection to the Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT}) via ${CONNECTIVITY_TEST_METHOD}"
log_important "This issue will need to be fixed before the script can continue."
log_important "If you think this is an error, add 'export TELEPORT_IGNORE_CONNECTIVITY_CHECK=true && ' before the curl command which runs the script."
exit 1
fi
else
log "Connectivity to Teleport server (via ${CONNECTIVITY_TEST_METHOD}) looks good"
fi
fi
# use OSTYPE variable to figure out host type/arch
if [[ "${OSTYPE}" == "linux-gnu"* ]]; then
# linux host, now detect arch
TELEPORT_BINARY_TYPE="linux"
ARCH=$(uname -m)
log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}"
if [[ ${ARCH} == "armv7l" ]]; then
TELEPORT_ARCH="arm"
elif [[ ${ARCH} == "aarch64" ]]; then
TELEPORT_ARCH="arm64"
elif [[ ${ARCH} == "x86_64" ]]; then
TELEPORT_ARCH="amd64"
elif [[ ${ARCH} == "i686" ]]; then
TELEPORT_ARCH="386"
else
log_important "Error: cannot detect architecture from uname -m: ${ARCH}"
exit 1
fi
log "Detected arch: ${ARCH}, using Teleport arch ${TELEPORT_ARCH}"
# if the download format is already set, we have no need to detect distro
if [[ ${TELEPORT_FORMAT} == "" ]]; then
# detect distro
# if /etc/os-release doesn't exist, we need to use some other logic
if [ ! -f /etc/os-release ]; then
if [ -f /etc/centos-release ]; then
if grep -q 'CentOS release 6' /etc/centos-release; then
log_important "Detected host type: CentOS 6 [$(cat /etc/centos-release)]"
log_important "Teleport will not work on CentOS 6 -based servers due to the glibc version being too low."
exit 1
fi
elif [ -f /etc/redhat-release ]; then
if grep -q 'Red Hat Enterprise Linux Server release 5' /etc/redhat-release; then
log_important "Detected host type: RHEL5 [$(cat /etc/redhat-release)]"
log_important "Teleport will not work on RHEL5-based servers due to the glibc version being too low."
exit 1
elif grep -q 'Red Hat Enterprise Linux Server release 6' /etc/redhat-release; then
log_important "Detected host type: RHEL6 [$(cat /etc/redhat-release)]"
log_important "Teleport will not work on RHEL6-based servers due to the glibc version being too low."
exit 1
fi
fi
# use ID_LIKE value from /etc/os-release (if set)
# this is 'debian' on ubuntu/raspbian, 'centos rhel fedora' on amazon linux etc
else
check_exists_fatal cut
DISTRO_TYPE=$(grep ID_LIKE /etc/os-release | cut -d= -f2) || true
if [[ ${DISTRO_TYPE} == "" ]]; then
# use exact ID value from /etc/os-release if ID_LIKE is not set
DISTRO_TYPE=$(grep -w ID /etc/os-release | cut -d= -f2)
fi
if [[ ${DISTRO_TYPE} =~ "debian" ]]; then
TELEPORT_FORMAT="deb"
elif [[ "$DISTRO_TYPE" =~ "amzn"* ]] || [[ ${DISTRO_TYPE} =~ "centos"* ]] || [[ ${DISTRO_TYPE} =~ "rhel" ]] || [[ ${DISTRO_TYPE} =~ "fedora"* ]]; then
TELEPORT_FORMAT="rpm"
else
log "Couldn't match a distro type using /etc/os-release, falling back to tarball installer"
TELEPORT_FORMAT="tarball"
fi
fi
log "Detected distro type: ${DISTRO_TYPE}"
#suse, also identified as sles, uses a different path for its systemd then other distro types like ubuntu
if [[ ${DISTRO_TYPE} =~ "suse"* ]] || [[ ${DISTRO_TYPE} =~ "sles"* ]]; then
SYSTEMD_UNIT_PATH="/etc/systemd/system/teleport.service"
fi
fi
elif [[ "${OSTYPE}" == "darwin"* ]]; then
# macos host, now detect arch
TELEPORT_BINARY_TYPE="darwin"
ARCH=$(uname -m)
log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}"
if [[ ${ARCH} == "arm64" ]]; then
TELEPORT_ARCH="arm64"
elif [[ ${ARCH} == "x86_64" ]]; then
TELEPORT_ARCH="amd64"
else
log_important "Error: unsupported architecture from uname -m: ${ARCH}"
exit 1
fi
log "Detected MacOS ${ARCH} architecture, using Teleport arch ${TELEPORT_ARCH}"
TELEPORT_FORMAT="tarball"
else
log_important "Error - unsupported platform: ${OSTYPE}"
exit 1
fi
# Report the delivery format chosen by the detection phase (deb / rpm / tarball).
log "Using Teleport distribution: ${TELEPORT_FORMAT}"
# create temporary directory and exit cleanup logic
TEMP_DIR=$(mktemp -d -t teleport-XXXXXXXXXX)
log "Created temp dir ${TEMP_DIR}"
pushd "${TEMP_DIR}" >/dev/null 2>&1
# finish: leave the temp dir and delete it; registered as an EXIT trap below
# so the workspace is cleaned up on success and on every error/exit path.
finish() {
    popd >/dev/null 2>&1
    rm -rf "${TEMP_DIR}"
}
trap finish EXIT
# optional format override (mostly for testing)
if [[ ${OVERRIDE_FORMAT} != "" ]]; then
    TELEPORT_FORMAT="${OVERRIDE_FORMAT}"
    log "Overriding TELEPORT_FORMAT to ${OVERRIDE_FORMAT}"
fi
# Pre-flight safety checks: refuse to install over an existing Teleport
# process, config, data dir or binaries unless IGNORE_CHECKS=true.
# check whether teleport is running already
# if it is, we exit gracefully with an error
if is_running_teleport; then
    if [[ ${IGNORE_CHECKS} != "true" ]]; then
        TELEPORT_PID=$(get_teleport_pid)
        log_header "Warning: Teleport appears to already be running on this host (pid: ${TELEPORT_PID})"
        log_cleanup_message
        exit 1
    else
        log "Ignoring is_running_teleport as requested"
    fi
fi
# check for existing config file
if teleport_config_exists; then
    if [[ ${IGNORE_CHECKS} != "true" ]]; then
        log_header "Warning: There is already a Teleport config file present at ${TELEPORT_CONFIG_PATH}."
        log_cleanup_message
        exit 1
    else
        log "Ignoring teleport_config_exists as requested"
    fi
fi
# check for existing data directory
if teleport_datadir_exists; then
    if [[ ${IGNORE_CHECKS} != "true" ]]; then
        log_header "Warning: Found existing Teleport data directory (${TELEPORT_DATA_DIR})."
        log_cleanup_message
        exit 1
    else
        log "Ignoring teleport_datadir_exists as requested"
    fi
fi
# check for existing binaries
if teleport_binaries_exist; then
    if [[ ${IGNORE_CHECKS} != "true" ]]; then
        log_header "Warning: Found existing Teleport binaries under ${TELEPORT_BINARY_DIR}."
        log_cleanup_message
        exit 1
    else
        log "Ignoring teleport_binaries_exist as requested"
    fi
fi
# install_from_file: download a standalone Teleport artifact and install it.
# Dispatches on ${TELEPORT_FORMAT}: "tarball" copies the binaries into
# ${TELEPORT_BINARY_DIR}, "deb" installs via dpkg, "rpm" installs via
# dnf/yum/rpm. Relies on globals set by the detection phase
# (TELEPORT_VERSION, TELEPORT_ARCH, TELEPORT_PACKAGE_NAME, TEMP_DIR, ...).
# Exits non-zero on unsupported format or architecture.
install_from_file() {
    # select correct URL/installation method based on distro
    if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then
        URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-v${TELEPORT_VERSION}-${TELEPORT_BINARY_TYPE}-${TELEPORT_ARCH}-bin.tar.gz"
        # check that needed tools are installed
        check_exists_fatal curl tar
        # download tarball
        log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}"
        DOWNLOAD_FILENAME=$(get_download_filename "${URL}")
        download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
        # extract tarball
        tar -xzf "${TEMP_DIR}/${DOWNLOAD_FILENAME}" -C "${TEMP_DIR}"
        # install binaries to /usr/local/bin
        for BINARY in ${TELEPORT_BINARY_LIST}; do
            ${COPY_COMMAND} "${TELEPORT_ARCHIVE_PATH}/${BINARY}" "${TELEPORT_BINARY_DIR}/"
        done
    elif [[ ${TELEPORT_FORMAT} == "deb" ]]; then
        # convert teleport arch to deb arch
        if [[ ${TELEPORT_ARCH} == "amd64" ]]; then
            DEB_ARCH="amd64"
        elif [[ ${TELEPORT_ARCH} == "386" ]]; then
            DEB_ARCH="i386"
        elif [[ ${TELEPORT_ARCH} == "arm" ]]; then
            DEB_ARCH="arm"
        elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then
            DEB_ARCH="arm64"
        else
            # previously fell through with DEB_ARCH unset, building a
            # malformed URL and failing later with a confusing download error
            log_important "Error: unsupported architecture for deb install: ${TELEPORT_ARCH}"
            exit 1
        fi
        URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}_${TELEPORT_VERSION}_${DEB_ARCH}.deb"
        check_deb_not_already_installed
        # check that needed tools are installed
        check_exists_fatal curl dpkg
        # download deb and register cleanup operation
        log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}"
        DOWNLOAD_FILENAME=$(get_download_filename "${URL}")
        download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
        # install deb
        log "Using dpkg to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}"
        dpkg -i "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
    elif [[ ${TELEPORT_FORMAT} == "rpm" ]]; then
        # convert teleport arch to rpm arch
        if [[ ${TELEPORT_ARCH} == "amd64" ]]; then
            RPM_ARCH="x86_64"
        elif [[ ${TELEPORT_ARCH} == "386" ]]; then
            RPM_ARCH="i386"
        elif [[ ${TELEPORT_ARCH} == "arm" ]]; then
            RPM_ARCH="arm"
        elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then
            RPM_ARCH="arm64"
        else
            # same fall-through protection as the deb branch above
            log_important "Error: unsupported architecture for rpm install: ${TELEPORT_ARCH}"
            exit 1
        fi
        URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-${TELEPORT_VERSION}-1.${RPM_ARCH}.rpm"
        check_rpm_not_already_installed
        # check for package managers
        if check_exists dnf; then
            log "Found 'dnf' package manager, using it"
            PACKAGE_MANAGER_COMMAND="dnf -y install"
        elif check_exists yum; then
            log "Found 'yum' package manager, using it"
            PACKAGE_MANAGER_COMMAND="yum -y localinstall"
        else
            PACKAGE_MANAGER_COMMAND=""
            log "Cannot find 'yum' or 'dnf' package manager commands, will try installing the rpm manually instead"
        fi
        # check that needed tools are installed
        check_exists_fatal curl
        log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}"
        DOWNLOAD_FILENAME=$(get_download_filename "${URL}")
        download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
        # install with package manager if available
        if [[ ${PACKAGE_MANAGER_COMMAND} != "" ]]; then
            log "Installing Teleport release from ${TEMP_DIR}/${DOWNLOAD_FILENAME} using ${PACKAGE_MANAGER_COMMAND}"
            # install rpm with package manager (word splitting of the command is intended)
            ${PACKAGE_MANAGER_COMMAND} "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
        # use rpm if we couldn't find a package manager
        else
            # install RPM (in upgrade mode)
            log "Using rpm to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}"
            rpm -Uvh "${TEMP_DIR}/${DOWNLOAD_FILENAME}"
        fi
    else
        log_important "Can't figure out what Teleport format to use"
        exit 1
    fi
}
# install_from_repo: install Teleport from the official apt/yum repositories.
# Derives the repo channel from ${TELEPORT_VERSION} unless REPO_CHANNEL is
# already set, registers the distro-appropriate repository, then installs
# the pinned package list produced by package_list().
install_from_repo() {
    if [[ "${REPO_CHANNEL}" == "" ]]; then
        # By default, use the current version's channel.
        REPO_CHANNEL=stable/v"${TELEPORT_VERSION//.*/}"
    fi
    # Populate $ID, $VERSION_ID, $VERSION_CODENAME and other env vars identifying the OS.
    # shellcheck disable=SC1091
    . /etc/os-release
    PACKAGE_LIST=$(package_list)
    if [ "$ID" == "debian" ] || [ "$ID" == "ubuntu" ]; then
        # old versions of ubuntu require that keys get added by `apt-key add`, without
        # adding the key apt shows a key signing error when installing teleport.
        if [[
            ($ID == "ubuntu" && $VERSION_ID == "16.04") || \
            ($ID == "debian" && $VERSION_ID == "9" )
        ]]; then
            # use apt-get instead of apt here: apt warns that its CLI is not a
            # stable scripting interface (the rest of this branch already uses apt-get)
            apt-get install -y apt-transport-https gnupg
            curl -fsSL https://apt.releases.teleport.dev/gpg | apt-key add -
            echo "deb https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list
        else
            curl -fsSL https://apt.releases.teleport.dev/gpg \
                -o /usr/share/keyrings/teleport-archive-keyring.asc
            echo "deb [signed-by=/usr/share/keyrings/teleport-archive-keyring.asc] \
https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list
        fi
        apt-get update
        # PACKAGE_LIST is intentionally unquoted: it may hold several packages
        # shellcheck disable=SC2086
        apt-get install -y ${PACKAGE_LIST}
    elif [ "$ID" = "amzn" ] || [ "$ID" = "rhel" ] || [ "$ID" = "centos" ] ; then
        if [ "$ID" = "rhel" ]; then
            VERSION_ID="${VERSION_ID//.*/}" # convert version numbers like '7.2' to only include the major version
        fi
        yum install -y yum-utils
        yum-config-manager --add-repo \
            "$(rpm --eval "https://yum.releases.teleport.dev/$ID/$VERSION_ID/Teleport/%{_arch}/${REPO_CHANNEL}/teleport.repo")"
        # Remove metadata cache to prevent cache from other channel (eg, prior version)
        # See: https://github.com/gravitational/teleport/issues/22581
        yum --disablerepo="*" --enablerepo="teleport" clean metadata
        # shellcheck disable=SC2086
        yum install -y ${PACKAGE_LIST}
    else
        echo "Unsupported distro: $ID"
        exit 1
    fi
}
# package_list prints the space-separated package list to install, pinned to
# the exact Teleport version in the syntax the active package manager expects
# (name=version for deb, name-version for rpm). The list can be fed directly
# to apt-get or yum.
package_list() {
    TELEPORT_PACKAGE_PIN_VERSION=${TELEPORT_PACKAGE_NAME}
    TELEPORT_UPDATER_PIN_VERSION="${TELEPORT_PACKAGE_NAME}-updater"
    case "${TELEPORT_FORMAT}" in
        deb)
            TELEPORT_PACKAGE_PIN_VERSION+="=${TELEPORT_VERSION}"
            TELEPORT_UPDATER_PIN_VERSION+="=${TELEPORT_VERSION}"
            ;;
        rpm)
            # yum version pins use '_' where the release string has '-'
            TELEPORT_YUM_VERSION="${TELEPORT_VERSION//-/_}"
            TELEPORT_PACKAGE_PIN_VERSION+="-${TELEPORT_YUM_VERSION}"
            TELEPORT_UPDATER_PIN_VERSION+="-${TELEPORT_YUM_VERSION}"
            ;;
    esac
    PACKAGE_LIST=${TELEPORT_PACKAGE_PIN_VERSION}
    # (warning): This expression is constant. Did you forget the $ on a variable?
    # Disabling the warning above because expression is templated.
    # shellcheck disable=SC2050
    if is_using_systemd && [[ "false" == "true" ]]; then
        # Teleport Updater requires systemd.
        PACKAGE_LIST+=" ${TELEPORT_UPDATER_PIN_VERSION}"
    fi
    echo ${PACKAGE_LIST}
}
# is_repo_available: return 0 when this host's distro+version pair has an
# official Teleport package repository, 1 otherwise.
is_repo_available() {
    # repositories are only published for glibc ("linux-gnu") Linux hosts
    [[ "${OSTYPE}" == "linux-gnu" ]] || return 1
    # Populate $ID, $VERSION_ID and other env vars identifying the OS.
    # shellcheck disable=SC1091
    . /etc/os-release
    # The following distros+version have a Teleport repository to install from.
    case "${ID}-${VERSION_ID}" in
        ubuntu-16.04* | ubuntu-18.04* | ubuntu-20.04* | ubuntu-22.04* | \
        debian-9* | debian-10* | debian-11* | \
        rhel-7* | rhel-8* | rhel-9* | \
        centos-7* | centos-8* | centos-9* | \
        amzn-2 | amzn-2023)
            return 0
            ;;
        *)
            return 1
            ;;
    esac
}
# Prefer the official package repository when one exists for this distro;
# otherwise fall back to installing from a standalone artifact.
if is_repo_available; then
    log "Installing repo for distro $ID."
    install_from_repo
else
    log "Installing from binary file."
    install_from_file
fi
# check that teleport binary can be found and runs
if ! check_teleport_binary; then
    log_important "The Teleport binary could not be found at ${TELEPORT_BINARY_DIR} as expected."
    log_important "This usually means that there was an error during installation."
    log_important "Check this log for obvious signs of error and contact Teleport support"
    log_important "for further assistance."
    exit 1
fi
# install teleport config
# check the mode and write the appropriate config type
if [[ "${APP_INSTALL_MODE}" == "true" ]]; then
    install_teleport_app_config
elif [[ "${DB_INSTALL_MODE}" == "true" ]]; then
    install_teleport_database_config
else
    # default mode: plain SSH node configuration
    install_teleport_node_config
fi
# Used to track whether a Teleport agent was installed using this method.
export TELEPORT_INSTALL_METHOD_NODE_SCRIPT="true"
# install systemd unit if applicable (linux hosts)
if is_using_systemd; then
    log "Host is using systemd"
    # we only need to manually install the systemd config if teleport was installed via tarball
    # all other packages will deploy it automatically
    if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then
        install_systemd_unit
    fi
    start_teleport_systemd
    print_welcome_message
# install launchd config on MacOS hosts
elif is_macos_host; then
    log "Host is running MacOS"
    install_launchd_config
    start_teleport_launchd
    print_welcome_message
# not a MacOS host and no systemd available, print a warning
# and temporarily start Teleport in the foreground
else
    log "Host does not appear to be using systemd"
    no_systemd_warning
    start_teleport_foreground
fi

View File

@@ -0,0 +1,28 @@
#!/bin/sh
# Install a Datasaker package on a Debian/Ubuntu host:
# 1) ensure the Datasaker GPG keyring file exists,
# 2) import the Datasaker public key into it,
# 3) register the APT source, 4) install the package named in $1.
apt_trusted_d_keyring="/etc/apt/trusted.gpg.d/datasaker-archive-keyring.gpg"
apt_usr_share_keyring="/usr/share/keyrings/datasaker-archive-keyring.gpg"
# require the package-name argument up front instead of running
# "apt install" with no package at the end
if [ -z "$1" ]; then
    echo "usage: $0 <package-name>" >&2
    exit 1
fi
# init keyring
if ! [ -f "${apt_usr_share_keyring}" ]; then
    echo "create archive-keyring.gpg"
    sudo touch "${apt_usr_share_keyring}"
    sudo chmod a+r "${apt_usr_share_keyring}"
fi
# download keyring then add key to keyring
curl -fsSL -o /tmp/datasaker.gpg.key https://dsk-agent-s3.s3.ap-northeast-2.amazonaws.com/dsk-agent-s3/public/public.gpg.key
# import directly from the downloaded file (no need to pipe through cat)
sudo gpg --import --batch --no-default-keyring --keyring "${apt_usr_share_keyring}" /tmp/datasaker.gpg.key
# copy keyring to trusted keyring
if ! [ -f "${apt_trusted_d_keyring}" ]; then
    sudo cp -a "${apt_usr_share_keyring}" "${apt_trusted_d_keyring}"
fi
# add apt source list
if ! [ -f /etc/apt/sources.list.d/datasaker.list ]; then
    echo "deb [signed-by=${apt_usr_share_keyring}] https://nexus.exem-oss.org/repository/debian-repos/ ubuntu main" | sudo tee /etc/apt/sources.list.d/datasaker.list > /dev/null
fi
sudo apt update
sudo apt install "$1"

5
ansible/00_old/key_test.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/usr/bin/expect -f
# Push the current user's SSH public key to root@<host>; the target host is
# passed as the script argument ($argv).
# WARNING(review): the root password is hard-coded below in plain text and
# committed to VCS — rotate it and read it from the environment instead.
spawn ssh-copy-id root@$argv
expect "password:"
send "saasadmin1234\n"
expect eof

View File

@@ -0,0 +1,53 @@
---
- hosts: local
become: true
roles:
- role: dsk_bot.datasaker
vars:
datasaker_api_key: "XQOt9G3oAtsOQyd3U25JwOu3/sE+zj/m3kRKL/d0OUAQn30jVlrBKN/gJp9cJ4C9CHU1D1vSEPRxaCk8NuwZh6+v48TiaingDy6F74YGySRvnH0gqdmfxLSGisD/g8/JqBlIwhhyMSVCVfAkcNysLnwLi4xLnZMlvVi2Lzo3MiekSfJS5adR3hAv6pCaCFe2rNW24pYS5PBYkP/kxp/cfYAN/UhVEs5J+h4/iQ5vozQgvWuskBpOjlUeEYZnMZ6Al91gAUmSRoxtzLc+QOdfp7+uDXpwXPm80bQz9bR20Elcr4+rNqLcc2ONwJwrSveDSvJn4xjB6n95hEYbaDHUpA=="
datasaker_agents: ["dsk-node-agent","dsk-log-agent"]
#datasaker_api_key: "eO58wEYK/2HThAV+5jgv7Or/qW3zJknBQF0FJt5Xo4kSZ9YH2/CJgfNUwKbGwlbzmihG9dVsSmmS40szOuvRVZJO0vPga98sJNI32AJdWaYX8oCNFouI0lYG+r9Y4vahrS7+FVwntyfkjETotqBDvoQ5HjGjvW0wviPagW/alNbI5pvpWwBHtgz9D83Y8DSvCvO64G4xhyIYZPSML11EqWUO8prYT8LfdD4n2oBp0QJ3cXKdvJAUc4w5LKbTASb8x8UTpVU3JH3Wnwe79PKftJ8YdxOtb5jjzXeOEEM2GD8xz4pbB7scCx5oJCWQLF1js6a2uFLENBgW+ztHRf1j2Q=="
#datasaker_api_key: "1VL7/mhddWkQaS/vf/VjjwjnwaUhtZnLL++ih9LxYSB7HVkPpZw1Duy/4gxLN/73Vga00bD79mVd6N4dP0BVxmGqLnR6xItnSLlO3M6LmOMuM8bLaBuxxOvxST3lxpvtI0B2ilyjqTLh5y+NJWFV7Awq4zpqnPnTZ5dryp3yc4zc3C7Vxu0f2CL7/oGT0LRj/1l7gchuUxw2TVDLFFRylb+cFt6/NNylBxIb1wKGILd7N6NGgnsdRcrv4ZvTEPusrDqxO3IRYF6z9ZNbkQ1BPeDINtVFTgwhqFZjxg6erd8oqscB9n1DHOi6+tJ8VSHi2w5hYxHq93EV4cxBfzXAug=="
#datasaker_agents: ["dsk-node-agent","dsk-log-agent"]
datasaker_docker_agents: ["dsk-docker-node-agent","dsk-docker-log-agent"]
#datasaker_docker_agents: ["dsk-docker-log-agent"]
#postgres_user_name: jhjung
#postgres_user_password: 1q2w3e4r
#postgres_database_address: 0.0.0.0
#postgres_database_port: 5432
#plan_postgres_user_name: jhjung
#plan_postgres_user_password: 1q2w3e4r
#plan_postgres_database_address: 0.0.0.0
#plan_postgres_database_port: 5432
#plan_postgres_database_name: test
datagate_trace_url: 10.10.43.111
datagate_trace_port: 31300
datagate_trace_timeout: 5s
datagate_manifest_url: 10.10.43.111
datagate_manifest_port: 31301
datagate_manifest_timeout: 5s
datagate_metric_url: 10.10.43.111
datagate_metric_port: 31302
datagate_metric_timeout: 5s
datagate_plan_url: 10.10.43.111
datagate_plan_port: 31303
datagate_plan_timeout: 5s
datagate_loggate_url: 10.10.43.111
datagate_loggate_port: 31304
datagate_loggate_timeout: 5s
datasaker_api_url: 10.10.43.111:31501
datasaker_api_send_interval: 1m
#uninstall: True
#datasaker_clean: True
app_name: test
custom_log_volume:
- /var/lib/docker
- /var/docker
logs:
- collect:
type: driver

View File

@@ -0,0 +1,9 @@
---
# Run the "teleport" role on every host in the "agent" inventory group.
- hosts: agent
  become: true
  roles:
    - teleport
  vars:
    # remove: True
    # custom_labels: 'user=havelight,company=exem'
    # canonical lowercase boolean (yamllint "truthy" rule); same value as before
    update: true

27
ansible/00_old/roles.yaml Executable file
View File

@@ -0,0 +1,27 @@
---
- hosts: test
# become: true
# gather_facts: true
roles:
- role: datasaker
vars:
- datasaker_api_key: yCWIqbipuRMULli6qs4vWs8GfV9rQo8gciSKPvAozxiy05HcPru9LChyNQMtVk0xlmz7UqTj/s6682tiHa9wir/1hOxlLDYipWHPgHXZ1WEJDVvXD/z5Pw8G6IMcNwmgXwXfRZuRvWsSlHva28opykqE/oDHMcwnsABYljd+/VG8UEik08rRpI1t48We0HceZSuJ0aO+9FvoCcjPSHjrj17KCX1beS0UO3iHrRkQOFKOFfHK/fZ3G27YoZgs8ySH+90kLUP65AoAne5TFgXRVJJUCZgr5o2ajEyTi4bkwdt7v1X6/3fIO9kkElfQPZoCQ1u5S9eJfIkkmTEpWSLtuQ==
- datasaker_agents: ['dsk-node-agent','dsk-trace-agent','dsk-log-agent','dsk-plan-postgres-agent']
# vars:
# - datagate_trace_url: test
# - datagate_trace_port: test
# - datagate_trace_timeout: test
# - datagate_manifes_url: test
# - datagate_manifest_port: test
# - datagate_manifest_timeout: test
# - datagate_metric_url: test
# - datagate_metric_port: test
# - datagate_metric_timeout: test
# - datagate_plan_url: test
# - datagate_plan_port: test
# - datagate_plan_timeout: test
# - datagate_loggate_url: test
# - datagate_loggate_port: test
# - datagate_loggate_timeout: test
# - datasaker_api_url: test
# - datasaker_api_send_interval: test

18
ansible/00_old/test.yml Normal file
View File

@@ -0,0 +1,18 @@
---
- hosts: all
vars:
test_check: {}
tasks:
- name: test
set_fact:
test_check: "{{ test_check | default({}, true) }}"
- name: mapping
assert:
that:
- test_check is mapping
- name: test
debug:
msg:
- "{{ test_check }}"

View File

@@ -0,0 +1,10 @@
---
# Demo play: fetch a secret from HashiCorp Vault and echo it back.
- hosts: all
  tasks:
    - name: Get password from Vault
      # WARNING(review): a live Vault token is embedded in this lookup string
      # and committed to VCS — revoke/rotate it and supply the token via the
      # VAULT_TOKEN environment variable instead. The debug module also prints
      # the secret into the Ansible output.
      ansible.builtin.debug:
        msg: "{{ lookup('hashi_vault', 'secret=secret/hostname2 token=hvs.CAESIOy5Troiesm65BQYj_QhF996yilil8whnWP5FWHp3eE8Gh4KHGh2cy40OTBCT09SdTl1c3FRNmFXenFBUmxVSkE url=http://vault.vault:8200') }}"
      register: vault_result
    - name: Use password from Vault
      # NOTE(review): echoing the secret exposes it in logs and the process list.
      ansible.builtin.command: echo "{{ vault_result.msg }}"

1
ansible/01_old/README.md Normal file
View File

@@ -0,0 +1 @@
# ansible_script

20
ansible/01_old/all_host Executable file
View File

@@ -0,0 +1,20 @@
[release]
10.10.43.100
10.10.43.101
[dsk_dev]
10.10.43.[110:200]
[cmoa]
10.10.43.[201:253]
[ubuntu]
13.125.123.49 ansible_user=ubuntu
[bastion]
10.10.43.43 ansible_port=2222 ansible_user=havelight
[dev2:children]
release
dsk_dev
cmoa

10
ansible/01_old/ansible.cfg Executable file
View File

@@ -0,0 +1,10 @@
[defaults]
inventory = inventory
roles_path = roles
deprecation_warnings = False
display_skipped_hosts = no
ansible_home = .
stdout_callback = debug
host_key_checking=False
#private_key_file=/root/.ssh/dev2-iac
#remote_tmp = /tmp/.ansible/tmp

View File

@@ -0,0 +1,26 @@
# Start from Python 3.9 base image
FROM nexus2.exem-oss.org/awx-ee:latest
USER root
# Update and install dependencies
RUN yum clean all && \
    yum makecache && \
    yum update -y && \
    yum install -y sudo nfs-utils
# Python package management
# crypto/pycrypto are removed first; pycryptodome is the maintained replacement
RUN pip3 uninstall -y crypto pycrypto && \
    pip3 install pycryptodome hvac xlwt
# Install kubectl
# the inner curl resolves the latest stable version string at build time
RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
    chmod +x kubectl && \
    mv kubectl /usr/local/bin/
# Copy kubeconfig (Not recommended for production)
# NOTE(review): this bakes cluster credentials into the image layers —
# mount them at runtime (Secret/volume) instead of COPYing at build time.
COPY kubeconfig /root/.kube/config
# Keep the container running
#CMD ["bash"]
CMD ["tail", "-f", "/dev/null"]

View File

@@ -0,0 +1,23 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1USXlPREF4TURJeU1Wb1hEVE15TVRJeU5UQXhNREl5TVZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT3drCldCR0hqaHRQQUZCVHNBNWJUMzIrNDl2WW4vbkZkd1h5MW5GNlBEYmUzL0ZMYzJDZGE2OENCSk95eTh2L2VvUW4KWGVleXRua05MTTdxaGFrbCtNUnBucFU2Wnp1NGdkdlJML1VHWDd3dTYwNFMvdkpQMXVNcFhMWEZEYUQzbFI2aQpEVm9jcUFmMGZsNFZDU21ldkJnTHpQOFl1cElrbllhc0FaRzJZbGQ2K1Y2RFNwWTA3aFBjQWNYdXo3c24vS202CjNqb2M0TnA1ZlFBSXVoZjVGdXA0UkE2YXAzWHZ3NllMNWExWFVUNzdjY1I3S0JHcDdtRFl3OVN1RloyVHFuOXIKSDUrQll0YytXdjlHNUZLOHcxVFAzYUFGYTJDMnhQYy9FWGxVa1UxOUsveE1ocDdiTm12eWlRd3VBbVI0NjJSKwo4aXBYNXFlam5hTnhITVVoeU5jQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZNajA1VFJsa0w5SlZlNUdTS05UOThlbzVqOE1NQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTlA4aWFIZWh4a3RkTStSc3FJMwplU1VoUEFkdW5TSm5MdTJLRFZCYUgxeHVLQkNBbUUzNjBFbk1FUmFLQjNKWUVuVE9aeWR1Y3Z1amg0bVdVaGQrCjA2RVl0aDRYVW1sSytsVTJkWHdia2s4UGRrZnVrRE5ZOG8vV20vc05oSlF4VTlFMHhZSGwwSDgwVlQ1Mk5CR1oKSkRnUDREaUVibzluajBhaVJkaDFYMmROZkh5Vzl0VGZPM210OGVtUldSVVV1Ly91anNsMTJ1VjZNRjFTVmlRLwpCaU1iODMvVit5aHVWa09HVDk5c25BMTNCTkF1WVFDeUpaK3ZRdm5jdTdCbW5sMXdtRjhVTVNjYTFUMEZiOVR1CjFkNnVqWTM2U1pjbXlHSHU5SW1lb2FoYkdQMmNFNkI0YnlrNDA5UGFwbnl3dGRUNC9hREU5aGlQVGZMNkc0ZXEKMmRJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
server: https://10.10.43.240:6443
name: saas-mgmt
contexts:
- context:
cluster: saas-mgmt
user: saas-mgmt
name: saas-mgmt
current-context: saas-mgmt
kind: Config
preferences: {}
users:
- name: k8s-prod.datasaker.io
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lSQU1HZExUZ0psRzFLVXpoR1kzT2RIMVV3RFFZSktvWklodmNOQVFFTEJRQXcKR0RFV01CUUdBMVVFQXhNTmEzVmlaWEp1WlhSbGN5MWpZVEFlRncweU16QTRNak13TmpJME16ZGFGdzB5TXpBNApNall3TURJME16ZGFNREF4RnpBVkJnTlZCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUlV3RXdZRFZRUURFd3hyCmRXSmxZMlpuTFhKdmIzUXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEVEFrVXEKeUp6N1ZYak5uY0NRUy9qK29YdTZNWUtGSUxDYjNvUFVDelhSbzZsL3VXQy9hL1R2SmxHWmNJNFRzQURKdGdOYwovZEM4YlJGdUNaVmEwNGljMWY2U2VoSVZhVGYwbHh6SHZOQjcwSUQ0ajB1Z2JVeCtDS0pXNkF3Wk9jU0t4Yk5yCi9VNThZeFVjVVIxa2NhYTNmeU8zTmJNNGpENkE2TVQySFZCS3p4Wk4vek5HdjZCUy9RYjROVVFjMFprUDUvb0cKc1ptL3dtWVJZb1dyTld2NXVzWVFnS25HMUNyTVdNMFgySGtDWkF1cXpHUU9CMWpvK2RZWVQvWnhab2lIZEx3ZgpaWjIwb2paOHQ1MnBMZ1VlbTJsQ2xraXJHdEFKRzI2MGluVDNLMTBBakdSMEE5ZVlJNzd5VEQrUHZ2c2tTUHVkCnIxb1FqRDRWYUZIMG1IWTFBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJSGdEQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkVyeVN4QzRVNlNWTUtmVQpaWFBqNnlhSE9iSDlNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUURES3BreklUeFlEUFBjK1pabkJaZjdOV2wvCjVTRmRRVU9IRmZ6K1ZSMjNOdnBra0VQaVcrWXNiU2dFQUcvbldVK3gvVFJqN3JORDg2cGlmODNJYXN6WjlpV3gKK1ZYRE9rekQrcG5qWXlVa2t6WW8vM1dZQklRaUdlNTRBNjlUY2VEYjV0a3J6RkFxd3JhUXI4VFJ6VVVaNzVDVQp6dmFqMkRZcUhZN1dkRlJTZUhqcm9EVHB2d1BXSjU2YjI1d0NndGdHV29uM2ZBWERjV1Z2ZzJsUnZMOHI0ZmpCCkNSRSswNjdwSGkvc2VpNU1QMFI5WkZGbjRrbm1peHE0TW1tcjd6UGZ6ZFhwS1dDOHRZKzZOemszMzdnZFhZNnYKbjVNOW1VYjk5ZngwL0J5ZGNQeGVqeXVZV3I2Y01DKzBQOE9uU1BSUjM1MS8xYVFaQXJLWnU3TmYySFVUCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMHdKRktzaWMrMVY0elozQWtFdjQvcUY3dWpHQ2hTQ3dtOTZEMUFzMTBhT3BmN2xnCnYydjA3eVpSbVhDT0U3QUF5YllEWFAzUXZHMFJiZ21WV3RPSW5OWCtrbm9TRldrMzlKY2N4N3pRZTlDQStJOUwKb0cxTWZnaWlWdWdNR1RuRWlzV3phLzFPZkdNVkhGRWRaSEdtdDM4anR6V3pPSXcrZ09qRTloMVFTczhXVGY4egpScitnVXYwRytEVkVITkdaRCtmNkJyR1p2OEptRVdLRnF6VnIrYnJHRUlDcHh0UXF6RmpORjloNUFtUUxxc3hrCkRnZFk2UG5XR0UvMmNXYUloM1M4SDJXZHRLSTJmTGVkcVM0RkhwdHBRcFpJcXhyUUNSdHV0SXAwOXl0ZEFJeGsKZEFQWG1DTys4a3cvajc3N0pFajduYTlhRUl3K0ZXaFI5SmgyTlFJREFRQUJBb0lCQVFERnp1SUNhcEJuT01nSAprWFFja1d2NVlHNjVySUlieFBwckZGem00aDl3eUlrME9CZFBPNmdnclA1ZjVsajZjY3M3VFFxNEdTU2VEMjBBCmg3Rmd0TjdqaitTWGNpSVR1bEIvVlUzZ25NdWcxbVNoSHN3WnQzeTJ4ZWRScXpUMFRPaEg0M0FBc3pUcGZJVWsKeDVIVFFJdTJoMVIzQXJ0aExtL0ZydkE5ZkZ0eDFCM3d3TUtEdmtObDN2bU82TnMxY3J3cjJmOUw1TTNJUVJXQwpRbVNFOGFSUkk4Rnhob2FKb3JRY3U0VFpocDBGUzYrVkR3bG9OWkRGZ0M4Uk1YSWd0ZXZkVnpMdGxQUUVSUTA3CmhkdFZMcklGYktQNGtRaVk1emlUYlhKOXdFSFVLNWF6ekRVNnVNU3RZc3ZveVVpR3A4REozeWFPM0RwNS93MkYKaTFRcE1oRWRBb0dCQU5tT2NWL0tVODdiQ1IydnN2eU1DOVVZNS8wdWpZZU1ndk00dC9OWXlYUmRwVFYrSXk5aApBbGNmZHVnZUN4YlpPV2IzZVIwdE9uMTNGNlNYbnZodkRaZEZzT0w5SU0rd05ZZVJoUGFyZmVQb2JDQVVLZmVJCitTazllVUovNDVlckhkUWhyMlY1aXdwQXhIUXBza0ZWd3U1OHFQbzdLalFMTG1MeDFISkhMZDFIQW9HQkFQaEwKb3hqTXdFSmZudkZjLy9YUGJRSXgwWndiVUltQWFSYlIwUDIwZU1FWFhuNjZZQ1VIeDl4YVpIVHJqY2N1eXdiVgo1QjJHYThHMzVDN0oydldvZWVaamU2c2xJTE5UMm1jNG4wSWFRSFR1a2ZPWHpKQUhRSEZXQWI2TnZJTlpaK3hGCkJ1YndwWHRYUDlvVFh2MG41MEpTUzgyOW1LWG55c25RbDBNNzk5NmpBb0dBVHE1YmhuOVFMQ0cvZkVNTkFwVkwKdWpnVnZ0VlZUazZkRllYUDBXeXMveTdYRHkrZFhnZEJwMnl6dm1NUE02WkFRbU1DSkhFMUZDYzhIOFRPTTU5RwpWUTFaV2Q2ZVBUN0hQVTU5dmhCcnFUOW55M28vYTB6WWYvZkJvVEZMaUpEVWF1SDc0MEUvN2VkYXBZQm0vWVljCng4L0I5UzNzcDRIYnR1RXJLbUZmendVQ2dZRUE0SDdLMVdaelFzL2dEczBPWkxzS0RaenJyMkNHL2Z2TGlLVm0KZDYxUUxRMnJFNXdCeUJsejNFa2lZUkNGWFIxeTFoaFlLMVhaWWdxWlZyQ050K1YvYWc1eXgzaEhTN3k2VU8vQwpGdXRUY2lZdWNuZkNya3JRT21rUUpMRlVTOUp2Z3hHYVB2NUFNUGZmTkphbElQR09SOG5PM2hQWnk4OTY2K1FjCmo5N05xMDhDZ1lCSWFXb3ZaVDMrOE5hR1h
SQTRBaU1RU0MweFNCYlhacitCSHZ4TVlUQ1loMlJKZTVTM0FzNGUKc2Z0d0I2c0MvYXI2bkppOFVpN05VdysrTUk0VUZORE05c0JWUXp3NUUwNlFCSE1TcGxQdXRCRGVXTE5RN0FDNwpkM2Q5bHE4RjhXU3o0c0JiVlZvUjdheUJpU1F2blU1Q3NOSXFBb2ExYUllTzNhdVcyQWd0OEE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
- name: saas-mgmt
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJSFVDZ2pMMXhPRHd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TWpFeU1qZ3dNVEF5TWpGYUZ3MHlOREF4TURNd016QXhNalJhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXBGWVNKT1YrM3QrbWVOQ2sKUnNMdWVyMmFSbm5qRHBvYTlBTkh5TWZ4WWRmYTNRTS9VZGlPUlEraWFTMU1jZjFPUFVzRlloOU1iS2p6S0tCTApGYTh2NkJ0dGN2bVIwTFRLcTY2MHFIUWZOMVUxUEhTUnhROElQTzg5bUhjYkI1Z2N3MktRRWNIWVNsaURBVHM3CllkK2JqMWtabWIrUVIxQlhlZlVoZ0pvYk1UazNrdERBMG1jRlVHSExKakxjUTF2UlNiR2s0NVJRY2d5SlBjdy8KM0VPTXkzcHRzeUpldk5tb3c2N0pmVXp3d1NBM0hOd0paTzB0enVETGprRTNMOXgwVC9idGVXTlI0eUEvTFp2QQppRGkrdmVnMFBJL2FYbHBvejVOVjNwdVQwOGJnRVVWZ0dUdWhRQWtxZkk1SmpjYXBLSXpLRzBMVWdxWXRjQWJRClE3ZDcvd0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JUSTlPVTBaWkMvU1ZYdVJraWpVL2ZIcU9ZLwpEREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBNGluVHEzU1krcXpTczlGYWwrMXdRNlNWSVJXOEw5anB6NGdsCnNCZGpZV05DbzRzTURTNG95bEh5ZTVhMXZZR3ZqdXBORjUyVG5sL2UzU3U0ekxWTHZNMXd4VHR3aUVqNjlOTGcKR2RwemtuUk5INTM0UHFhaXdxNk9xOWd5MXRIQmIyTHNadlFRN0FBanFKVG5SL01NNVI4cEllSnpFdk5xSHVMdAovWndXRWg2enJXVkVON3IxWVN6elpCRVgxaGh5dE5abUltTkhQNVFPcmxueXZLc1luV0hLL0JTTHNHK2dQc0htCkJzQkhtblNBeTh2Wk5OSWJZQk5SMmJ6WkZiVWExaTZ4aHlxRUNvdEpXMHVvOWlSN0tpbGFnelNJN1hNdUdhZFkKYkFvcHVZWDJTTkROMnhiZWJkOWhxRkRRcHZwUTlXSE00ZTBpNjhjOUgvOTA2MS9hQnc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcEZZU0pPViszdCttZU5Da1JzTHVlcjJhUm5uakRwb2E5QU5IeU1meFlkZmEzUU0vClVkaU9SUStpYVMxTWNmMU9QVXNGWWg5TWJLanpLS0JMRmE4djZCdHRjdm1SMExUS3E2NjBxSFFmTjFVMVBIU1IKeFE4SVBPODltSGNiQjVnY3cyS1FFY0hZU2xpREFUczdZZCtiajFrWm1iK1FSMUJYZWZVaGdKb2JNVGsza3REQQowbWNGVUdITEpqTGNRMXZSU2JHazQ1UlFjZ3lKUGN3LzNFT015M3B0c3lKZXZObW93NjdKZlV6d3dTQTNITndKClpPMHR6dURMamtFM0w5eDBUL2J0ZVdOUjR5QS9MWnZBaURpK3ZlZzBQSS9hWGxwb3o1TlYzcHVUMDhiZ0VVVmcKR1R1aFFBa3FmSTVKamNhcEtJektHMExVZ3FZdGNBYlFRN2Q3L3dJREFRQUJBb0lCQUJpd2JhMXBaVFFxdWFIeApCcDB5OEEwMHF4Ym5mUHRXbjdJRlJDV2dGRjIweGticUUvdEI0NjN3ZVYvLzFEcFQ2Z3MvV0NHenZoR2RHRnNFCktnT3AvREtNM0ZhbnRBWjlBdTNrSTNRamJnVXNJZ0ZoS2YxSEV0L0V1YVpNVHAxSGR4ckxsZ1YwNy8vTGFITW8KNlBUOVdTdWlJVHgrRVRrRmt2N1pteHp0Q2lUTXpCQTJ4YW9paWk4dEs1NkM4K1JSeW5wMVlhSnl0VkQra3diZwpwZmErVThlUFEwRXUyeFkzVVRkdHZLSDYrSVVHdUppMVI3Nk1qM3lZRFl3NkJmN2lzMU1tSXhKM3FpdjVFRFhZCjdDSXR2VmxyQTBWczJrREpHVXdYZGhQdHU2RmUzWXc1eFNaRnR2cXkyVnpXcEhvcVB0RG16cy85ZXpnbTExeUUKWDNmR00yRUNnWUVBd0VZSGd2cmxWUXU3RW44RUlQZFVsMnZOQnBueWxZNzFWTnY5Q0dpLzNyKzhQcFNKenBiRQp0azVoTTNmeHJ4Q0s3SGI4NXBKSDl2dzh2akFwb1VmeDVIYnJESkpkS1F4Rlp5RStSRnpHVG1KN0M2VERQanVaCjl1SVhFRkxQd3RndFJqZ2hhUk1MSm16SGlJVkQ0cmpjZHhRTlN3ZWc3ZEhOOFQ5UVVHajhiUmtDZ1lFQTJzMmYKVlFLMnhlaHJLT0VMYVo5a3pvZlYyUUEvN0RFSzdvOHBuOGZhSk1SRVJlSmdTb3MyZ0p2cU5mdVhNMVp1K2crLwpuSno0L0kxSW9aQUdLTllxZHlhLzVrdDRjTkZjMTFrK3MzaE91Z01lbnBKSXNUU2EyS1o4RHl0Sm5pWTZUOFZMClBlWnlSNy9xb2p2dWUyY3NvSTFPUWxHcGFPa09WQ0NaZTc0V1BOY0NnWUVBdHc3MWYrTFlEVXlKNDJaQ3pCQXUKM2F1cEhDdmVKajVoblZXRlowZ3p4U1BQV2RXYURyTEV2cjJKRmJPUXl4aDQ3QUd0YnVpKzA0djdXU2dKdXFBQQowWC9XOGJVNE5TaVZ1MGFQUGc4R1R3SzhHNjNXcFoyaFRNaWRKTkZ6TlJNVXA5SXhIUlVnZklqOHdDSUJMQTdNCitDS0ROWGdoNDhyb3hGTi9aODlNNWFFQ2dZQkFsZXVQTzJMYUhsWHJWaXA1UGd5U2pqUUlmdk5mYzhhSFRvajUKMmhOQlFSSHFFdjFiWTZadDVoZ0hZVUZyYlBzTEl6VHJOTWFtUGNvUHJxU3l6eXp2eU9kaVFpckdHbmF1Tm5DMApwekdONUxmWUZOUVNRclhtZDVZdElCajE3dERObFM0MWtsMXZZbTRPLzJQUTEwNnNBYW4xRjRmTEtPZ0syeWlUCkJ6UW5Od0tCZ1FDWVVZWEtNZm1vSEhGQkJ
uVjcxY2xQV1BRUC94bmY5SGdnalJJK2dvSDNkRzRxOXdYS2FpaHYKYndIRmNJQXFPYWkzTm1HT1dydzA4bkNENFYyMVZRQWd0ZFdjMFVmS254M2svdFFheHUyM00xaWdkV2JQbXcyUwp4em1TY0sweHVjdkdvQ0VaUWZ0cGlQOVFwUWJIQ0xGNUFIM2tRdExRZDdzOTErZkU2cGtsYlE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=

View File

@@ -0,0 +1,13 @@
# Standalone test pod running the custom awx-ee image in the ansible-awx
# namespace; kept alive by the image's default command.
apiVersion: v1
kind: Pod
metadata:
  name: awx-test
  namespace: ansible-awx
spec:
  containers:
    - image: nexus2.exem-oss.org/awx-ee:latest
      imagePullPolicy: Always
      name: awx-test
      resources: {}
      # WARNING(review): privileged grants the container full host access —
      # confirm this test pod really needs it and drop it otherwise.
      securityContext:
        privileged: true
View File

@@ -0,0 +1,19 @@
---
# Apply baseline connection settings (user, admin password, SSH port) to all
# hosts via the connect-settings role.
- hosts: all
  become: true
  roles:
    - connect-settings
  # - teleport
  vars:
    username: dev2
    adminuser: root
    # WARNING(review): plaintext password committed to VCS — move it to
    # Ansible Vault or an external secret store and rotate it.
    manual_password: saasadmin1234
    sshmainport: 2222
    #encrypt: 1
    #debug_mode: True
    teleport_uri: teleport.kr.datasaker.io
    # remove: True
    # custom_labels: 'user=havelight,company=exem'
    update: True
    # install: True

19
ansible/01_old/infra-test Normal file
View File

@@ -0,0 +1,19 @@
[redhat]
10.10.43.177 ansible_port=2222 ansible_user=dev2
10.10.43.178 ansible_port=2222 ansible_user=dev2
10.10.43.179 ansible_port=2222 ansible_user=dev2
[ubuntu]
#10.10.43.234 ansible_port=22 ansible_user=root
10.10.43.180 ansible_port=22 ansible_user=ubuntu
[tmp]
10.10.43.147 ansible_port=2222 ansible_user=ubuntu
[proxy]
10.10.43.20
[agent:children]
redhat
ubuntu

BIN
ansible/01_old/inventory/.DS_Store vendored Normal file

Binary file not shown.

View File

@@ -0,0 +1,2 @@
[api]
10.10.43.[170:199]

View File

@@ -0,0 +1,14 @@
[dsk_dev]
10.10.43.[1:253]
[bastion]
10.10.43.43 ansible_port=2222
[old_ip]
10.10.30.100
10.10.30.219
10.10.31.33
10.10.31.37
10.10.31.243
10.10.34.128
10.10.34.172

View File

@@ -0,0 +1,2 @@
[api]
10.10.43.[160:169]

View File

@@ -0,0 +1,52 @@
[prod-master]
i-041b16dbf097aa03f ansible_user=ubuntu
i-0446ae551deac0b98 ansible_user=ubuntu
i-0628e1521b484fde1 ansible_user=ubuntu
[prod-node]
i-001143efb27f5c473 ansible_user=ubuntu
i-00608e3310900aff0 ansible_user=ubuntu
i-009e56755c657e557 ansible_user=ubuntu
i-01b968c93a1e29888 ansible_user=ubuntu
i-0317fd83555444cda ansible_user=ubuntu
i-040d9267937974ffc ansible_user=ubuntu
i-041f2d6ebcbb50593 ansible_user=ubuntu
i-046572392b64748b1 ansible_user=ubuntu
i-04712bc4966dadc68 ansible_user=ubuntu
i-04abf3bdccadaf05c ansible_user=ubuntu
i-0524126bf5fdc1483 ansible_user=ubuntu
i-06b7d476688a345e6 ansible_user=ubuntu
i-06c80df6180d036bf ansible_user=ubuntu
i-076a7682ef711c83a ansible_user=ubuntu
i-079a7ec37fe655415 ansible_user=ubuntu
i-08125130e3caf704f ansible_user=ubuntu
i-088361873fe1f1eb2 ansible_user=ubuntu
i-08b15bdf81b808b23 ansible_user=ubuntu
i-0986b098c1961eb8d ansible_user=ubuntu
i-09a8af86b9a3d6474 ansible_user=ubuntu
i-09ecd63d914f62fc3 ansible_user=ubuntu
i-0a0942fb91f9968d8 ansible_user=ubuntu
i-0a768908aade20566 ansible_user=ubuntu
i-0d5eb423a05b70f84 ansible_user=ubuntu
i-0dc48b11bbb330012 ansible_user=ubuntu
i-0dd28df2ff60bf63b ansible_user=ubuntu
i-0ddf860cc0e3c4b92 ansible_user=ubuntu
i-0e4a89bcc7c6421bf ansible_user=ubuntu
i-0e5e379f9b04cd2fa ansible_user=ubuntu
i-0f0b728f94d19d020 ansible_user=ubuntu
i-0f4f0edc7a431de84 ansible_user=ubuntu
i-0f5eef7ed3a20e103 ansible_user=ubuntu
[prod-spot]
i-002c94ea83e2312ea ansible_user=ubuntu
i-05877be601a14cc23 ansible_user=ubuntu
i-0619e290ef4d67954 ansible_user=ubuntu
i-0bd24dc5f5a740dcb ansible_user=ubuntu
i-0e3edc671c6bccfb7 ansible_user=ubuntu
[all:children]
prod-master
prod-node
[all:vars]
ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q ubuntu@bastion.kr.datasaker.io"'

13
ansible/01_old/inventory/cmoa Executable file
View File

@@ -0,0 +1,13 @@
[cmoa_1]
10.10.43.[200:209]
[cmoa_2]
10.10.43.[210:219]
[cmoa_etc]
10.10.43.[220:229]
[cmoa:children]
cmoa_1
cmoa_2
cmoa_etc

View File

@@ -0,0 +1,5 @@
[dsk_dev]
10.10.43.[100:159]
[bastion]
10.10.43.43 ansible_port=2222 ansible_user=dev2-iac

7
ansible/01_old/inventory/etc Executable file
View File

@@ -0,0 +1,7 @@
[etc]
10.10.43.[1:10]
10.10.43.[14:30]
10.10.43.[44:99]
[xcp-ng]
10.10.43.[31:42]

View File

@@ -0,0 +1,2 @@
[infra]
10.10.43.[230:249]

View File

@@ -0,0 +1,76 @@
[all]
10.10.43.100 ansible_port=2222 ansible_user=dev2
10.10.43.101 ansible_port=2222 ansible_user=dev2
10.10.43.105 ansible_port=2222 ansible_user=dev2
10.10.43.106 ansible_port=2222 ansible_user=dev2
10.10.43.111 ansible_port=2222 ansible_user=dev2
10.10.43.112 ansible_port=2222 ansible_user=dev2
10.10.43.113 ansible_port=2222 ansible_user=dev2
10.10.43.114 ansible_port=2222 ansible_user=dev2
10.10.43.115 ansible_port=2222 ansible_user=dev2
10.10.43.116 ansible_port=2222 ansible_user=dev2
10.10.43.117 ansible_port=2222 ansible_user=dev2
10.10.43.118 ansible_port=2222 ansible_user=dev2
10.10.43.119 ansible_port=2222 ansible_user=dev2
10.10.43.120 ansible_port=2222 ansible_user=dev2
10.10.43.121 ansible_port=2222 ansible_user=dev2
10.10.43.122 ansible_port=2222 ansible_user=dev2
10.10.43.123 ansible_port=2222 ansible_user=dev2
10.10.43.124 ansible_port=2222 ansible_user=dev2
10.10.43.125 ansible_port=2222 ansible_user=dev2
10.10.43.126 ansible_port=2222 ansible_user=dev2
10.10.43.127 ansible_port=2222 ansible_user=dev2
10.10.43.128 ansible_port=2222 ansible_user=dev2
10.10.43.129 ansible_port=2222 ansible_user=dev2
10.10.43.130 ansible_port=2222 ansible_user=dev2
10.10.43.131 ansible_port=2222 ansible_user=dev2
10.10.43.132 ansible_port=2222 ansible_user=dev2
10.10.43.133 ansible_port=2222 ansible_user=dev2
10.10.43.134 ansible_port=2222 ansible_user=dev2
10.10.43.135 ansible_port=2222 ansible_user=dev2
10.10.43.136 ansible_port=2222 ansible_user=dev2
10.10.43.137 ansible_port=2222 ansible_user=dev2
10.10.43.138 ansible_port=2222 ansible_user=dev2
10.10.43.139 ansible_port=2222 ansible_user=dev2
10.10.43.140 ansible_port=2222 ansible_user=dev2
10.10.43.141 ansible_port=2222 ansible_user=dev2
10.10.43.142 ansible_port=2222 ansible_user=dev2
10.10.43.143 ansible_port=2222 ansible_user=dev2
10.10.43.144 ansible_port=2222 ansible_user=dev2
10.10.43.145 ansible_port=2222 ansible_user=dev2
10.10.43.146 ansible_port=2222 ansible_user=dev2
10.10.43.147 ansible_port=2222 ansible_user=dev2
10.10.43.148 ansible_port=2222 ansible_user=dev2
10.10.43.151 ansible_port=2222 ansible_user=dev2
10.10.43.152 ansible_port=2222 ansible_user=dev2
10.10.43.153 ansible_port=2222 ansible_user=dev2
10.10.43.164 ansible_port=2222 ansible_user=dev2
10.10.43.165 ansible_port=2222 ansible_user=dev2
10.10.43.166 ansible_port=2222 ansible_user=dev2
10.10.43.167 ansible_port=2222 ansible_user=dev2
10.10.43.168 ansible_port=2222 ansible_user=dev2
10.10.43.169 ansible_port=2222 ansible_user=dev2
10.10.43.171 ansible_port=2222 ansible_user=dev2
10.10.43.172 ansible_port=2222 ansible_user=dev2
10.10.43.173 ansible_port=2222 ansible_user=dev2
10.10.43.174 ansible_port=2222 ansible_user=dev2
10.10.43.175 ansible_port=2222 ansible_user=dev2
10.10.43.176 ansible_port=2222 ansible_user=dev2
10.10.43.177 ansible_port=2222 ansible_user=dev2
10.10.43.178 ansible_port=2222 ansible_user=dev2
10.10.43.179 ansible_port=2222 ansible_user=dev2
10.10.43.180 ansible_port=2222 ansible_user=dev2
10.10.43.181 ansible_port=2222 ansible_user=dev2
10.10.43.182 ansible_port=2222 ansible_user=dev2
10.10.43.185 ansible_port=2222 ansible_user=dev2
10.10.43.186 ansible_port=2222 ansible_user=dev2
10.10.43.187 ansible_port=2222 ansible_user=dev2
10.10.43.188 ansible_port=2222 ansible_user=dev2
10.10.43.189 ansible_port=2222 ansible_user=dev2
10.10.43.190 ansible_port=2222 ansible_user=dev2
10.10.43.191 ansible_port=2222 ansible_user=dev2
10.10.43.192 ansible_port=2222 ansible_user=dev2
10.10.43.193 ansible_port=2222 ansible_user=dev2
10.10.43.194 ansible_port=2222 ansible_user=dev2
10.10.43.199 ansible_port=2222 ansible_user=dev2

View File

@@ -0,0 +1,65 @@
[prod-demo-master]
10.10.43.100 ansible_port=2222 ansible_user=dev2
[prod-demo-worker]
10.10.43.101 ansible_port=2222 ansible_user=dev2
[dev-demo-master]
10.10.43.105 ansible_port=2222 ansible_user=dev2
[dev-demo-worker]
10.10.43.106 ansible_port=2222 ansible_user=dev2
[saas_mgmt_master]
10.10.43.240 ansible_port=2222 ansible_user=dev2
[saas_mgmt_node]
10.10.43.[241:243] ansible_port=2222 ansible_user=dev2
[dsk_dev_master]
10.10.43.[111:113] ansible_port=2222 ansible_user=dev2
[dsk_dev_node]
10.10.43.[114:153] ansible_port=2222 ansible_user=dev2
[bastion]
10.10.43.43 ansible_port=2222 ansible_user=havelight
[agent_host]
10.10.43.177 ansible_port=2222 ansible_user=dev2
10.10.43.178 ansible_port=2222 ansible_user=dev2
10.10.43.179 ansible_port=2222 ansible_user=dev2
10.10.43.180 ansible_port=2222 ansible_user=dev2
10.10.43.181 ansible_port=2222 ansible_user=dev2
10.10.43.182 ansible_port=2222 ansible_user=dev2
[agent_cri_master]
10.10.43.185 ansible_port=2222 ansible_user=dev2
[agent_cri_worker]
10.10.43.186 ansible_port=2222 ansible_user=dev2
10.10.43.187 ansible_port=2222 ansible_user=dev2
10.10.43.188 ansible_port=2222 ansible_user=dev2
[agent_middleware_master]
10.10.43.189 ansible_port=2222 ansible_user=dev2
[agent_middleware_worker]
10.10.43.190 ansible_port=2222 ansible_user=dev2
10.10.43.191 ansible_port=2222 ansible_user=dev2
10.10.43.192 ansible_port=2222 ansible_user=dev2
10.10.43.193 ansible_port=2222 ansible_user=dev2
10.10.43.194 ansible_port=2222 ansible_user=dev2
10.10.43.199 ansible_port=2222 ansible_user=dev2
[all:children]
saas_mgmt_master
saas_mgmt_node
dsk_dev_master
dsk_dev_node
bastion
agent_host
agent_cri_master
agent_cri_worker
agent_middleware_master
agent_middleware_worker

View File

@@ -0,0 +1,41 @@
iptables -A INPUT -s 10.10.45.0/24 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.47.0/24 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.48.0/24 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.50.0/24 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.37.0/24 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.45.0/24 -j DROP
iptables -A INPUT -s 10.10.47.0/24 -j DROP
iptables -A INPUT -s 10.10.48.0/24 -j DROP
iptables -A INPUT -s 10.10.50.0/24 -j DROP
iptables -A INPUT -s 10.10.37.0/24 -j DROP
iptables -A INPUT -s 10.10.43.200 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.43.200 -j DROP
- { source: "", target: "DROP" }
- { source: "10.10.45.0/24", target: "DROP" }
- { source: "10.10.47.0/24", target: "DROP" }
- { source: "10.10.48.0/24", target: "DROP" }
- { source: "10.10.50.0/24", target: "DROP" }
- { source: "10.10.37.0/24", target: "DROP" }
- { source: "10.10.45.196", target: "DROP" }
- { source: "10.10.47.20", target: "DROP" }
- { source: "10.10.47.231", target: "DROP" }
- { source: "10.10.47.233", target: "DROP" }
- { source: "10.10.48.252", target: "DROP" }
- { source: "10.10.50.232", target: "DROP" }
- { source: "10.10.50.233", target: "DROP" }
10.10.45.174
10.10.45.196
10.10.47.20
10.10.47.231
10.10.47.233
10.10.48.252
10.10.50.232
10.10.50.233

View File

@@ -0,0 +1,41 @@
[test-ubuntu]
10.10.43.[164:166] ansible_port=2222 ansible_user=dev2-iac
[test-centos]
10.10.43.[167:169] ansible_port=2222 ansible_user=dev2-iac
[test-openshfit]
10.10.43.171 ansible_port=2222 ansible_user=dev2-iac
[test-opensuse]
10.10.43.172 ansible_port=2222 ansible_user=dev2-iac
[test-debian]
10.10.43.173 ansible_port=2222 ansible_user=dev2-iac
[test-oracle]
10.10.43.174 ansible_port=2222 ansible_user=dev2-iac
[test-amazon]
10.10.43.175 ansible_port=2222 ansible_user=dev2-iac
[test-db_env_cluster]
10.10.43.176 ansible_port=2222 ansible_user=dev2-iac
[test-redhat]
10.10.43.[177:179] ansible_port=2222 ansible_user=dev2-iac
[test-docker]
10.10.43.180 ansible_port=2222 ansible_user=dev2-iac
[all:children]
test-ubuntu
test-centos
test-openshfit
test-opensuse
test-debian
test-oracle
test-amazon
test-db_env_cluster
test-redhat
test-docker

View File

@@ -0,0 +1,76 @@
[server]
10.10.43.100 ansible_port=2222 ansible_user=dev2
10.10.43.101 ansible_port=2222 ansible_user=dev2
10.10.43.105 ansible_port=2222 ansible_user=dev2
10.10.43.106 ansible_port=2222 ansible_user=dev2
10.10.43.111 ansible_port=2222 ansible_user=dev2
10.10.43.112 ansible_port=2222 ansible_user=dev2
10.10.43.113 ansible_port=2222 ansible_user=dev2
10.10.43.114 ansible_port=2222 ansible_user=dev2
10.10.43.115 ansible_port=2222 ansible_user=dev2
10.10.43.116 ansible_port=2222 ansible_user=dev2
10.10.43.117 ansible_port=2222 ansible_user=dev2
10.10.43.118 ansible_port=2222 ansible_user=dev2
10.10.43.119 ansible_port=2222 ansible_user=dev2
10.10.43.120 ansible_port=2222 ansible_user=dev2
10.10.43.121 ansible_port=2222 ansible_user=dev2
10.10.43.122 ansible_port=2222 ansible_user=dev2
10.10.43.123 ansible_port=2222 ansible_user=dev2
10.10.43.124 ansible_port=2222 ansible_user=dev2
10.10.43.125 ansible_port=2222 ansible_user=dev2
10.10.43.126 ansible_port=2222 ansible_user=dev2
10.10.43.127 ansible_port=2222 ansible_user=dev2
10.10.43.128 ansible_port=2222 ansible_user=dev2
10.10.43.129 ansible_port=2222 ansible_user=dev2
10.10.43.130 ansible_port=2222 ansible_user=dev2
10.10.43.131 ansible_port=2222 ansible_user=dev2
10.10.43.132 ansible_port=2222 ansible_user=dev2
10.10.43.133 ansible_port=2222 ansible_user=dev2
10.10.43.134 ansible_port=2222 ansible_user=dev2
10.10.43.135 ansible_port=2222 ansible_user=dev2
10.10.43.136 ansible_port=2222 ansible_user=dev2
10.10.43.137 ansible_port=2222 ansible_user=dev2
10.10.43.138 ansible_port=2222 ansible_user=dev2
10.10.43.139 ansible_port=2222 ansible_user=dev2
10.10.43.140 ansible_port=2222 ansible_user=dev2
10.10.43.141 ansible_port=2222 ansible_user=dev2
10.10.43.142 ansible_port=2222 ansible_user=dev2
10.10.43.143 ansible_port=2222 ansible_user=dev2
10.10.43.144 ansible_port=2222 ansible_user=dev2
10.10.43.145 ansible_port=2222 ansible_user=dev2
10.10.43.146 ansible_port=2222 ansible_user=dev2
10.10.43.147 ansible_port=2222 ansible_user=dev2
10.10.43.148 ansible_port=2222 ansible_user=dev2
10.10.43.151 ansible_port=2222 ansible_user=dev2
10.10.43.152 ansible_port=2222 ansible_user=dev2
10.10.43.153 ansible_port=2222 ansible_user=dev2
10.10.43.164 ansible_port=2222 ansible_user=dev2
10.10.43.165 ansible_port=2222 ansible_user=dev2
10.10.43.166 ansible_port=2222 ansible_user=dev2
10.10.43.167 ansible_port=2222 ansible_user=dev2
10.10.43.168 ansible_port=2222 ansible_user=dev2
10.10.43.169 ansible_port=2222 ansible_user=dev2
10.10.43.171 ansible_port=2222 ansible_user=dev2
10.10.43.174 ansible_port=2222 ansible_user=dev2
10.10.43.176 ansible_port=2222 ansible_user=dev2
10.10.43.177 ansible_port=2222 ansible_user=dev2
10.10.43.178 ansible_port=2222 ansible_user=dev2
10.10.43.179 ansible_port=2222 ansible_user=dev2
10.10.43.180 ansible_port=2222 ansible_user=dev2
10.10.43.181 ansible_port=2222 ansible_user=dev2
10.10.43.182 ansible_port=2222 ansible_user=dev2
10.10.43.185 ansible_port=2222 ansible_user=dev2
10.10.43.186 ansible_port=2222 ansible_user=dev2
10.10.43.187 ansible_port=2222 ansible_user=dev2
10.10.43.188 ansible_port=2222 ansible_user=dev2
10.10.43.189 ansible_port=2222 ansible_user=dev2
10.10.43.190 ansible_port=2222 ansible_user=dev2
10.10.43.191 ansible_port=2222 ansible_user=dev2
10.10.43.192 ansible_port=2222 ansible_user=dev2
10.10.43.193 ansible_port=2222 ansible_user=dev2
10.10.43.194 ansible_port=2222 ansible_user=dev2
10.10.43.199 ansible_port=2222 ansible_user=dev2
[all:children]
server

3
ansible/01_old/inventory2 Executable file
View File

@@ -0,0 +1,3 @@
[servers]
10.10.43.100

33
ansible/01_old/inventory_agent Executable file
View File

@@ -0,0 +1,33 @@
[agent_host]
10.10.43.177
10.10.43.178
10.10.43.179
10.10.43.180 ansible_user=dev2
10.10.43.182 ansible_user=dev2
[agent_cri_master]
10.10.43.185
[agent_cri_worker]
10.10.43.186
10.10.43.187
10.10.43.188
[agent_middleware_master]
10.10.43.189
[agent_middleware_worker]
10.10.43.190
10.10.43.191
10.10.43.192
10.10.43.193
10.10.43.194
10.10.43.199
[all:children]
agent_host
agent_cri_master
agent_cri_worker
agent_middleware_master
agent_middleware_worker

32
ansible/01_old/inventory_api Executable file
View File

@@ -0,0 +1,32 @@
[master]
10.10.43.160
[worker1]
10.10.43.161
[worker2]
10.10.43.162
[worker3]
10.10.43.163
[cluster:children]
master
worker1
worker2
worker3
[master:vars]
kubernetes_role="master"
runtime="containerd"
[worker1:vars]
kubernetes_role="node"
runtime="containerd"
[worker2:vars]
kubernetes_role="node"
runtime="containerd"
[worker3:vars]
kubernetes_role="node"
runtime="containerd"

22
ansible/01_old/inventory_bak Executable file
View File

@@ -0,0 +1,22 @@
[release]
10.10.43.100
10.10.43.101
[amazon]
15.165.19.5 ansible_user=ec2-user
[ubuntu]
13.125.123.49 ansible_user=ubuntu
[aws:children]
amazon
ubuntu
[monitoring]
3.38.1.96
[bastion]
10.10.43.43 ansible_port=2222 ansible_user=havelight
[hong]
10.10.52.55

18
ansible/01_old/inventory_cent Executable file
View File

@@ -0,0 +1,18 @@
[master]
10.10.43.244
[worker1]
10.10.43.245
[cluster:children]
master
worker1
[master:vars]
kubernetes_role="master"
runtime="containerd"
[worker1:vars]
kubernetes_role="node"
runtime="containerd"

View File

@@ -0,0 +1,35 @@
[host]
10.10.43.111
10.10.43.112
10.10.43.113
10.10.43.114
10.10.43.115
10.10.43.116
10.10.43.117
10.10.43.118
10.10.43.119
10.10.43.120
10.10.43.121
10.10.43.122
10.10.43.123
10.10.43.124
10.10.43.125
10.10.43.126
10.10.43.127
10.10.43.128
10.10.43.129
10.10.43.130
10.10.43.131
10.10.43.132
10.10.43.133
10.10.43.134
10.10.43.135
10.10.43.136
10.10.43.137
10.10.43.138
10.10.43.139
10.10.43.140
10.10.43.141
[test]
10.10.43.142

View File

@@ -0,0 +1,75 @@
[local]
127.0.0.1
[nas]
10.10.43.42 ansible_user=exemdev2 ansible_port=2222
[aws]
3.39.22.205 ansible_user=ec2-user
[centos]
10.10.43.231 ansible_user=centos
10.10.43.232 ansible_user=centos
10.10.43.233 ansible_user=centos
[redhat]
10.10.43.177 ansible_user=redhat
10.10.43.178 ansible_user=redhat
10.10.43.179 ansible_user=redhat
[ubuntu]
10.10.43.97 ansible_user=ubuntu
10.10.43.234 ansible_user=ubuntu
[agent:children]
centos
redhat
ubuntu
[test]
10.10.43.111 ansible_user=dev2 ansible_port=2222
10.10.43.112 ansible_user=dev2 ansible_port=2222
10.10.43.113 ansible_user=dev2 ansible_port=2222
10.10.43.114 ansible_user=dev2 ansible_port=2222
10.10.43.115 ansible_user=dev2 ansible_port=2222
10.10.43.116 ansible_user=dev2 ansible_port=2222
10.10.43.117 ansible_user=dev2 ansible_port=2222
10.10.43.118 ansible_user=dev2 ansible_port=2222
10.10.43.119 ansible_user=dev2 ansible_port=2222
10.10.43.120 ansible_user=dev2 ansible_port=2222
10.10.43.121 ansible_user=dev2 ansible_port=2222
10.10.43.122 ansible_user=dev2 ansible_port=2222
10.10.43.123 ansible_user=dev2 ansible_port=2222
10.10.43.124 ansible_user=dev2 ansible_port=2222
10.10.43.125 ansible_user=dev2 ansible_port=2222
10.10.43.126 ansible_user=dev2 ansible_port=2222
10.10.43.127 ansible_user=dev2 ansible_port=2222
10.10.43.128 ansible_user=dev2 ansible_port=2222
10.10.43.129 ansible_user=dev2 ansible_port=2222
10.10.43.130 ansible_user=dev2 ansible_port=2222
10.10.43.131 ansible_user=dev2 ansible_port=2222
10.10.43.132 ansible_user=dev2 ansible_port=2222
10.10.43.133 ansible_user=dev2 ansible_port=2222
10.10.43.134 ansible_user=dev2 ansible_port=2222
10.10.43.135 ansible_user=dev2 ansible_port=2222
10.10.43.136 ansible_user=dev2 ansible_port=2222
10.10.43.137 ansible_user=dev2 ansible_port=2222
10.10.43.138 ansible_user=dev2 ansible_port=2222
10.10.43.139 ansible_user=dev2 ansible_port=2222
10.10.43.140 ansible_user=dev2 ansible_port=2222
10.10.43.141 ansible_user=dev2 ansible_port=2222
10.10.43.142 ansible_user=dev2 ansible_port=2222
10.10.43.143 ansible_user=dev2 ansible_port=2222
10.10.43.144 ansible_user=dev2 ansible_port=2222
10.10.43.145 ansible_user=dev2 ansible_port=2222
10.10.43.146 ansible_user=dev2 ansible_port=2222
10.10.43.148 ansible_user=dev2 ansible_port=2222
10.10.43.151 ansible_user=dev2 ansible_port=2222
10.10.43.152 ansible_user=dev2 ansible_port=2222
10.10.43.153 ansible_user=dev2 ansible_port=2222
10.10.43.240 ansible_user=dev2 ansible_port=2222
10.10.43.241 ansible_user=dev2 ansible_port=2222
10.10.43.242 ansible_user=dev2 ansible_port=2222
10.10.43.243 ansible_user=dev2 ansible_port=2222
10.10.43.43 ansible_user=dev2 ansible_port=2222
10.10.43.98 ansible_user=dev2 ansible_port=2222

View File

@@ -0,0 +1,29 @@
[all]
tmp_k8s_master_1 ansible_host=10.10.43.51 ip=10.10.43.51 ansible_user=dev2 ansible_port=2222
tmp_k8s_master_2 ansible_host=10.10.43.52 ip=10.10.43.52 ansible_user=dev2 ansible_port=2222
tmp_k8s_master_3 ansible_host=10.10.43.53 ip=10.10.43.53 ansible_user=dev2 ansible_port=2222
tmp_k8s_worker_1 ansible_host=10.10.43.54 ip=10.10.43.54 ansible_user=dev2 ansible_port=2222
tmp_k8s_worker_2 ansible_host=10.10.43.55 ip=10.10.43.55 ansible_user=dev2 ansible_port=2222
tmp_k8s_worker_3 ansible_host=10.10.43.56 ip=10.10.43.56 ansible_user=dev2 ansible_port=2222
[kube_control_plane]
tmp_k8s_master_1
tmp_k8s_master_2
tmp_k8s_master_3
[etcd]
tmp_k8s_master_1
tmp_k8s_master_2
tmp_k8s_master_3
[kube_node]
tmp_k8s_worker_1
tmp_k8s_worker_2
tmp_k8s_worker_3
[calico_rr]
[k8s_cluster:children]
kube_control_plane
kube_node
calico_rr

View File

@@ -0,0 +1,11 @@
---
- hosts: all
  become: true
  roles:
    - password_change
  vars:
    username: dev2-iac
    adminuser: root
    #manual_password: saasadmin1234
    sshmainport: 2222
    debug_mode: True

View File

@@ -0,0 +1,11 @@
---
- hosts: all
  become: true
  roles:
    - password_change
  vars:
    username: dev2
    adminuser: root
    #manual_password: saasadmin1234
    sshmainport: 2222
    debug_mode: True

View File

@@ -0,0 +1,10 @@
---
- name: "restart"
  hosts: all
  become: yes
  tasks:
    - name: Restart teleport service
      ansible.builtin.systemd:
        name: teleport
        enabled: true
        state: restarted

BIN
ansible/01_old/roles/.DS_Store vendored Normal file

Binary file not shown.

View File

@@ -0,0 +1,38 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View File

@@ -0,0 +1,140 @@
helm_checksum: sha256:72f1c0fcfb17b41b89087e9232e50f20c606e44a0edc2bb9737e05d1c75b8c4f
helm_version: v3.10.2
kubernetes_version: 1.23.16
kubernetes_kubelet_extra_args: ""
kubernetes_kubeadm_init_extra_opts: ""
kubernetes_join_command_extra_opts: ""
kubernetes_pod_network:
  cni: 'calico'
  cidr: '10.96.0.0/12'
kubernetes_calico_manifest_file: https://docs.projectcalico.org/manifests/calico.yaml
kubernetes_metric_server_file: https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
containerd_config:
  version: 2
  root: /var/lib/containerd
  state: /run/containerd
  plugin_dir: ""
  disabled_plugins: []
  required_plugins: []
  oom_score: 0
  grpc:
    address: /run/containerd/containerd.sock
    tcp_address: ""
    tcp_tls_cert: ""
    tcp_tls_key: ""
    uid: 0
    gid: 0
    max_recv_message_size: 16777216
    max_send_message_size: 16777216
  ttrpc:
    address: ""
    uid: 0
    gid: 0
  debug:
    address: ""
    uid: 0
    gid: 0
    level: ""
  metrics:
    address: ""
    grpc_histogram: false
  cgroup:
    path: ""
  timeouts:
    "io.containerd.timeout.shim.cleanup": 5s
    "io.containerd.timeout.shim.load": 5s
    "io.containerd.timeout.shim.shutdown": 3s
    "io.containerd.timeout.task.state": 2s
  plugins:
    "io.containerd.gc.v1.scheduler":
      pause_threshold: 0.02
      deletion_threshold: 0
      mutation_threshold: 100
      schedule_delay: 0s
      startup_delay: 100ms
    "io.containerd.grpc.v1.cri":
      disable_tcp_service: true
      stream_server_address: 127.0.0.1
      stream_server_port: "0"
      stream_idle_timeout: 4h0m0s
      enable_selinux: false
      sandbox_image: k8s.gcr.io/pause:3.1
      stats_collect_period: 10
      systemd_cgroup: false
      enable_tls_streaming: false
      max_container_log_line_size: 16384
      disable_cgroup: false
      disable_apparmor: false
      restrict_oom_score_adj: false
      max_concurrent_downloads: 3
      disable_proc_mount: false
      containerd:
        snapshotter: overlayfs
        default_runtime_name: runc
        no_pivot: false
        default_runtime:
          runtime_type: ""
          runtime_engine: ""
          runtime_root: ""
          privileged_without_host_devices: false
        untrusted_workload_runtime:
          runtime_type: ""
          runtime_engine: ""
          runtime_root: ""
          privileged_without_host_devices: false
        runtimes:
          runc:
            runtime_type: io.containerd.runc.v1
            runtime_engine: ""
            runtime_root: ""
            privileged_without_host_devices: false
      cni:
        bin_dir: /opt/cni/bin
        conf_dir: /etc/cni/net.d
        max_conf_num: 1
        conf_template: ""
      registry:
        configs:
          "10.10.31.243:5000":
            tls:
              insecure_skip_verify: true
        mirrors:
          "docker.io":
            endpoint:
              - https://registry-1.docker.io
          "10.10.31.243:5000":
            endpoint:
              - http://10.10.31.243:5000
      x509_key_pair_streaming:
        tls_cert_file: ""
        tls_key_file: ""
    "io.containerd.internal.v1.opt":
      path: /opt/containerd
    "io.containerd.internal.v1.restart":
      interval: 10s
    "io.containerd.metadata.v1.bolt":
      content_sharing_policy: shared
    "io.containerd.monitor.v1.cgroups":
      no_prometheus: false
    "io.containerd.runtime.v1.linux":
      shim: containerd-shim
      runtime: runc
      runtime_root: ""
      no_shim: false
      shim_debug: false
    "io.containerd.runtime.v2.task":
      platforms:
        - linux/amd64
    "io.containerd.service.v1.diff-service":
      default:
        - walking
    "io.containerd.snapshotter.v1.devmapper":
      root_path: ""
      pool_name: ""
      base_image_size: ""

View File

@@ -0,0 +1,645 @@
#!/bin/sh
set -e
# Docker CE for Linux installation script
#
# See https://docs.docker.com/engine/install/ for the installation steps.
#
# This script is meant for quick & easy install via:
# $ curl -fsSL https://get.docker.com -o get-docker.sh
# $ sh get-docker.sh
#
# For test builds (ie. release candidates):
# $ curl -fsSL https://test.docker.com -o test-docker.sh
# $ sh test-docker.sh
#
# NOTE: Make sure to verify the contents of the script
# you downloaded matches the contents of install.sh
# located at https://github.com/docker/docker-install
# before executing.
#
# Git commit from https://github.com/docker/docker-install when
# the script was uploaded (Should only be modified by upload job):
SCRIPT_COMMIT_SHA="66474034547a96caa0a25be56051ff8b726a1b28"
# strip "v" prefix if present
VERSION="${VERSION#v}"
# The channel to install from:
# * nightly
# * test
# * stable
# * edge (deprecated)
DEFAULT_CHANNEL_VALUE="stable"
if [ -z "$CHANNEL" ]; then
CHANNEL=$DEFAULT_CHANNEL_VALUE
fi
DEFAULT_DOWNLOAD_URL="https://download.docker.com"
if [ -z "$DOWNLOAD_URL" ]; then
DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL
fi
DEFAULT_REPO_FILE="docker-ce.repo"
if [ -z "$REPO_FILE" ]; then
REPO_FILE="$DEFAULT_REPO_FILE"
fi
mirror=''
DRY_RUN=${DRY_RUN:-}
while [ $# -gt 0 ]; do
case "$1" in
--mirror)
mirror="$2"
shift
;;
--dry-run)
DRY_RUN=1
;;
--*)
echo "Illegal option $1"
;;
esac
shift $(( $# > 0 ? 1 : 0 ))
done
case "$mirror" in
Aliyun)
DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
;;
AzureChinaCloud)
DOWNLOAD_URL="https://mirror.azure.cn/docker-ce"
;;
esac
command_exists() {
command -v "$@" > /dev/null 2>&1
}
# version_gte checks if the version specified in $VERSION is at least
# the given CalVer (YY.MM) version. returns 0 (success) if $VERSION is either
# unset (=latest) or newer or equal than the specified version. Returns 1 (fail)
# otherwise.
#
# examples:
#
# VERSION=20.10
# version_gte 20.10 // 0 (success)
# version_gte 19.03 // 0 (success)
# version_gte 21.10 // 1 (fail)
version_gte() {
if [ -z "$VERSION" ]; then
return 0
fi
eval calver_compare "$VERSION" "$1"
}
# calver_compare compares two CalVer (YY.MM) version strings. returns 0 (success)
# if version A is newer or equal than version B, or 1 (fail) otherwise. Patch
# releases and pre-release (-alpha/-beta) are not taken into account
#
# examples:
#
# calver_compare 20.10 19.03 // 0 (success)
# calver_compare 20.10 20.10 // 0 (success)
# calver_compare 19.03 20.10 // 1 (fail)
calver_compare() (
set +x
yy_a="$(echo "$1" | cut -d'.' -f1)"
yy_b="$(echo "$2" | cut -d'.' -f1)"
if [ "$yy_a" -lt "$yy_b" ]; then
return 1
fi
if [ "$yy_a" -gt "$yy_b" ]; then
return 0
fi
mm_a="$(echo "$1" | cut -d'.' -f2)"
mm_b="$(echo "$2" | cut -d'.' -f2)"
if [ "${mm_a#0}" -lt "${mm_b#0}" ]; then
return 1
fi
return 0
)
is_dry_run() {
if [ -z "$DRY_RUN" ]; then
return 1
else
return 0
fi
}
is_wsl() {
case "$(uname -r)" in
*microsoft* ) true ;; # WSL 2
*Microsoft* ) true ;; # WSL 1
* ) false;;
esac
}
is_darwin() {
case "$(uname -s)" in
*darwin* ) true ;;
*Darwin* ) true ;;
* ) false;;
esac
}
deprecation_notice() {
distro=$1
distro_version=$2
echo
printf "\033[91;1mDEPRECATION WARNING\033[0m\n"
printf " This Linux distribution (\033[1m%s %s\033[0m) reached end-of-life and is no longer supported by this script.\n" "$distro" "$distro_version"
echo " No updates or security fixes will be released for this distribution, and users are recommended"
echo " to upgrade to a currently maintained version of $distro."
echo
printf "Press \033[1mCtrl+C\033[0m now to abort this script, or wait for the installation to continue."
echo
sleep 10
}
get_distribution() {
lsb_dist=""
# Every system that we officially support has /etc/os-release
if [ -r /etc/os-release ]; then
lsb_dist="$(. /etc/os-release && echo "$ID")"
fi
# Returning an empty string here should be alright since the
# case statements don't act unless you provide an actual value
echo "$lsb_dist"
}
echo_docker_as_nonroot() {
if is_dry_run; then
return
fi
if command_exists docker && [ -e /var/run/docker.sock ]; then
(
set -x
$sh_c 'docker version'
) || true
fi
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
echo
echo "================================================================================"
echo
if version_gte "20.10"; then
echo "To run Docker as a non-privileged user, consider setting up the"
echo "Docker daemon in rootless mode for your user:"
echo
echo " dockerd-rootless-setuptool.sh install"
echo
echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode."
echo
fi
echo
echo "To run the Docker daemon as a fully privileged service, but granting non-root"
echo "users access, refer to https://docs.docker.com/go/daemon-access/"
echo
echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent"
echo " to root access on the host. Refer to the 'Docker daemon attack surface'"
echo " documentation for details: https://docs.docker.com/go/attack-surface/"
echo
echo "================================================================================"
echo
}
# Check if this is a forked Linux distro
check_forked() {
# Check for lsb_release command existence, it usually exists in forked distros
if command_exists lsb_release; then
# Check if the `-u` option is supported
set +e
lsb_release -a -u > /dev/null 2>&1
lsb_release_exit_code=$?
set -e
# Check if the command has exited successfully, it means we're in a forked distro
if [ "$lsb_release_exit_code" = "0" ]; then
# Print info about current distro
cat <<-EOF
You're using '$lsb_dist' version '$dist_version'.
EOF
# Get the upstream release info
lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]')
dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]')
# Print info about upstream distro
cat <<-EOF
Upstream release is '$lsb_dist' version '$dist_version'.
EOF
else
if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
if [ "$lsb_dist" = "osmc" ]; then
# OSMC runs Raspbian
lsb_dist=raspbian
else
# We're Debian and don't even know it!
lsb_dist=debian
fi
dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
case "$dist_version" in
11)
dist_version="bullseye"
;;
10)
dist_version="buster"
;;
9)
dist_version="stretch"
;;
8)
dist_version="jessie"
;;
esac
fi
fi
fi
}
do_install() {
echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA"
if command_exists docker; then
cat >&2 <<-'EOF'
Warning: the "docker" command appears to already exist on this system.
If you already have Docker installed, this script can cause trouble, which is
why we're displaying this warning and provide the opportunity to cancel the
installation.
If you installed the current Docker package using this script and are using it
again to update Docker, you can safely ignore this message.
You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 20 )
fi
user="$(id -un 2>/dev/null || true)"
sh_c='sh -c'
if [ "$user" != 'root' ]; then
if command_exists sudo; then
sh_c='sudo -E sh -c'
elif command_exists su; then
sh_c='su -c'
else
cat >&2 <<-'EOF'
Error: this installer needs the ability to run commands as root.
We are unable to find either "sudo" or "su" available to make this happen.
EOF
exit 1
fi
fi
if is_dry_run; then
sh_c="echo"
fi
# perform some very rudimentary platform detection
lsb_dist=$( get_distribution )
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
if is_wsl; then
echo
echo "WSL DETECTED: We recommend using Docker Desktop for Windows."
echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
echo
cat >&2 <<-'EOF'
You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 20 )
fi
case "$lsb_dist" in
ubuntu)
if command_exists lsb_release; then
dist_version="$(lsb_release --codename | cut -f2)"
fi
if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
fi
;;
debian|raspbian)
dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
case "$dist_version" in
11)
dist_version="bullseye"
;;
10)
dist_version="buster"
;;
9)
dist_version="stretch"
;;
8)
dist_version="jessie"
;;
esac
;;
centos|rhel|sles)
if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
fi
;;
*)
if command_exists lsb_release; then
dist_version="$(lsb_release --release | cut -f2)"
fi
if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
fi
;;
esac
# Check if this is a forked Linux distro
check_forked
# Print deprecation warnings for distro versions that recently reached EOL,
# but may still be commonly used (especially LTS versions).
case "$lsb_dist.$dist_version" in
debian.stretch|debian.jessie)
deprecation_notice "$lsb_dist" "$dist_version"
;;
raspbian.stretch|raspbian.jessie)
deprecation_notice "$lsb_dist" "$dist_version"
;;
ubuntu.xenial|ubuntu.trusty)
deprecation_notice "$lsb_dist" "$dist_version"
;;
fedora.*)
if [ "$dist_version" -lt 33 ]; then
deprecation_notice "$lsb_dist" "$dist_version"
fi
;;
esac
# Run setup for each distro accordingly
case "$lsb_dist" in
ubuntu|debian|raspbian)
pre_reqs="apt-transport-https ca-certificates curl"
if ! command -v gpg > /dev/null; then
pre_reqs="$pre_reqs gnupg"
fi
apt_repo="deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL"
(
if ! is_dry_run; then
set -x
fi
$sh_c 'apt-get update -qq >/dev/null'
$sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pre_reqs >/dev/null"
$sh_c 'mkdir -p /etc/apt/keyrings && chmod -R 0755 /etc/apt/keyrings'
$sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg"
$sh_c "chmod a+r /etc/apt/keyrings/docker.gpg"
$sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list"
$sh_c 'apt-get update -qq >/dev/null'
)
pkg_version=""
if [ -n "$VERSION" ]; then
if is_dry_run; then
echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
else
# Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel
pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g")"
search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
pkg_version="$($sh_c "$search_command")"
echo "INFO: Searching repository for VERSION '$VERSION'"
echo "INFO: $search_command"
if [ -z "$pkg_version" ]; then
echo
echo "ERROR: '$VERSION' not found amongst apt-cache madison results"
echo
exit 1
fi
if version_gte "18.09"; then
search_command="apt-cache madison 'docker-ce-cli' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
echo "INFO: $search_command"
cli_pkg_version="=$($sh_c "$search_command")"
fi
pkg_version="=$pkg_version"
fi
fi
(
pkgs="docker-ce${pkg_version%=}"
if version_gte "18.09"; then
# older versions didn't ship the cli and containerd as separate packages
pkgs="$pkgs docker-ce-cli${cli_pkg_version%=} containerd.io"
fi
if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then
# also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently)
pkgs="$pkgs docker-scan-plugin"
fi
if version_gte "20.10"; then
pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
fi
if version_gte "23.0"; then
pkgs="$pkgs docker-buildx-plugin"
fi
if ! is_dry_run; then
set -x
fi
$sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pkgs >/dev/null"
)
echo_docker_as_nonroot
exit 0
;;
centos|fedora|rhel)
if [ "$(uname -m)" != "s390x" ] && [ "$lsb_dist" = "rhel" ]; then
echo "Packages for RHEL are currently only available for s390x."
exit 1
fi
if [ "$lsb_dist" = "fedora" ]; then
pkg_manager="dnf"
config_manager="dnf config-manager"
enable_channel_flag="--set-enabled"
disable_channel_flag="--set-disabled"
pre_reqs="dnf-plugins-core"
pkg_suffix="fc$dist_version"
else
pkg_manager="yum"
config_manager="yum-config-manager"
enable_channel_flag="--enable"
disable_channel_flag="--disable"
pre_reqs="yum-utils"
pkg_suffix="el"
fi
repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
(
if ! is_dry_run; then
set -x
fi
$sh_c "$pkg_manager install -y -q $pre_reqs"
$sh_c "$config_manager --add-repo $repo_file_url"
if [ "$CHANNEL" != "stable" ]; then
$sh_c "$config_manager $disable_channel_flag docker-ce-*"
$sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL"
fi
$sh_c "$pkg_manager makecache"
)
pkg_version=""
if [ -n "$VERSION" ]; then
if is_dry_run; then
echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
else
pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix"
search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
pkg_version="$($sh_c "$search_command")"
echo "INFO: Searching repository for VERSION '$VERSION'"
echo "INFO: $search_command"
if [ -z "$pkg_version" ]; then
echo
echo "ERROR: '$VERSION' not found amongst $pkg_manager list results"
echo
exit 1
fi
if version_gte "18.09"; then
# older versions don't support a cli package
search_command="$pkg_manager list --showduplicates 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)"
fi
# Cut out the epoch and prefix with a '-'
pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)"
fi
fi
(
pkgs="docker-ce$pkg_version"
if version_gte "18.09"; then
# older versions didn't ship the cli and containerd as separate packages
if [ -n "$cli_pkg_version" ]; then
pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io"
else
pkgs="$pkgs docker-ce-cli containerd.io"
fi
fi
if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then
# also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently)
pkgs="$pkgs docker-scan-plugin"
fi
if version_gte "20.10"; then
pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
fi
if version_gte "23.0"; then
pkgs="$pkgs docker-buildx-plugin"
fi
if ! is_dry_run; then
set -x
fi
$sh_c "$pkg_manager install -y -q $pkgs"
)
echo_docker_as_nonroot
exit 0
;;
sles)
if [ "$(uname -m)" != "s390x" ]; then
echo "Packages for SLES are currently only available for s390x"
exit 1
fi
if [ "$dist_version" = "15.3" ]; then
sles_version="SLE_15_SP3"
else
sles_minor_version="${dist_version##*.}"
sles_version="15.$sles_minor_version"
fi
opensuse_repo="https://download.opensuse.org/repositories/security:SELinux/$sles_version/security:SELinux.repo"
repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
pre_reqs="ca-certificates curl libseccomp2 awk"
(
if ! is_dry_run; then
set -x
fi
$sh_c "zypper install -y $pre_reqs"
$sh_c "zypper addrepo $repo_file_url"
if ! is_dry_run; then
cat >&2 <<-'EOF'
WARNING!!
openSUSE repository (https://download.opensuse.org/repositories/security:SELinux) will be enabled now.
Do you wish to continue?
You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 30 )
fi
$sh_c "zypper addrepo $opensuse_repo"
$sh_c "zypper --gpg-auto-import-keys refresh"
$sh_c "zypper lr -d"
)
pkg_version=""
if [ -n "$VERSION" ]; then
if is_dry_run; then
echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
else
pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g")"
search_command="zypper search -s --match-exact 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'"
pkg_version="$($sh_c "$search_command")"
echo "INFO: Searching repository for VERSION '$VERSION'"
echo "INFO: $search_command"
if [ -z "$pkg_version" ]; then
echo
echo "ERROR: '$VERSION' not found amongst zypper list results"
echo
exit 1
fi
search_command="zypper search -s --match-exact 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'"
# It's okay for cli_pkg_version to be blank, since older versions don't support a cli package
cli_pkg_version="$($sh_c "$search_command")"
pkg_version="-$pkg_version"
fi
fi
(
pkgs="docker-ce$pkg_version"
if version_gte "18.09"; then
if [ -n "$cli_pkg_version" ]; then
# older versions didn't ship the cli and containerd as separate packages
pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io"
else
pkgs="$pkgs docker-ce-cli containerd.io"
fi
fi
if version_gte "20.10"; then
pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
fi
if version_gte "23.0"; then
pkgs="$pkgs docker-buildx-plugin"
fi
if ! is_dry_run; then
set -x
fi
$sh_c "zypper -q install -y $pkgs"
)
echo_docker_as_nonroot
exit 0
;;
*)
if [ -z "$lsb_dist" ]; then
if is_darwin; then
echo
echo "ERROR: Unsupported operating system 'macOS'"
echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
echo
exit 1
fi
fi
echo
echo "ERROR: Unsupported distribution '$lsb_dist'"
echo
exit 1
;;
esac
exit 1
}
# The entire installer is wrapped up in a function so that we have some
# protection against only getting half the file during "curl | sh":
# nothing executes until this final line, so a truncated download fails
# with a syntax error instead of running a partial script.
do_install

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,445 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.2.1
- The sha of kube-webhook-certgen image & the opentelemetry image, in values file, was changed to new images built on alpine-v3.16.1
- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today"
### 4.2.0
- Support for Kubernetes v1.19.0 was removed
- "[8810](https://github.com/kubernetes/ingress-nginx/pull/8810) Prepare for v1.3.0"
- "[8808](https://github.com/kubernetes/ingress-nginx/pull/8808) revert arch var name"
- "[8805](https://github.com/kubernetes/ingress-nginx/pull/8805) Bump k8s.io/klog/v2 from 2.60.1 to 2.70.1"
- "[8803](https://github.com/kubernetes/ingress-nginx/pull/8803) Update to nginx base with alpine v3.16"
- "[8802](https://github.com/kubernetes/ingress-nginx/pull/8802) chore: start v1.3.0 release process"
- "[8798](https://github.com/kubernetes/ingress-nginx/pull/8798) Add v1.24.0 to test matrix"
- "[8796](https://github.com/kubernetes/ingress-nginx/pull/8796) fix: add MAC_OS variable for static-check"
- "[8793](https://github.com/kubernetes/ingress-nginx/pull/8793) changed to alpine-v3.16"
- "[8781](https://github.com/kubernetes/ingress-nginx/pull/8781) Bump github.com/stretchr/testify from 1.7.5 to 1.8.0"
- "[8778](https://github.com/kubernetes/ingress-nginx/pull/8778) chore: remove stable.txt from release process"
- "[8775](https://github.com/kubernetes/ingress-nginx/pull/8775) Remove stable"
- "[8773](https://github.com/kubernetes/ingress-nginx/pull/8773) Bump github/codeql-action from 2.1.14 to 2.1.15"
- "[8772](https://github.com/kubernetes/ingress-nginx/pull/8772) Bump ossf/scorecard-action from 1.1.1 to 1.1.2"
- "[8771](https://github.com/kubernetes/ingress-nginx/pull/8771) fix bullet md format"
- "[8770](https://github.com/kubernetes/ingress-nginx/pull/8770) Add condition for monitoring.coreos.com/v1 API"
- "[8769](https://github.com/kubernetes/ingress-nginx/pull/8769) Fix typos and add links to developer guide"
- "[8767](https://github.com/kubernetes/ingress-nginx/pull/8767) change v1.2.0 to v1.2.1 in deploy doc URLs"
- "[8765](https://github.com/kubernetes/ingress-nginx/pull/8765) Bump github/codeql-action from 1.0.26 to 2.1.14"
- "[8752](https://github.com/kubernetes/ingress-nginx/pull/8752) Bump github.com/spf13/cobra from 1.4.0 to 1.5.0"
- "[8751](https://github.com/kubernetes/ingress-nginx/pull/8751) Bump github.com/stretchr/testify from 1.7.2 to 1.7.5"
- "[8750](https://github.com/kubernetes/ingress-nginx/pull/8750) added announcement"
- "[8740](https://github.com/kubernetes/ingress-nginx/pull/8740) change sha e2etestrunner and echoserver"
- "[8738](https://github.com/kubernetes/ingress-nginx/pull/8738) Update docs to make it easier for noobs to follow step by step"
- "[8737](https://github.com/kubernetes/ingress-nginx/pull/8737) updated baseimage sha"
- "[8736](https://github.com/kubernetes/ingress-nginx/pull/8736) set ld-musl-path"
- "[8733](https://github.com/kubernetes/ingress-nginx/pull/8733) feat: migrate leaderelection lock to leases"
- "[8726](https://github.com/kubernetes/ingress-nginx/pull/8726) prometheus metric: upstream_latency_seconds"
- "[8720](https://github.com/kubernetes/ingress-nginx/pull/8720) Ci pin deps"
- "[8719](https://github.com/kubernetes/ingress-nginx/pull/8719) Working OpenTelemetry sidecar (base nginx image)"
- "[8714](https://github.com/kubernetes/ingress-nginx/pull/8714) Create Openssf scorecard"
- "[8708](https://github.com/kubernetes/ingress-nginx/pull/8708) Bump github.com/prometheus/common from 0.34.0 to 0.35.0"
- "[8703](https://github.com/kubernetes/ingress-nginx/pull/8703) Bump actions/dependency-review-action from 1 to 2"
- "[8701](https://github.com/kubernetes/ingress-nginx/pull/8701) Fix several typos"
- "[8699](https://github.com/kubernetes/ingress-nginx/pull/8699) fix the gosec test and a make target for it"
- "[8698](https://github.com/kubernetes/ingress-nginx/pull/8698) Bump actions/upload-artifact from 2.3.1 to 3.1.0"
- "[8697](https://github.com/kubernetes/ingress-nginx/pull/8697) Bump actions/setup-go from 2.2.0 to 3.2.0"
- "[8695](https://github.com/kubernetes/ingress-nginx/pull/8695) Bump actions/download-artifact from 2 to 3"
- "[8694](https://github.com/kubernetes/ingress-nginx/pull/8694) Bump crazy-max/ghaction-docker-buildx from 1.6.2 to 3.3.1"
### 4.1.2
- "[8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed"
- "[8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter"
- "[8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart"
### 4.1.0
- "[8481](https://github.com/kubernetes/ingress-nginx/pull/8481) Fix log creation in chroot script"
- "[8479](https://github.com/kubernetes/ingress-nginx/pull/8479) changed nginx base img tag to img built with alpine3.14.6"
- "[8478](https://github.com/kubernetes/ingress-nginx/pull/8478) update base images and protobuf gomod"
- "[8468](https://github.com/kubernetes/ingress-nginx/pull/8468) Fallback to ngx.var.scheme for redirectScheme with use-forward-headers when X-Forwarded-Proto is empty"
- "[8456](https://github.com/kubernetes/ingress-nginx/pull/8456) Implement object deep inspector"
- "[8455](https://github.com/kubernetes/ingress-nginx/pull/8455) Update dependencies"
- "[8454](https://github.com/kubernetes/ingress-nginx/pull/8454) Update index.md"
- "[8447](https://github.com/kubernetes/ingress-nginx/pull/8447) typo fixing"
- "[8446](https://github.com/kubernetes/ingress-nginx/pull/8446) Fix suggested annotation-value-word-blocklist"
- "[8444](https://github.com/kubernetes/ingress-nginx/pull/8444) replace deprecated topology key in example with current one"
- "[8443](https://github.com/kubernetes/ingress-nginx/pull/8443) Add dependency review enforcement"
- "[8434](https://github.com/kubernetes/ingress-nginx/pull/8434) added new auth-tls-match-cn annotation"
- "[8426](https://github.com/kubernetes/ingress-nginx/pull/8426) Bump github.com/prometheus/common from 0.32.1 to 0.33.0"
### 4.0.18
- "[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build"
- "[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build"
- "[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge"
- "[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241"
- "[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts"
- "[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error"
- "[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation"
- "[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric"
- "[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code."
- "[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image"
- "[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests"
- "[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint"
- "[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1"
- "[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0"
- "[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint"
- "[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial"
- "[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera"
- "[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment"
- "[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation"
- "[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell"
- "[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor"
- "[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations"
- "[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0"
- "[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account"
- "[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description"
- "[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests"
- "[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values"
- "[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1"
- "[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs"
- "[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits"
- "[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations"
- "[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT"
### 4.0.15
- [8120] https://github.com/kubernetes/ingress-nginx/pull/8120 Update go in runner and release v1.1.1
- [8119] https://github.com/kubernetes/ingress-nginx/pull/8119 Update to go v1.17.6
- [8118] https://github.com/kubernetes/ingress-nginx/pull/8118 Remove deprecated libraries, update other libs
- [8117] https://github.com/kubernetes/ingress-nginx/pull/8117 Fix codegen errors
- [8115] https://github.com/kubernetes/ingress-nginx/pull/8115 chart/ghaction: set the correct permission to have access to push a release
- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparison of P…
- [8088] https://github.com/kubernetes/ingress-nginx/pull/8088 Fix Edit this page link to use main branch
- [8072] https://github.com/kubernetes/ingress-nginx/pull/8072 Expose GeoIP2 Continent code as variable
- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 docs(charts): using helm-docs for chart
- [8058] https://github.com/kubernetes/ingress-nginx/pull/8058 Bump github.com/spf13/cobra from 1.2.1 to 1.3.0
- [8054] https://github.com/kubernetes/ingress-nginx/pull/8054 Bump google.golang.org/grpc from 1.41.0 to 1.43.0
- [8051] https://github.com/kubernetes/ingress-nginx/pull/8051 align bug report with feature request regarding kind documentation
- [8046] https://github.com/kubernetes/ingress-nginx/pull/8046 Report expired certificates (#8045)
- [8044] https://github.com/kubernetes/ingress-nginx/pull/8044 remove G109 check till gosec resolves issues
- [8042] https://github.com/kubernetes/ingress-nginx/pull/8042 docs_multiple_instances_one_cluster_ticket_7543
- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executible name
- [8035] https://github.com/kubernetes/ingress-nginx/pull/8035 Comment busy owners
- [8029] https://github.com/kubernetes/ingress-nginx/pull/8029 Add stream-snippet as a ConfigMap and Annotation option
- [8023] https://github.com/kubernetes/ingress-nginx/pull/8023 fix nginx compilation flags
- [8021] https://github.com/kubernetes/ingress-nginx/pull/8021 Disable default modsecurity_rules_file if modsecurity-snippet is specified
- [8019] https://github.com/kubernetes/ingress-nginx/pull/8019 Revise main documentation page
- [8018] https://github.com/kubernetes/ingress-nginx/pull/8018 Preserve order of plugin invocation
- [8015] https://github.com/kubernetes/ingress-nginx/pull/8015 Add newline indenting to admission webhook annotations
- [8014] https://github.com/kubernetes/ingress-nginx/pull/8014 Add link to example error page manifest in docs
- [8009] https://github.com/kubernetes/ingress-nginx/pull/8009 Fix spelling in documentation and top-level files
- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml
- [8003] https://github.com/kubernetes/ingress-nginx/pull/8003 Minor improvements (formatting, consistency) in install guide
- [8001] https://github.com/kubernetes/ingress-nginx/pull/8001 fix: go-grpc Dockerfile
- [7999] https://github.com/kubernetes/ingress-nginx/pull/7999 images: use k8s-staging-test-infra/gcb-docker-gcloud
- [7996] https://github.com/kubernetes/ingress-nginx/pull/7996 doc: improvement
- [7983] https://github.com/kubernetes/ingress-nginx/pull/7983 Fix a couple of misspellings in the annotations documentation.
- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 allow set annotations for admission Jobs
- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to defaul server
- [7975] https://github.com/kubernetes/ingress-nginx/pull/7975 add legacy version update v0.50.0 to main changelog
- [7972] https://github.com/kubernetes/ingress-nginx/pull/7972 updated service upstream definition
### 4.0.14
- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 Using helm-docs to populate values table in README.md
### 4.0.13
- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml
### 4.0.12
- [7978] https://github.com/kubernetes/ingress-nginx/pull/7979 Support custom annotations in admissions Jobs
### 4.0.11
- [7873] https://github.com/kubernetes/ingress-nginx/pull/7873 Makes the [appProtocol](https://kubernetes.io/docs/concepts/services-networking/_print/#application-protocol) field optional.
### 4.0.10
- [7964] https://github.com/kubernetes/ingress-nginx/pull/7964 Update controller version to v1.1.0
### 4.0.9
- [6992] https://github.com/kubernetes/ingress-nginx/pull/6992 Add ability to specify labels for all resources
### 4.0.7
- [7923] https://github.com/kubernetes/ingress-nginx/pull/7923 Release v1.0.5 of ingress-nginx
- [7806] https://github.com/kubernetes/ingress-nginx/pull/7806 Choice option for internal/external loadbalancer type service
### 4.0.6
- [7804] https://github.com/kubernetes/ingress-nginx/pull/7804 Release v1.0.4 of ingress-nginx
- [7651] https://github.com/kubernetes/ingress-nginx/pull/7651 Support ipFamilyPolicy and ipFamilies fields in Helm Chart
- [7798] https://github.com/kubernetes/ingress-nginx/pull/7798 Exoscale: use HTTP Healthcheck mode
- [7793] https://github.com/kubernetes/ingress-nginx/pull/7793 Update kube-webhook-certgen to v1.1.1
### 4.0.5
- [7740] https://github.com/kubernetes/ingress-nginx/pull/7740 Release v1.0.3 of ingress-nginx
### 4.0.3
- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx
### 4.0.2
- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx
### 4.0.1
- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx
### 3.34.0
- [7256] https://github.com/kubernetes/ingress-nginx/pull/7256 Add namespace field in the namespace scoped resource templates
### 3.33.0
- [7164] https://github.com/kubernetes/ingress-nginx/pull/7164 Update nginx to v1.20.1
### 3.32.0
- [7117] https://github.com/kubernetes/ingress-nginx/pull/7117 Add annotations for HPA
### 3.31.0
- [7137] https://github.com/kubernetes/ingress-nginx/pull/7137 Add support for custom probes
### 3.30.0
- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints
### 3.29.0
- [X] [#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor
### 3.28.0
- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs
### 3.27.0
- Update ingress-nginx v0.45.0
### 3.26.0
- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics
### 3.25.0
- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken
### 3.24.0
- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment
### 3.23.0
- Update ingress-nginx v0.44.0
### 3.22.0
- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file
- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart
### 3.21.0
- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject
- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values
- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled
- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1
### 3.20.1
- Do not create KEDA in case of DaemonSets.
- Fix KEDA v2 definition
### 3.20.0
- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled.
### 3.19.0
- Update ingress-nginx v0.43.0
### 3.18.0
- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy
- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters
### 3.17.0
- Update ingress-nginx v0.42.0
### 3.16.1
- Fix chart-releaser action
### 3.16.0
- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service
### 3.15.1
- Fix chart-releaser action
### 3.15.0
- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml
### 3.14.0
- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend
### 3.13.0
- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable
### 3.12.0
- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs
### 3.11.1
- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling
### 3.11.0
- Support Keda Autoscaling
### 3.10.1
- Fix regression introduced in 0.41.0 with external authentication
### 3.10.0
- Fix routing regression introduced in 0.41.0 with PathType Exact
### 3.9.0
- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling
### 3.8.0
- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image
- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs
- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend
- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix controller service annotations
- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog
### 3.7.1
- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart
### 3.7.0
- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes [#6315](https://github.com/kubernetes/ingress-nginx/issues/6315)
### 3.6.0
- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector
### 3.5.1
- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release
### 3.5.0
- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations
### 3.4.0
- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288
### 3.3.1
- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart
- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link
- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0
### 3.3.1
- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test
### 3.3.0
- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values
- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort
- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression
- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules
### 3.0.0
- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update chart requirements
### 2.16.0
- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller
### 2.15.0
- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec
### 2.14.0
- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration
### 2.13.0
- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0
### 2.13.0
- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0
- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip
### 2.12.1
- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples
### 2.12.0
- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels
- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting
### 2.11.3
- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH
### 2.11.2
- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version
### 2.11.1
- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1
### 2.11.0
- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0
- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe
### 2.10.0
- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image
### 2.9.1
- [X] [#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls because numeric values need to be presented as strings (#5823)
### 2.9.0
- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues
### TODO
Keep building the changelog using *git log charts* checking the tag

View File

@@ -0,0 +1,23 @@
annotations:
artifacthub.io/changes: |
- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today"
- "fix permissions about configmap"
artifacthub.io/prerelease: "false"
apiVersion: v2
appVersion: 1.3.1
description: Ingress controller for Kubernetes using NGINX as a reverse proxy and
load balancer
home: https://github.com/kubernetes/ingress-nginx
icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png
keywords:
- ingress
- nginx
kubeVersion: '>=1.20.0-0'
maintainers:
- name: rikatz
- name: strongjz
- name: tao12345666333
name: ingress-nginx
sources:
- https://github.com/kubernetes/ingress-nginx
version: 4.2.5

View File

@@ -0,0 +1,10 @@
# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
approvers:
- ingress-nginx-helm-maintainers
reviewers:
- ingress-nginx-helm-reviewers
labels:
- area/helm

View File

@@ -0,0 +1,494 @@
# ingress-nginx
[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
![Version: 4.2.5](https://img.shields.io/badge/Version-4.2.5-informational?style=flat-square) ![AppVersion: 1.3.1](https://img.shields.io/badge/AppVersion-1.3.1-informational?style=flat-square)
To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Chart version 3.x.x: Kubernetes v1.16+
- Chart version 4.x.x and above: Kubernetes v1.19+
## Get Repo Info
```console
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
```
## Install Chart
**Important:** only helm3 is supported
```console
helm install [RELEASE_NAME] ingress-nginx/ingress-nginx
```
The command deploys ingress-nginx on the Kubernetes cluster in the default configuration.
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
## Uninstall Chart
```console
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
## Upgrading Chart
```console
helm upgrade [RELEASE_NAME] [CHART] --install
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### Upgrading With Zero Downtime in Production
By default the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
### Migrating from stable/nginx-ingress
There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart:
1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one
1. For critical services in production that require zero-downtime, you will want to:
1. [Install](#install-chart) a second Ingress controller
1. Redirect your DNS traffic from the old controller to the new controller
1. Log traffic from both controllers during this changeover
1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production)
Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts.
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
```console
helm show values ingress-nginx/ingress-nginx
```
### PodDisruptionBudget
Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one,
else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
### Prometheus Metrics
The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`.
You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`)
### ingress-nginx nginx\_status page/stats server
Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller:
- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost.
You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server
### ExternalDNS Service Configuration
Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service:
```yaml
controller:
service:
annotations:
external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
```
### AWS L7 ELB with SSL Termination
Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml):
```yaml
controller:
service:
targetPorts:
http: http
https: http
annotations:
service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
```
### AWS route53-mapper
To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label:
```yaml
controller:
service:
labels:
dns: "route53"
annotations:
domainName: "kubernetes-example.com"
```
### Additional Internal Load Balancer
This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL.
You'll need to set both the following values:
`controller.service.internal.enabled`
`controller.service.internal.annotations`
If one of them is missing, the internal load balancer will not be deployed. For example, you may have `controller.service.internal.enabled=true` but no annotations set; in this case no action will be taken.
`controller.service.internal.annotations` varies with the cloud service you're using.
Example for AWS:
```yaml
controller:
service:
internal:
enabled: true
annotations:
# Create internal ELB
service.beta.kubernetes.io/aws-load-balancer-internal: "true"
# Any other annotation can be declared here.
```
Example for GCE:
```yaml
controller:
service:
internal:
enabled: true
annotations:
      # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
# For GKE versions 1.17 and later
networking.gke.io/load-balancer-type: "Internal"
# For earlier versions
# cloud.google.com/load-balancer-type: "Internal"
# Any other annotation can be declared here.
```
Example for Azure:
```yaml
controller:
service:
annotations:
# Create internal LB
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
# Any other annotation can be declared here.
```
Example for Oracle Cloud Infrastructure:
```yaml
controller:
service:
annotations:
# Create internal LB
service.beta.kubernetes.io/oci-load-balancer-internal: "true"
# Any other annotation can be declared here.
```
A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object.
Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
### Ingress Admission Webhooks
With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
**This feature is enabled by default since 0.31.0.**
nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521)
### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this:
```console
Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
```
Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error.
As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered.
## Requirements
Kubernetes: `>=1.20.0-0`
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| commonLabels | object | `{}` | |
| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers |
| controller.admissionWebhooks.annotations | object | `{}` | |
| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | |
| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | |
| controller.admissionWebhooks.enabled | bool | `true` | |
| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one |
| controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set |
| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use |
| controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | |
| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks |
| controller.admissionWebhooks.namespaceSelector | object | `{}` | |
| controller.admissionWebhooks.networkPolicyEnabled | bool | `false` | |
| controller.admissionWebhooks.objectSelector | object | `{}` | |
| controller.admissionWebhooks.patch.enabled | bool | `true` | |
| controller.admissionWebhooks.patch.image.digest | string | `"sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"` | |
| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | |
| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | |
| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | |
| controller.admissionWebhooks.patch.image.tag | string | `"v1.3.0"` | |
| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources |
| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | |
| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | |
| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job # |
| controller.admissionWebhooks.patch.securityContext.fsGroup | int | `2000` | |
| controller.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | |
| controller.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | |
| controller.admissionWebhooks.patch.tolerations | list | `[]` | |
| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | |
| controller.admissionWebhooks.port | int | `8443` | |
| controller.admissionWebhooks.service.annotations | object | `{}` | |
| controller.admissionWebhooks.service.externalIPs | list | `[]` | |
| controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | |
| controller.admissionWebhooks.service.servicePort | int | `443` | |
| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | |
| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # |
| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected |
| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet # |
| controller.autoscaling.behavior | object | `{}` | |
| controller.autoscaling.enabled | bool | `false` | |
| controller.autoscaling.maxReplicas | int | `11` | |
| controller.autoscaling.minReplicas | int | `1` | |
| controller.autoscaling.targetCPUUtilizationPercentage | int | `50` | |
| controller.autoscaling.targetMemoryUtilizationPercentage | int | `50` | |
| controller.autoscalingTemplate | list | `[]` | |
| controller.config | object | `{}` | Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ |
| controller.configAnnotations | object | `{}` | Annotations to be added to the controller config configuration configmap. |
| controller.configMapNamespace | string | `""` | Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) |
| controller.containerName | string | `"controller"` | Configures the controller container name |
| controller.containerPort | object | `{"http":80,"https":443}` | Configures the ports that the nginx-controller listens on |
| controller.customTemplate.configMapKey | string | `""` | |
| controller.customTemplate.configMapName | string | `""` | |
| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. |
| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. |
| controller.electionID | string | `"ingress-controller-leader"` | Election ID to use for status update |
| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. # ref: https://github.com/microsoft/mimalloc # |
| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one |
| controller.extraArgs | object | `{}` | Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use |
| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. |
| controller.extraEnvs | list | `[]` | Additional environment variables to set |
| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. |
| controller.extraModules | list | `[]` | |
| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. |
| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. |
| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the `hostNetwork: true` mode. |
| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. |
| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged |
| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not |
| controller.hostPort.ports.http | int | `80` | 'hostPort' http port |
| controller.hostPort.ports.https | int | `443` | 'hostPort' https port |
| controller.hostname | object | `{}` | Optionally customize the pod hostname. |
| controller.image.allowPrivilegeEscalation | bool | `true` | |
| controller.image.chroot | bool | `false` | |
| controller.image.digest | string | `"sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974"` | |
| controller.image.digestChroot | string | `"sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1"` | |
| controller.image.image | string | `"ingress-nginx/controller"` | |
| controller.image.pullPolicy | string | `"IfNotPresent"` | |
| controller.image.registry | string | `"registry.k8s.io"` | |
| controller.image.runAsUser | int | `101` | |
| controller.image.tag | string | `"v1.3.1"` | |
| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation |
| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). |
| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass |
| controller.ingressClassResource.default | bool | `false` | Is this the default ingressClass for the cluster |
| controller.ingressClassResource.enabled | bool | `true` | Is this ingressClass enabled or not |
| controller.ingressClassResource.name | string | `"nginx"` | Name of the ingressClass |
| controller.ingressClassResource.parameters | object | `{}` | Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. |
| controller.keda.apiVersion | string | `"keda.sh/v1alpha1"` | |
| controller.keda.behavior | object | `{}` | |
| controller.keda.cooldownPeriod | int | `300` | |
| controller.keda.enabled | bool | `false` | |
| controller.keda.maxReplicas | int | `11` | |
| controller.keda.minReplicas | int | `1` | |
| controller.keda.pollingInterval | int | `30` | |
| controller.keda.restoreToOriginalReplicaCount | bool | `false` | |
| controller.keda.scaledObject.annotations | object | `{}` | |
| controller.keda.triggers | list | `[]` | |
| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` |
| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels # |
| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. # |
| controller.livenessProbe.failureThreshold | int | `5` | |
| controller.livenessProbe.httpGet.path | string | `"/healthz"` | |
| controller.livenessProbe.httpGet.port | int | `10254` | |
| controller.livenessProbe.httpGet.scheme | string | `"HTTP"` | |
| controller.livenessProbe.initialDelaySeconds | int | `10` | |
| controller.livenessProbe.periodSeconds | int | `10` | |
| controller.livenessProbe.successThreshold | int | `1` | |
| controller.livenessProbe.timeoutSeconds | int | `1` | |
| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases |
| controller.metrics.enabled | bool | `false` | |
| controller.metrics.port | int | `10254` | |
| controller.metrics.prometheusRule.additionalLabels | object | `{}` | |
| controller.metrics.prometheusRule.enabled | bool | `false` | |
| controller.metrics.prometheusRule.rules | list | `[]` | |
| controller.metrics.service.annotations | object | `{}` | |
| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # |
| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | |
| controller.metrics.service.servicePort | int | `10254` | |
| controller.metrics.service.type | string | `"ClusterIP"` | |
| controller.metrics.serviceMonitor.additionalLabels | object | `{}` | |
| controller.metrics.serviceMonitor.enabled | bool | `false` | |
| controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | |
| controller.metrics.serviceMonitor.namespace | string | `""` | |
| controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | |
| controller.metrics.serviceMonitor.relabelings | list | `[]` | |
| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | |
| controller.metrics.serviceMonitor.targetLabels | list | `[]` | |
| controller.minAvailable | int | `1` | |
| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # |
| controller.name | string | `"controller"` | |
| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # |
| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods # |
| controller.podLabels | object | `{}` | Labels to add to the pod container metadata |
| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods |
| controller.priorityClassName | string | `""` | |
| controller.proxySetHeaders | object | `{}` | Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers |
| controller.publishService | object | `{"enabled":true,"pathOverride":""}` | Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running. |
| controller.publishService.enabled | bool | `true` | Enable 'publishService' or not |
| controller.publishService.pathOverride | string | `""` | Allows overriding of the publish service to bind to Must be <namespace>/<service_name> |
| controller.readinessProbe.failureThreshold | int | `3` | |
| controller.readinessProbe.httpGet.path | string | `"/healthz"` | |
| controller.readinessProbe.httpGet.port | int | `10254` | |
| controller.readinessProbe.httpGet.scheme | string | `"HTTP"` | |
| controller.readinessProbe.initialDelaySeconds | int | `10` | |
| controller.readinessProbe.periodSeconds | int | `10` | |
| controller.readinessProbe.successThreshold | int | `1` | |
| controller.readinessProbe.timeoutSeconds | int | `1` | |
| controller.replicaCount | int | `1` | |
| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply |
| controller.resources.requests.cpu | string | `"100m"` | |
| controller.resources.requests.memory | string | `"90Mi"` | |
| controller.scope.enabled | bool | `false` | Enable 'scope' or not |
| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) |
| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, we watch only namespaces whose labels match namespaceSelector. Format like foo=bar. Defaults to empty, which means watching all namespaces. |
| controller.service.annotations | object | `{}` | |
| controller.service.appProtocol | bool | `true` | If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 Will be ignored for Kubernetes versions older than 1.20 # |
| controller.service.enableHttp | bool | `true` | |
| controller.service.enableHttps | bool | `true` | |
| controller.service.enabled | bool | `true` | |
| controller.service.external.enabled | bool | `true` | |
| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # |
| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. |
| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). |
| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. |
| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ |
| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ |
| controller.service.labels | object | `{}` | |
| controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer |
| controller.service.loadBalancerSourceRanges | list | `[]` | |
| controller.service.nodePorts.http | string | `""` | |
| controller.service.nodePorts.https | string | `""` | |
| controller.service.nodePorts.tcp | object | `{}` | |
| controller.service.nodePorts.udp | object | `{}` | |
| controller.service.ports.http | int | `80` | |
| controller.service.ports.https | int | `443` | |
| controller.service.targetPorts.http | string | `"http"` | |
| controller.service.targetPorts.https | string | `"https"` | |
| controller.service.type | string | `"LoadBalancer"` | |
| controller.shareProcessNamespace | bool | `false` | |
| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls |
| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap |
| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) |
| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready # wait up to five minutes for the drain of connections # |
| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # |
| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. # Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ # |
| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap |
| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) |
| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # |
| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without ingressClass annotation/ingressClassName field Overrides value for --watch-ingress-without-class flag of the controller binary Defaults to false |
| defaultBackend.affinity | object | `{}` | |
| defaultBackend.autoscaling.annotations | object | `{}` | |
| defaultBackend.autoscaling.enabled | bool | `false` | |
| defaultBackend.autoscaling.maxReplicas | int | `2` | |
| defaultBackend.autoscaling.minReplicas | int | `1` | |
| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | |
| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | |
| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # |
| defaultBackend.enabled | bool | `false` | |
| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one |
| defaultBackend.extraArgs | object | `{}` | |
| defaultBackend.extraEnvs | list | `[]` | Additional environment variables to set for defaultBackend pods |
| defaultBackend.extraVolumeMounts | list | `[]` | |
| defaultBackend.extraVolumes | list | `[]` | |
| defaultBackend.image.allowPrivilegeEscalation | bool | `false` | |
| defaultBackend.image.image | string | `"defaultbackend-amd64"` | |
| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | |
| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | |
| defaultBackend.image.registry | string | `"registry.k8s.io"` | |
| defaultBackend.image.runAsNonRoot | bool | `true` | |
| defaultBackend.image.runAsUser | int | `65534` | |
| defaultBackend.image.tag | string | `"1.5"` | |
| defaultBackend.labels | object | `{}` | Labels to be added to the default backend resources |
| defaultBackend.livenessProbe.failureThreshold | int | `3` | |
| defaultBackend.livenessProbe.initialDelaySeconds | int | `30` | |
| defaultBackend.livenessProbe.periodSeconds | int | `10` | |
| defaultBackend.livenessProbe.successThreshold | int | `1` | |
| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | |
| defaultBackend.minAvailable | int | `1` | |
| defaultBackend.name | string | `"defaultbackend"` | |
| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # |
| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods # |
| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata |
| defaultBackend.podSecurityContext | object | `{}` | Security Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # |
| defaultBackend.port | int | `8080` | |
| defaultBackend.priorityClassName | string | `""` | |
| defaultBackend.readinessProbe.failureThreshold | int | `6` | |
| defaultBackend.readinessProbe.initialDelaySeconds | int | `0` | |
| defaultBackend.readinessProbe.periodSeconds | int | `5` | |
| defaultBackend.readinessProbe.successThreshold | int | `1` | |
| defaultBackend.readinessProbe.timeoutSeconds | int | `5` | |
| defaultBackend.replicaCount | int | `1` | |
| defaultBackend.resources | object | `{}` | |
| defaultBackend.service.annotations | object | `{}` | |
| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # |
| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | |
| defaultBackend.service.servicePort | int | `80` | |
| defaultBackend.service.type | string | `"ClusterIP"` | |
| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | |
| defaultBackend.serviceAccount.create | bool | `true` | |
| defaultBackend.serviceAccount.name | string | `""` | |
| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # |
| dhParam | string | `nil` | A base64-encoded Diffie-Hellman parameter. This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` # Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param |
| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ |
| podSecurityPolicy.enabled | bool | `false` | |
| portNamePrefix | string | `""` | Prefix for TCP and UDP port names in ingress controller service # Some cloud providers, like Yandex Cloud, may have requirements for a port name regex to support cloud load balancer integration |
| rbac.create | bool | `true` | |
| rbac.scope | bool | `false` | |
| revisionHistoryLimit | int | `10` | Rollback limit # |
| serviceAccount.annotations | object | `{}` | Annotations for the controller service account |
| serviceAccount.automountServiceAccountToken | bool | `true` | |
| serviceAccount.create | bool | `true` | |
| serviceAccount.name | string | `""` | |
| tcp | object | `{}` | TCP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # |
| udp | object | `{}` | UDP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # |

View File

@@ -0,0 +1,235 @@
{{ template "chart.header" . }}
[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Chart version 3.x.x: Kubernetes v1.16+
- Chart version 4.x.x and above: Kubernetes v1.19+
## Get Repo Info
```console
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
```
## Install Chart
**Important:** only helm3 is supported
```console
helm install [RELEASE_NAME] ingress-nginx/ingress-nginx
```
The command deploys ingress-nginx on the Kubernetes cluster in the default configuration.
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
## Uninstall Chart
```console
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
## Upgrading Chart
```console
helm upgrade [RELEASE_NAME] [CHART] --install
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### Upgrading With Zero Downtime in Production
By default the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
### Migrating from stable/nginx-ingress
There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart:
1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one
1. For critical services in production that require zero-downtime, you will want to:
1. [Install](#install-chart) a second Ingress controller
1. Redirect your DNS traffic from the old controller to the new controller
1. Log traffic from both controllers during this changeover
1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production)
Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts.
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
```console
helm show values ingress-nginx/ingress-nginx
```
### PodDisruptionBudget
Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one,
else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
### Prometheus Metrics
The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`.
You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`, and set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the Prometheus ServiceMonitor (see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`)
### ingress-nginx nginx\_status page/stats server
Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller:
- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost.
You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server
### ExternalDNS Service Configuration
Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service:
```yaml
controller:
service:
annotations:
external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
```
### AWS L7 ELB with SSL Termination
Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml):
```yaml
controller:
service:
targetPorts:
http: http
https: http
annotations:
service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
```
### AWS route53-mapper
To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label:
```yaml
controller:
service:
labels:
dns: "route53"
annotations:
domainName: "kubernetes-example.com"
```
### Additional Internal Load Balancer
This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL.
You'll need to set both the following values:
`controller.service.internal.enabled`
`controller.service.internal.annotations`
If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken.
`controller.service.internal.annotations` varies with the cloud service you're using.
Example for AWS:
```yaml
controller:
service:
internal:
enabled: true
annotations:
# Create internal ELB
service.beta.kubernetes.io/aws-load-balancer-internal: "true"
# Any other annotation can be declared here.
```
Example for GCE:
```yaml
controller:
service:
internal:
enabled: true
annotations:
      # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
# For GKE versions 1.17 and later
networking.gke.io/load-balancer-type: "Internal"
# For earlier versions
# cloud.google.com/load-balancer-type: "Internal"
# Any other annotation can be declared here.
```
Example for Azure:
```yaml
controller:
service:
annotations:
# Create internal LB
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
# Any other annotation can be declared here.
```
Example for Oracle Cloud Infrastructure:
```yaml
controller:
service:
annotations:
# Create internal LB
service.beta.kubernetes.io/oci-load-balancer-internal: "true"
# Any other annotation can be declared here.
```
A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object.
Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
### Ingress Admission Webhooks
With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
**This feature is enabled by default since 0.31.0.**
nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521)
### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this:
```console
Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
```
Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error.
As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered.
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
{{ template "helm-docs.versionFooter" . }}

View File

@@ -0,0 +1,7 @@
controller:
watchIngressWithoutClass: true
ingressClassResource:
name: custom-nginx
enabled: true
default: true
controllerValue: "k8s.io/custom-nginx"

View File

@@ -0,0 +1,14 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
kind: DaemonSet
allowSnippetAnnotations: false
admissionWebhooks:
enabled: false
service:
type: ClusterIP
config:
use-proxy-protocol: "true"

View File

@@ -0,0 +1,22 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: NodePort
nodePorts:
tcp:
9000: 30090
udp:
9001: 30091
tcp:
9000: "default/test:8080"
udp:
9001: "default/test:8080"

View File

@@ -0,0 +1,10 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
service:
type: ClusterIP
extraModules:
- name: opentelemetry
image: busybox

View File

@@ -0,0 +1,14 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
addHeaders:
X-Frame-Options: deny
proxySetHeaders:
X-Forwarded-Proto: https
service:
type: ClusterIP

View File

@@ -0,0 +1,14 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP
internal:
enabled: true
annotations:
service.beta.kubernetes.io/aws-load-balancer-internal: "true"

View File

@@ -0,0 +1,10 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: NodePort

View File

@@ -0,0 +1,17 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
metrics:
enabled: true
service:
type: ClusterIP
podAnnotations:
prometheus.io/path: /metrics
prometheus.io/port: "10254"
prometheus.io/scheme: http
prometheus.io/scrape: "true"

View File

@@ -0,0 +1,20 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP
tcp:
configMapNamespace: default
udp:
configMapNamespace: default
tcp:
9000: "default/test:8080"
udp:
9001: "default/test:8080"

View File

@@ -0,0 +1,18 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP
tcp:
9000: "default/test:8080"
udp:
9001: "default/test:8080"
portNamePrefix: "port"

View File

@@ -0,0 +1,16 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP
tcp:
9000: "default/test:8080"
udp:
9001: "default/test:8080"

View File

@@ -0,0 +1,14 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP
tcp:
9000: "default/test:8080"
9001: "default/test:8080"

View File

@@ -0,0 +1,10 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP

View File

@@ -0,0 +1,12 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
metrics:
enabled: true
service:
type: ClusterIP

View File

@@ -0,0 +1,13 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP
podSecurityPolicy:
enabled: true

View File

@@ -0,0 +1,13 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: true
service:
type: ClusterIP
podSecurityPolicy:
enabled: true

View File

@@ -0,0 +1,10 @@
controller:
kind: DaemonSet
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: true
service:
type: ClusterIP

View File

@@ -0,0 +1,14 @@
controller:
autoscaling:
enabled: true
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Pods
value: 1
periodSeconds: 180
admissionWebhooks:
enabled: false
service:
type: ClusterIP

View File

@@ -0,0 +1,11 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
autoscaling:
enabled: true
admissionWebhooks:
enabled: false
service:
type: ClusterIP

View File

@@ -0,0 +1,12 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
config:
use-proxy-protocol: "true"
allowSnippetAnnotations: false
admissionWebhooks:
enabled: false
service:
type: ClusterIP

View File

@@ -0,0 +1,20 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: NodePort
nodePorts:
tcp:
9000: 30090
udp:
9001: 30091
tcp:
9000: "default/test:8080"
udp:
9001: "default/test:8080"

View File

@@ -0,0 +1,8 @@
# Left blank to test default values
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
service:
type: ClusterIP

View File

@@ -0,0 +1,10 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
service:
type: ClusterIP
extraModules:
- name: opentelemetry
image: busybox

View File

@@ -0,0 +1,13 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
addHeaders:
X-Frame-Options: deny
proxySetHeaders:
X-Forwarded-Proto: https
service:
type: ClusterIP

View File

@@ -0,0 +1,13 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP
internal:
enabled: true
annotations:
service.beta.kubernetes.io/aws-load-balancer-internal: "true"

View File

@@ -0,0 +1,11 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
metrics:
enabled: true
service:
type: ClusterIP

View File

@@ -0,0 +1,9 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: NodePort

View File

@@ -0,0 +1,16 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
metrics:
enabled: true
service:
type: ClusterIP
podAnnotations:
prometheus.io/path: /metrics
prometheus.io/port: "10254"
prometheus.io/scheme: http
prometheus.io/scrape: "true"

View File

@@ -0,0 +1,10 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
service:
type: ClusterIP
podSecurityPolicy:
enabled: true

View File

@@ -0,0 +1,19 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP
tcp:
configMapNamespace: default
udp:
configMapNamespace: default
tcp:
9000: "default/test:8080"
udp:
9001: "default/test:8080"

View File

@@ -0,0 +1,17 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP
tcp:
9000: "default/test:8080"
udp:
9001: "default/test:8080"
portNamePrefix: "port"

View File

@@ -0,0 +1,15 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: false
service:
type: ClusterIP
tcp:
9000: "default/test:8080"
udp:
9001: "default/test:8080"

View File

@@ -0,0 +1,11 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
service:
type: ClusterIP
tcp:
9000: "default/test:8080"
9001: "default/test:8080"

View File

@@ -0,0 +1,12 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: true
service:
type: ClusterIP
podSecurityPolicy:
enabled: true

View File

@@ -0,0 +1,12 @@
controller:
service:
type: ClusterIP
admissionWebhooks:
enabled: true
extraEnvs:
- name: FOO
value: foo
- name: TEST
value: test
patch:
enabled: true

View File

@@ -0,0 +1,23 @@
controller:
service:
type: ClusterIP
admissionWebhooks:
enabled: true
createSecretJob:
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
patchWebhookJob:
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
patch:
enabled: true

View File

@@ -0,0 +1,9 @@
controller:
image:
repository: ingress-controller/controller
tag: 1.0.0-dev
digest: null
admissionWebhooks:
enabled: true
service:
type: ClusterIP

Some files were not shown because too many files have changed in this diff Show More